diff --git a/.editorconfig b/.editorconfig index d53c0e8dded..19fe6f5ad6e 100644 --- a/.editorconfig +++ b/.editorconfig @@ -15,6 +15,9 @@ indent_size = 4 [*.rs] max_line_length = 100 +[*.{yml,yaml}] +indent_size = 2 + [*.md] # double whitespace at end of line # denotes a line break in Markdown diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index c0969200407..ee2646490db 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -5,4 +5,4 @@ contact_links: about: Please ask and answer questions here to keep the issue tracker clean. - name: Security url: mailto:georgios@paradigm.xyz - about: Please report security vulnerabilities here. \ No newline at end of file + about: Please report security vulnerabilities here. diff --git a/.github/ISSUE_TEMPLATE/feature.yml b/.github/ISSUE_TEMPLATE/feature.yml index 2e33e3bc6ee..005c33ae3fa 100644 --- a/.github/ISSUE_TEMPLATE/feature.yml +++ b/.github/ISSUE_TEMPLATE/feature.yml @@ -11,7 +11,7 @@ body: label: Describe the feature description: | Please describe the feature and what it is aiming to solve, if relevant. - + If the feature is for a crate, please include a proposed API surface. 
validations: required: true diff --git a/.github/workflows/assertoor.yml b/.github/workflows/assertoor.yml index 3d1ee58c480..becbf4a3a59 100644 --- a/.github/workflows/assertoor.yml +++ b/.github/workflows/assertoor.yml @@ -29,7 +29,7 @@ jobs: id: services run: | export github_sha=${{ github.sha }} - export github_repository=${{ github.repository }} + export github_repository=${{ github.repository }} cat etc/assertoor/assertoor-template.yaml | envsubst > etc/assertoor/assertoor.yaml @@ -92,7 +92,7 @@ jobs: elif [ "$task_result" == "failure" ]; then task_result="${RED}failure${NC}" fi - + echo -e " $(printf '%-4s' "$task_id")\t$task_status\t$task_result\t$(printf '%-50s' "$task_graph$task_name") \t$task_title" done <<< $(echo "$tasks") } @@ -153,7 +153,7 @@ jobs: echo "$task_lines" fi - if [ $failed_tests -gt 0 ]; then + if [ $failed_tests -gt 0 ]; then final_test_result="failure" break fi @@ -197,7 +197,7 @@ jobs: with: name: "kurtosis-enclave-dump-${{ github.run_id }}" path: ./temp/dump - + - name: Return test result shell: bash run: | @@ -227,4 +227,3 @@ jobs: exit 1 # fail action fi - diff --git a/.github/workflows/label-pr.yml b/.github/workflows/label-pr.yml index e52721b9cc8..857d354a8fb 100644 --- a/.github/workflows/label-pr.yml +++ b/.github/workflows/label-pr.yml @@ -21,4 +21,3 @@ jobs: script: | const label_pr = require('./.github/scripts/label_pr.js') await label_pr({github, context}) - diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 4f3632875af..bd73e09e841 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -107,6 +107,25 @@ jobs: components: rustfmt - run: cargo fmt --all --check + book: + name: book + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - uses: actions/checkout@v4 + - uses: dtolnay/rust-toolchain@master + with: + toolchain: "1.76" # MSRV + - uses: Swatinem/rust-cache@v2 + with: + cache-on-failure: true + - run: cargo build --bin reth --workspace --features ethereum + env: + 
RUSTFLAGS: -D warnings + - run: ./book/cli/update.sh target/debug/reth + - name: Check book changes + run: git diff --exit-code + codespell: runs-on: ubuntu-latest timeout-minutes: 30 @@ -127,7 +146,7 @@ jobs: name: lint success runs-on: ubuntu-latest if: always() - needs: [clippy-binaries, clippy, crate-checks, docs, fmt, codespell, grafana] + needs: [clippy-binaries, clippy, crate-checks, docs, fmt, book, codespell, grafana] timeout-minutes: 30 steps: - name: Decide whether the needed jobs succeeded or failed diff --git a/Cargo.lock b/Cargo.lock index 6b944ab54d2..f9969005192 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -342,9 +342,9 @@ dependencies = [ [[package]] name = "alloy-rlp" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d58d9f5da7b40e9bfff0b7e7816700be4019db97d4b6359fe7f94a9e22e42ac" +checksum = "b155716bab55763c95ba212806cf43d05bcc70e5f35b02bad20cf5ec7fe11fed" dependencies = [ "alloy-rlp-derive", "arrayvec", @@ -353,9 +353,9 @@ dependencies = [ [[package]] name = "alloy-rlp-derive" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" +checksum = "8037e03c7f462a063f28daec9fda285a9a89da003c552f8637a80b9c8fd96241" dependencies = [ "proc-macro2", "quote", @@ -631,9 +631,9 @@ dependencies = [ [[package]] name = "alloy-trie" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d55bd16fdb7ff4bd74cc4c878eeac7e8a27c0d7ba9df4ab58d9310aaafb62d43" +checksum = "03704f265cbbb943b117ecb5055fd46e8f41e7dc8a58b1aed20bcd40ace38c15" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -645,6 +645,7 @@ dependencies = [ "proptest", "proptest-derive", "serde", + "smallvec", "tracing", ] @@ -4566,7 +4567,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.52.5", + "windows-targets 0.48.5", ] [[package]] @@ -6390,6 +6391,7 @@ dependencies = [ "reth-consensus", "reth-consensus-common", "reth-db", + "reth-db-common", "reth-discv4", "reth-discv5", "reth-downloaders", @@ -6449,6 +6451,7 @@ dependencies = [ "reth-revm", "reth-rpc-types", "reth-stages-api", + "reth-tokio-util", "reth-transaction-pool", "tokio", "tokio-stream", @@ -6532,6 +6535,7 @@ dependencies = [ "reth-db", "reth-evm", "reth-evm-ethereum", + "reth-execution-errors", "reth-interfaces", "reth-metrics", "reth-network", @@ -6539,6 +6543,7 @@ dependencies = [ "reth-provider", "reth-revm", "reth-stages-api", + "reth-storage-errors", "reth-trie", "reth-trie-parallel", "tokio", @@ -6589,11 +6594,8 @@ version = "0.2.0-beta.7" dependencies = [ "confy", "humantime-serde", - "reth-discv4", - "reth-net-nat", "reth-network", "reth-primitives", - "secp256k1 0.28.2", "serde", "tempfile", "toml", @@ -6647,6 +6649,7 @@ dependencies = [ "reth-metrics", "reth-nippy-jar", "reth-primitives", + "reth-storage-errors", "reth-tracing", "rustc-hash", "serde", @@ -6657,6 +6660,25 @@ dependencies = [ "thiserror", ] +[[package]] +name = "reth-db-common" +version = "0.2.0-beta.7" +dependencies = [ + "eyre", + "reth-codecs", + "reth-config", + "reth-db", + "reth-etl", + "reth-interfaces", + "reth-primitives", + "reth-provider", + "reth-trie", + "serde", + "serde_json", + "thiserror", + "tracing", +] + [[package]] name = "reth-discv4" version = "0.2.0-beta.7" @@ -6784,6 +6806,7 @@ dependencies = [ "reth-provider", "reth-rpc", "reth-rpc-layer", + "reth-tokio-util", "reth-tracing", "secp256k1 0.28.2", "serde_json", @@ -6881,7 +6904,7 @@ dependencies = [ "proptest", "proptest-derive", "rand 0.8.5", - "reth-codecs", + "reth-codecs-derive", "reth-net-common", "reth-primitives", "reth-tracing", @@ -6961,8 +6984,9 @@ version = "0.2.0-beta.7" dependencies = [ "futures-util", 
"parking_lot 0.12.2", - "reth-interfaces", + "reth-execution-errors", "reth-primitives", + "reth-storage-errors", "revm", "revm-primitives", ] @@ -6972,20 +6996,21 @@ name = "reth-evm-ethereum" version = "0.2.0-beta.7" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", + "reth-ethereum-consensus", "reth-evm", - "reth-interfaces", "reth-primitives", "reth-revm", "revm-primitives", - "tracing", ] [[package]] name = "reth-evm-optimism" version = "0.2.0-beta.7" dependencies = [ + "reth-consensus-common", "reth-evm", - "reth-interfaces", + "reth-execution-errors", + "reth-optimism-consensus", "reth-primitives", "reth-provider", "reth-revm", @@ -6995,6 +7020,16 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-execution-errors" +version = "0.2.0-beta.7" +dependencies = [ + "reth-consensus", + "reth-primitives", + "reth-storage-errors", + "thiserror", +] + [[package]] name = "reth-exex" version = "0.2.0-beta.7" @@ -7027,21 +7062,14 @@ dependencies = [ name = "reth-interfaces" version = "0.2.0-beta.7" dependencies = [ - "auto_impl", - "clap", - "futures", - "parking_lot 0.12.2", - "rand 0.8.5", "reth-consensus", - "reth-eth-wire-types", + "reth-execution-errors", "reth-fs-util", "reth-network-api", - "reth-network-types", + "reth-network-p2p", "reth-primitives", - "secp256k1 0.28.2", + "reth-storage-errors", "thiserror", - "tokio", - "tracing", ] [[package]] @@ -7126,8 +7154,8 @@ dependencies = [ name = "reth-net-common" version = "0.2.0-beta.7" dependencies = [ + "alloy-primitives", "pin-project", - "reth-network-types", "tokio", ] @@ -7206,17 +7234,36 @@ dependencies = [ name = "reth-network-api" version = "0.2.0-beta.7" dependencies = [ + "alloy-primitives", "enr", - "reth-discv4", "reth-eth-wire", "reth-network-types", - "reth-primitives", "reth-rpc-types", "serde", "thiserror", "tokio", ] +[[package]] +name = "reth-network-p2p" +version = "0.2.0-beta.7" +dependencies = [ + "auto_impl", + "futures", + "parking_lot 
0.12.2", + "rand 0.8.5", + "reth-consensus", + "reth-eth-wire-types", + "reth-network-api", + "reth-network-types", + "reth-primitives", + "reth-storage-errors", + "secp256k1 0.28.2", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "reth-network-types" version = "0.2.0-beta.7" @@ -7283,6 +7330,7 @@ dependencies = [ "reth-config", "reth-consensus", "reth-db", + "reth-db-common", "reth-downloaders", "reth-evm", "reth-exex", @@ -7298,6 +7346,7 @@ dependencies = [ "reth-rpc", "reth-rpc-engine-api", "reth-rpc-layer", + "reth-rpc-types", "reth-stages", "reth-static-file", "reth-tasks", @@ -7312,6 +7361,7 @@ dependencies = [ name = "reth-node-core" version = "0.2.0-beta.7" dependencies = [ + "alloy-rpc-types-engine", "assert_matches", "clap", "const-str", @@ -7333,34 +7383,27 @@ dependencies = [ "proptest", "rand 0.8.5", "reth-beacon-consensus", - "reth-codecs", "reth-config", "reth-consensus-common", "reth-db", "reth-discv4", "reth-discv5", "reth-engine-primitives", - "reth-etl", - "reth-evm", "reth-fs-util", "reth-interfaces", "reth-metrics", "reth-net-nat", "reth-network", - "reth-network-api", "reth-primitives", "reth-provider", "reth-rpc", "reth-rpc-api", "reth-rpc-builder", - "reth-rpc-engine-api", - "reth-rpc-layer", "reth-rpc-types", "reth-rpc-types-compat", "reth-tasks", "reth-tracing", "reth-transaction-pool", - "reth-trie", "secp256k1 0.28.2", "serde", "serde_json", @@ -7496,6 +7539,24 @@ dependencies = [ "tracing", ] +[[package]] +name = "reth-optimism-rpc" +version = "0.2.0-beta.7" +dependencies = [ + "jsonrpsee", + "reth-evm", + "reth-evm-optimism", + "reth-network-api", + "reth-primitives", + "reth-provider", + "reth-rpc", + "reth-rpc-api", + "reth-rpc-types", + "reth-transaction-pool", + "revm", + "thiserror", +] + [[package]] name = "reth-payload-builder" version = "0.2.0-beta.7" @@ -7543,7 +7604,6 @@ dependencies = [ "byteorder", "bytes", "c-kzg", - "clap", "criterion", "derive_more", "hash-db", @@ -7560,13 +7620,13 @@ dependencies = [ 
"reth-codecs", "reth-ethereum-forks", "reth-network-types", + "reth-static-file-types", "revm", "revm-primitives", "roaring", "secp256k1 0.28.2", "serde", "serde_json", - "strum", "sucds", "tempfile", "test-fuzz", @@ -7594,11 +7654,13 @@ dependencies = [ "reth-codecs", "reth-db", "reth-evm", + "reth-execution-errors", "reth-fs-util", "reth-interfaces", "reth-metrics", "reth-nippy-jar", "reth-primitives", + "reth-storage-errors", "reth-trie", "revm", "strum", @@ -7628,7 +7690,6 @@ dependencies = [ "reth-tracing", "thiserror", "tokio", - "tokio-stream", "tracing", ] @@ -7638,9 +7699,10 @@ version = "0.2.0-beta.7" dependencies = [ "alloy-eips 0.1.0 (git+https://github.com/alloy-rs/alloy?rev=64feb9b)", "reth-consensus-common", - "reth-interfaces", + "reth-execution-errors", "reth-primitives", "reth-provider", + "reth-storage-errors", "reth-trie", "revm", "tracing", @@ -7671,7 +7733,6 @@ dependencies = [ "reth-consensus-common", "reth-evm", "reth-evm-ethereum", - "reth-evm-optimism", "reth-interfaces", "reth-metrics", "reth-network-api", @@ -7755,6 +7816,7 @@ dependencies = [ "reth-rpc-types", "reth-rpc-types-compat", "reth-tasks", + "reth-tokio-util", "reth-tracing", "reth-transaction-pool", "serde", @@ -7789,6 +7851,7 @@ dependencies = [ "reth-rpc-types", "reth-rpc-types-compat", "reth-tasks", + "reth-tokio-util", "serde", "thiserror", "tokio", @@ -7921,10 +7984,32 @@ dependencies = [ "reth-stages", "reth-tokio-util", "tempfile", + "tokio", "tokio-stream", "tracing", ] +[[package]] +name = "reth-static-file-types" +version = "0.2.0-beta.7" +dependencies = [ + "alloy-primitives", + "clap", + "derive_more", + "serde", + "strum", +] + +[[package]] +name = "reth-storage-errors" +version = "0.2.0-beta.7" +dependencies = [ + "clap", + "reth-fs-util", + "reth-primitives", + "thiserror", +] + [[package]] name = "reth-tasks" version = "0.2.0-beta.7" @@ -7956,6 +8041,7 @@ version = "0.2.0-beta.7" dependencies = [ "tokio", "tokio-stream", + "tracing", ] [[package]] diff --git 
a/Cargo.toml b/Cargo.toml index ebf86a15fcb..c0ee3e9d0a1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,14 +6,18 @@ members = [ "crates/config/", "crates/consensus/auto-seal/", "crates/consensus/beacon/", - "crates/ethereum/consensus/", "crates/consensus/common/", "crates/consensus/consensus/", - "crates/ethereum-forks/", "crates/e2e-test-utils/", + "crates/engine-primitives/", + "crates/ethereum-forks/", + "crates/ethereum/consensus/", + "crates/ethereum/engine-primitives/", + "crates/ethereum/evm", + "crates/ethereum/node", "crates/etl/", "crates/evm/", - "crates/ethereum/evm", + "crates/evm/execution-errors", "crates/exex/", "crates/interfaces/", "crates/metrics/", @@ -24,12 +28,22 @@ members = [ "crates/net/dns/", "crates/net/downloaders/", "crates/net/ecies/", - "crates/net/eth-wire/", "crates/net/eth-wire-types", + "crates/net/eth-wire/", "crates/net/nat/", - "crates/net/network/", "crates/net/network-api/", + "crates/net/network/", + "crates/net/p2p/", "crates/net/types/", + "crates/node-core/", + "crates/node/api/", + "crates/node/builder/", + "crates/node/events/", + "crates/optimism/consensus", + "crates/optimism/evm/", + "crates/optimism/node/", + "crates/optimism/payload/", + "crates/optimism/rpc", "crates/payload/basic/", "crates/payload/builder/", "crates/payload/ethereum/", @@ -37,32 +51,24 @@ members = [ "crates/primitives/", "crates/prune/", "crates/revm/", - "crates/node/events/", "crates/rpc/ipc/", - "crates/rpc/rpc/", "crates/rpc/rpc-api/", "crates/rpc/rpc-builder/", "crates/rpc/rpc-engine-api/", + "crates/rpc/rpc-layer", "crates/rpc/rpc-testing-util/", - "crates/rpc/rpc-types/", "crates/rpc/rpc-types-compat/", - "crates/rpc/rpc-layer", - "crates/engine-primitives/", - "crates/ethereum/engine-primitives/", - "crates/ethereum/node", - "crates/node/builder/", - "crates/optimism/consensus", - "crates/optimism/evm/", - "crates/optimism/node/", - "crates/optimism/payload/", - "crates/node-core/", - "crates/node/api/", - "crates/stages/", + 
"crates/rpc/rpc-types/", + "crates/rpc/rpc/", "crates/stages-api", + "crates/stages/", + "crates/static-file-types/", "crates/static-file/", "crates/storage/codecs/", "crates/storage/codecs/derive/", "crates/storage/db/", + "crates/storage/db-common", + "crates/storage/errors/", "crates/storage/libmdbx-rs/", "crates/storage/libmdbx-rs/mdbx-sys/", "crates/storage/nippy-jar/", @@ -71,26 +77,26 @@ members = [ "crates/tokio-util/", "crates/tracing/", "crates/transaction-pool/", - "crates/trie/", "crates/trie-parallel/", - "examples/node-custom-rpc/", + "crates/trie/", "examples/beacon-api-sse/", - "examples/node-event-hooks/", - "examples/custom-evm/", + "examples/bsc-p2p", + "examples/custom-dev-node/", "examples/custom-engine-types/", + "examples/custom-evm/", + "examples/custom-inspector/", "examples/custom-node-components/", - "examples/custom-dev-node/", "examples/custom-payload-builder/", + "examples/db-access", + "examples/exex/*", "examples/manual-p2p/", - "examples/network/", "examples/network-txpool/", + "examples/network/", + "examples/node-custom-rpc/", + "examples/node-event-hooks/", + "examples/polygon-p2p/", "examples/rpc-db/", "examples/txpool-tracing/", - "examples/polygon-p2p/", - "examples/custom-inspector/", - "examples/exex/*", - "examples/db-access", - "examples/bsc-p2p", "testing/ef-tests/", "testing/testing-utils", ] @@ -105,7 +111,7 @@ rust.missing_debug_implementations = "warn" rust.missing_docs = "warn" rust.unreachable_pub = "warn" rust.unused_must_use = "deny" -rust.rust_2018_idioms = "deny" +rust.rust_2018_idioms = { level = "deny", priority = -1 } rustdoc.all = "warn" [workspace.lints.clippy] @@ -210,38 +216,35 @@ reth = { path = "bin/reth" } reth-auto-seal-consensus = { path = "crates/consensus/auto-seal" } reth-basic-payload-builder = { path = "crates/payload/basic" } reth-beacon-consensus = { path = "crates/consensus/beacon" } -reth-ethereum-consensus = { path = "crates/ethereum/consensus" } reth-blockchain-tree = { path = 
"crates/blockchain-tree" } reth-cli-runner = { path = "crates/cli/runner" } reth-codecs = { path = "crates/storage/codecs" } +reth-codecs-derive = { path = "crates/storage/codecs/derive" } reth-config = { path = "crates/config" } reth-consensus = { path = "crates/consensus/consensus" } reth-consensus-common = { path = "crates/consensus/common" } reth-db = { path = "crates/storage/db" } +reth-db-common = { path = "crates/storage/db-common" } reth-discv4 = { path = "crates/net/discv4" } reth-discv5 = { path = "crates/net/discv5" } reth-dns-discovery = { path = "crates/net/dns" } -reth-e2e-test-utils = { path = "crates/e2e-test-utils" } -reth-engine-primitives = { path = "crates/engine-primitives" } -reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } -reth-node-builder = { path = "crates/node/builder" } -reth-node-ethereum = { path = "crates/ethereum/node" } -reth-node-optimism = { path = "crates/optimism/node" } -reth-evm-optimism = { path = "crates/optimism/evm" } -reth-node-core = { path = "crates/node-core" } -reth-node-api = { path = "crates/node/api" } reth-downloaders = { path = "crates/net/downloaders" } +reth-e2e-test-utils = { path = "crates/e2e-test-utils" } reth-ecies = { path = "crates/net/ecies" } +reth-engine-primitives = { path = "crates/engine-primitives" } reth-eth-wire = { path = "crates/net/eth-wire" } reth-eth-wire-types = { path = "crates/net/eth-wire-types" } +reth-ethereum-consensus = { path = "crates/ethereum/consensus" } +reth-ethereum-engine-primitives = { path = "crates/ethereum/engine-primitives" } reth-ethereum-forks = { path = "crates/ethereum-forks" } reth-ethereum-payload-builder = { path = "crates/payload/ethereum" } reth-etl = { path = "crates/etl" } reth-evm = { path = "crates/evm" } reth-evm-ethereum = { path = "crates/ethereum/evm" } +reth-evm-optimism = { path = "crates/optimism/evm" } +reth-execution-errors = { path = "crates/evm/execution-errors" } reth-exex = { path = "crates/exex" } reth-fs-util 
= { path = "crates/fs-util" } -reth-optimism-payload-builder = { path = "crates/optimism/payload" } reth-interfaces = { path = "crates/interfaces" } reth-ipc = { path = "crates/rpc/ipc" } reth-libmdbx = { path = "crates/storage/libmdbx-rs" } @@ -253,7 +256,17 @@ reth-net-nat = { path = "crates/net/nat" } reth-network = { path = "crates/net/network" } reth-network-api = { path = "crates/net/network-api" } reth-network-types = { path = "crates/net/types" } +reth-network-p2p = { path = "crates/net/p2p" } reth-nippy-jar = { path = "crates/storage/nippy-jar" } +reth-node-api = { path = "crates/node/api" } +reth-node-builder = { path = "crates/node/builder" } +reth-node-core = { path = "crates/node-core" } +reth-node-ethereum = { path = "crates/ethereum/node" } +reth-node-events = { path = "crates/node/events" } +reth-node-optimism = { path = "crates/optimism/node" } +reth-optimism-consensus = { path = "crates/optimism/consensus" } +reth-optimism-payload-builder = { path = "crates/optimism/payload" } +reth-optimism-rpc = { path = "crates/optimism/rpc" } reth-payload-builder = { path = "crates/payload/builder" } reth-payload-validator = { path = "crates/payload/validator" } reth-primitives = { path = "crates/primitives" } @@ -265,21 +278,21 @@ reth-rpc-api = { path = "crates/rpc/rpc-api" } reth-rpc-api-testing-util = { path = "crates/rpc/rpc-testing-util" } reth-rpc-builder = { path = "crates/rpc/rpc-builder" } reth-rpc-engine-api = { path = "crates/rpc/rpc-engine-api" } +reth-rpc-layer = { path = "crates/rpc/rpc-layer" } reth-rpc-types = { path = "crates/rpc/rpc-types" } reth-rpc-types-compat = { path = "crates/rpc/rpc-types-compat" } -reth-rpc-layer = { path = "crates/rpc/rpc-layer" } reth-stages = { path = "crates/stages" } reth-stages-api = { path = "crates/stages-api" } reth-static-file = { path = "crates/static-file" } +reth-static-file-types = { path = "crates/static-file-types" } +reth-storage-errors = { path = "crates/storage/errors" } reth-tasks = { path = 
"crates/tasks" } +reth-testing-utils = { path = "testing/testing-utils" } reth-tokio-util = { path = "crates/tokio-util" } reth-tracing = { path = "crates/tracing" } reth-transaction-pool = { path = "crates/transaction-pool" } reth-trie = { path = "crates/trie" } reth-trie-parallel = { path = "crates/trie-parallel" } -reth-optimism-consensus = { path = "crates/optimism/consensus" } -reth-node-events = { path = "crates/node/events" } -reth-testing-utils = { path = "testing/testing-utils" } # revm revm = { version = "9.0.0", features = [ @@ -297,7 +310,7 @@ alloy-primitives = "0.7.2" alloy-dyn-abi = "0.7.2" alloy-sol-types = "0.7.2" alloy-rlp = "0.3.4" -alloy-trie = "0.4.0" +alloy-trie = "0.4" alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } alloy-rpc-types-anvil = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } alloy-rpc-types-trace = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9b" } diff --git a/Makefile b/Makefile index bfa56011c1a..f62ef19f334 100644 --- a/Makefile +++ b/Makefile @@ -413,9 +413,9 @@ fix-lint-other-targets: -- -D warnings fix-lint: - make lint-reth && \ - make lint-op-reth && \ - make lint-other-targets && \ + make fix-lint-reth && \ + make fix-lint-op-reth && \ + make fix-lint-other-targets && \ make fmt .PHONY: rustdocs diff --git a/README.md b/README.md index 47d8337126d..cc720c9efa0 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ Reth is performant, feature-complete, [Cancun-ready](https://paradigmxyz.github. We actively recommend professional node operators to switch to Reth in production for performance and cost reasons in use cases where high performance with great margins is required such as RPC, MEV, Indexing, Simulations, and P2P activities. 
-While we are aware of parties running Reth staking nodes in production, we do *not* encourage usage in production staking environments by non-professionals until our audits are done, and the 1.0 version of Reth is released, but we are available to support without warranty or liability. +While we are aware of parties running Reth staking nodes in production, we do *not* encourage usage in production staking environments by non-professionals until our audits are done, and the 1.0 version of Reth is released, but we are available to support without warranty or liability. More historical context below: * We are releasing 1.0 "production-ready" stable Reth once our Reth & Revm audits are done. ETA ~May 2024. @@ -155,5 +155,9 @@ None of this would have been possible without them, so big shoutout to the teams - [Erigon](https://github.com/ledgerwatch/erigon) (fka Turbo-Geth): Erigon pioneered the ["Staged Sync" architecture](https://erigon.substack.com/p/erigon-stage-sync-and-control-flows) that Reth is using, as well as [introduced MDBX](https://github.com/ledgerwatch/erigon/wiki/Choice-of-storage-engine) as the database of choice. We thank Erigon for pushing the state of the art research on the performance limits of Ethereum nodes. - [Akula](https://github.com/akula-bft/akula/): Reth uses forks of the Apache versions of Akula's [MDBX Bindings](https://github.com/paradigmxyz/reth/pull/132), [FastRLP](https://github.com/paradigmxyz/reth/pull/63) and [ECIES](https://github.com/paradigmxyz/reth/pull/80) . Given that these packages were already released under the Apache License, and they implement standardized solutions, we decided not to reimplement them to iterate faster. We thank the Akula team for their contributions to the Rust Ethereum ecosystem and for publishing these packages. +## Warning + +The `NippyJar` and `Compact` encoding formats and their implementations are designed for storing and retrieving data internally. 
They are not hardened to safely read potentially malicious data. + [book]: https://paradigmxyz.github.io/reth/ [tg-url]: https://t.me/paradigm_reth diff --git a/bin/reth/Cargo.toml b/bin/reth/Cargo.toml index b95140aadfa..ab1e9927ada 100644 --- a/bin/reth/Cargo.toml +++ b/bin/reth/Cargo.toml @@ -54,6 +54,7 @@ reth-node-optimism = { workspace = true, optional = true, features = [ "optimism", ] } reth-node-core.workspace = true +reth-db-common.workspace = true reth-node-builder.workspace = true reth-node-events.workspace = true reth-consensus.workspace = true @@ -134,7 +135,6 @@ min-trace-logs = ["tracing/release_max_level_trace"] optimism = [ "reth-primitives/optimism", - "reth-interfaces/optimism", "reth-rpc/optimism", "reth-provider/optimism", "reth-beacon-consensus/optimism", diff --git a/bin/reth/src/commands/db/checksum.rs b/bin/reth/src/commands/db/checksum.rs index 689b6ca5a94..9562c983923 100644 --- a/bin/reth/src/commands/db/checksum.rs +++ b/bin/reth/src/commands/db/checksum.rs @@ -1,12 +1,15 @@ -use crate::utils::DbTool; -use ahash::AHasher; +use crate::{ + commands::db::get::{maybe_json_value_parser, table_key}, + utils::DbTool, +}; +use ahash::RandomState; use clap::Parser; use reth_db::{ cursor::DbCursorRO, database::Database, table::Table, transaction::DbTx, DatabaseEnv, RawKey, RawTable, RawValue, TableViewer, Tables, }; use std::{ - hash::Hasher, + hash::{BuildHasher, Hasher}, time::{Duration, Instant}, }; use tracing::{info, warn}; @@ -16,35 +19,81 @@ use tracing::{info, warn}; pub struct Command { /// The table name table: Tables, + + /// The start of the range to checksum. + #[arg(long, value_parser = maybe_json_value_parser)] + start_key: Option, + + /// The end of the range to checksum. + #[arg(long, value_parser = maybe_json_value_parser)] + end_key: Option, + + /// The maximum number of records that are queried and used to compute the + /// checksum. 
+ #[arg(long)] + limit: Option, } impl Command { /// Execute `db checksum` command pub fn execute(self, tool: &DbTool) -> eyre::Result<()> { warn!("This command should be run without the node running!"); - self.table.view(&ChecksumViewer { tool }) + self.table.view(&ChecksumViewer { + tool, + start_key: self.start_key, + end_key: self.end_key, + limit: self.limit, + }) } } pub(crate) struct ChecksumViewer<'a, DB: Database> { tool: &'a DbTool, + start_key: Option, + end_key: Option, + limit: Option, } impl ChecksumViewer<'_, DB> { pub(crate) fn new(tool: &'_ DbTool) -> ChecksumViewer<'_, DB> { - ChecksumViewer { tool } + ChecksumViewer { tool, start_key: None, end_key: None, limit: None } } pub(crate) fn get_checksum(&self) -> Result<(u64, Duration), eyre::Report> { let provider = self.tool.provider_factory.provider()?.disable_long_read_transaction_safety(); let tx = provider.tx_ref(); + info!( + "Start computing checksum, start={:?}, end={:?}, limit={:?}", + self.start_key, self.end_key, self.limit + ); let mut cursor = tx.cursor_read::>()?; - let walker = cursor.walk(None)?; + let walker = match (self.start_key.as_deref(), self.end_key.as_deref()) { + (Some(start), Some(end)) => { + let start_key = table_key::(start).map(RawKey::::new)?; + let end_key = table_key::(end).map(RawKey::::new)?; + cursor.walk_range(start_key..=end_key)? + } + (None, Some(end)) => { + let end_key = table_key::(end).map(RawKey::::new)?; + + cursor.walk_range(..=end_key)? + } + (Some(start), None) => { + let start_key = table_key::(start).map(RawKey::::new)?; + cursor.walk_range(start_key..)? 
+ } + (None, None) => cursor.walk_range(..)?, + }; let start_time = Instant::now(); - let mut hasher = AHasher::default(); + let mut hasher = RandomState::with_seeds(1, 2, 3, 4).build_hasher(); + let mut total = 0; + + let limit = self.limit.unwrap_or(usize::MAX); + let mut enumerate_start_key = None; + let mut enumerate_end_key = None; for (index, entry) in walker.enumerate() { let (k, v): (RawKey, RawValue) = entry?; @@ -54,6 +103,22 @@ impl ChecksumViewer<'_, DB> { hasher.write(k.raw_key()); hasher.write(v.raw_value()); + + if enumerate_start_key.is_none() { + enumerate_start_key = Some(k.clone()); + } + enumerate_end_key = Some(k); + + total = index + 1; + if total >= limit { + break + } + } + + info!("Hashed {total} entries."); + if let (Some(s), Some(e)) = (enumerate_start_key, enumerate_end_key) { + info!("start-key: {}", serde_json::to_string(&s.key()?).unwrap_or_default()); + info!("end-key: {}", serde_json::to_string(&e.key()?).unwrap_or_default()); } let checksum = hasher.finish(); diff --git a/bin/reth/src/commands/db/get.rs b/bin/reth/src/commands/db/get.rs index 80e3ae393d1..f1f6b963cf1 100644 --- a/bin/reth/src/commands/db/get.rs +++ b/bin/reth/src/commands/db/get.rs @@ -125,7 +125,7 @@ impl Command { } /// Get an instance of key for given table -fn table_key(key: &str) -> Result { +pub(crate) fn table_key(key: &str) -> Result { serde_json::from_str::(key).map_err(|e| eyre::eyre!(e)) } @@ -188,7 +188,7 @@ impl TableViewer<()> for GetValueViewer<'_, DB> { } /// Map the user input value to json -fn maybe_json_value_parser(value: &str) -> Result { +pub(crate) fn maybe_json_value_parser(value: &str) -> Result { if serde_json::from_str::(value).is_ok() { Ok(value.to_string()) } else { diff --git a/bin/reth/src/commands/db/list.rs b/bin/reth/src/commands/db/list.rs index 1c1839188d2..df05638bc9e 100644 --- a/bin/reth/src/commands/db/list.rs +++ b/bin/reth/src/commands/db/list.rs @@ -90,8 +90,6 @@ impl TableViewer<()> for ListTableViewer<'_> { fn 
view(&self) -> Result<(), Self::Error> { self.tool.provider_factory.db_ref().view(|tx| { - // Disable timeout because we are entering a TUI which might read for a long time - tx.inner.disable_timeout(); let table_db = tx.inner.open_db(Some(self.args.table.name())).wrap_err("Could not open db.")?; let stats = tx.inner.db_stat(&table_db).wrap_err(format!("Could not find table: {}", stringify!($table)))?; let total_entries = stats.entries(); diff --git a/bin/reth/src/commands/db/stats.rs b/bin/reth/src/commands/db/stats.rs index a59a904eb74..03c384b2ffc 100644 --- a/bin/reth/src/commands/db/stats.rs +++ b/bin/reth/src/commands/db/stats.rs @@ -377,7 +377,7 @@ impl Command { let max_widths = table.column_max_content_widths(); let mut separator = Row::new(); for width in max_widths { - separator.add_cell(Cell::new(&"-".repeat(width as usize))); + separator.add_cell(Cell::new("-".repeat(width as usize))); } table.add_row(separator); diff --git a/bin/reth/src/commands/debug_cmd/build_block.rs b/bin/reth/src/commands/debug_cmd/build_block.rs index 31585c2f6ea..7914ec7829d 100644 --- a/bin/reth/src/commands/debug_cmd/build_block.rs +++ b/bin/reth/src/commands/debug_cmd/build_block.rs @@ -298,7 +298,7 @@ impl Command { consensus.validate_header_with_total_difficulty(block, U256::MAX)?; consensus.validate_header(block)?; - consensus.validate_block(block)?; + consensus.validate_block_pre_execution(block)?; let senders = block.senders().expect("sender recovery failed"); let block_with_senders = diff --git a/bin/reth/src/commands/debug_cmd/execution.rs b/bin/reth/src/commands/debug_cmd/execution.rs index 3e647423680..c07efab2b9f 100644 --- a/bin/reth/src/commands/debug_cmd/execution.rs +++ b/bin/reth/src/commands/debug_cmd/execution.rs @@ -17,6 +17,7 @@ use reth_cli_runner::CliContext; use reth_config::{config::EtlConfig, Config}; use reth_consensus::Consensus; use reth_db::{database::Database, init_db, DatabaseEnv}; +use reth_db_common::init::init_genesis; use 
reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, headers::reverse_headers::ReverseHeadersDownloaderBuilder, @@ -26,7 +27,6 @@ use reth_fs_util as fs; use reth_interfaces::p2p::{bodies::client::BodiesClient, headers::client::HeadersClient}; use reth_network::{NetworkEvents, NetworkHandle}; use reth_network_api::NetworkInfo; -use reth_node_core::init::init_genesis; use reth_primitives::{ stage::StageId, BlockHashOrNumber, BlockNumber, ChainSpec, PruneModes, B256, }; @@ -187,7 +187,7 @@ impl Command { match get_single_header(&client, BlockHashOrNumber::Number(block)).await { Ok(tip_header) => { info!(target: "reth::cli", ?block, "Successfully fetched block"); - return Ok(tip_header.hash()) + return Ok(tip_header.hash()); } Err(error) => { error!(target: "reth::cli", ?block, %error, "Failed to fetch the block. Retrying..."); @@ -255,7 +255,7 @@ impl Command { provider.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number); if latest_block_number.unwrap_or_default() >= self.to { info!(target: "reth::cli", latest = latest_block_number, "Nothing to run"); - return Ok(()) + return Ok(()); } let pipeline_events = pipeline.events(); diff --git a/bin/reth/src/commands/debug_cmd/merkle.rs b/bin/reth/src/commands/debug_cmd/merkle.rs index 40d79a85d57..291788bad75 100644 --- a/bin/reth/src/commands/debug_cmd/merkle.rs +++ b/bin/reth/src/commands/debug_cmd/merkle.rs @@ -197,7 +197,7 @@ impl Command { )), PruneModes::none(), ); - executor.execute_one((&sealed_block.clone().unseal(), td).into())?; + executor.execute_and_verify_one((&sealed_block.clone().unseal(), td).into())?; let BatchBlockExecutionOutput { bundle, receipts, first_block } = executor.finalize(); BundleStateWithReceipts::new(bundle, receipts, first_block).write_to_storage( provider_rw.tx_ref(), diff --git a/bin/reth/src/commands/import.rs b/bin/reth/src/commands/import.rs index 235ada84854..70a2c339cad 100644 --- a/bin/reth/src/commands/import.rs +++ b/bin/reth/src/commands/import.rs @@ -16,6 
+16,7 @@ use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{config::EtlConfig, Config}; use reth_consensus::Consensus; use reth_db::{database::Database, init_db, tables, transaction::DbTx}; +use reth_db_common::init::init_genesis; use reth_downloaders::{ bodies::bodies::BodiesDownloaderBuilder, file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE}, @@ -25,7 +26,6 @@ use reth_interfaces::p2p::{ bodies::downloader::BodyDownloader, headers::downloader::{HeaderDownloader, SyncTarget}, }; -use reth_node_core::init::init_genesis; use reth_node_events::node::NodeEvent; use reth_primitives::{stage::StageId, ChainSpec, PruneModes, B256}; use reth_provider::{ @@ -257,7 +257,7 @@ where let max_block = file_client.max_block().unwrap_or(0); - let mut pipeline = Pipeline::builder() + let pipeline = Pipeline::builder() .with_tip_sender(tip_tx) // we want to sync all blocks the file client provides or 0 if empty .with_max_block(max_block) diff --git a/bin/reth/src/commands/import_op.rs b/bin/reth/src/commands/import_op.rs index b1ae8e8cb36..a85fc4e3dcd 100644 --- a/bin/reth/src/commands/import_op.rs +++ b/bin/reth/src/commands/import_op.rs @@ -13,19 +13,15 @@ use crate::{ use clap::Parser; use reth_beacon_consensus::EthBeaconConsensus; use reth_config::{config::EtlConfig, Config}; - use reth_db::{init_db, tables, transaction::DbTx}; +use reth_db_common::init::init_genesis; use reth_downloaders::file_client::{ ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE, }; - -use reth_node_core::init::init_genesis; - use reth_primitives::{op_mainnet::is_dup_tx, stage::StageId, PruneModes}; use reth_provider::{ProviderFactory, StageCheckpointReader, StaticFileProviderFactory}; use reth_static_file::StaticFileProducer; use std::{path::PathBuf, sync::Arc}; - use tracing::{debug, error, info}; /// Syncs RLP encoded blocks from a file. 
diff --git a/bin/reth/src/commands/init_cmd.rs b/bin/reth/src/commands/init_cmd.rs index bdd8acb52d1..3b900b3f01a 100644 --- a/bin/reth/src/commands/init_cmd.rs +++ b/bin/reth/src/commands/init_cmd.rs @@ -9,7 +9,7 @@ use crate::{ }; use clap::Parser; use reth_db::init_db; -use reth_node_core::init::init_genesis; +use reth_db_common::init::init_genesis; use reth_primitives::ChainSpec; use reth_provider::ProviderFactory; use std::sync::Arc; diff --git a/bin/reth/src/commands/init_state.rs b/bin/reth/src/commands/init_state.rs index ef640e01cf1..f5ee0c4b1c1 100644 --- a/bin/reth/src/commands/init_state.rs +++ b/bin/reth/src/commands/init_state.rs @@ -10,7 +10,7 @@ use crate::{ use clap::Parser; use reth_config::config::EtlConfig; use reth_db::{database::Database, init_db}; -use reth_node_core::init::init_from_state_dump; +use reth_db_common::init::init_from_state_dump; use reth_primitives::{ChainSpec, B256}; use reth_provider::ProviderFactory; diff --git a/bin/reth/src/commands/p2p/mod.rs b/bin/reth/src/commands/p2p/mod.rs index 8ad8fadf1d3..b6710a363a9 100644 --- a/bin/reth/src/commands/p2p/mod.rs +++ b/bin/reth/src/commands/p2p/mod.rs @@ -15,6 +15,7 @@ use discv5::ListenConfig; use reth_config::Config; use reth_db::create_db; use reth_interfaces::p2p::bodies::client::BodiesClient; +use reth_network::NetworkConfigBuilder; use reth_primitives::{BlockHashOrNumber, ChainSpec}; use reth_provider::ProviderFactory; use std::{ @@ -112,8 +113,9 @@ impl Command { let rlpx_socket = (self.network.addr, self.network.port).into(); let boot_nodes = self.chain.bootnodes().unwrap_or_default(); - let mut network_config_builder = config - .network_config(self.network.nat, None, p2p_secret_key) + let mut network_config_builder = NetworkConfigBuilder::new(p2p_secret_key) + .peer_config(config.peers_config_with_basic_nodes_from_file(None)) + .external_ip_resolver(self.network.nat) .chain_spec(self.chain.clone()) .disable_discv4_discovery_if(self.chain.chain.is_optimism()) 
.boot_nodes(boot_nodes.clone()); diff --git a/bin/reth/src/commands/recover/storage_tries.rs b/bin/reth/src/commands/recover/storage_tries.rs index 025a170a035..583829bc39b 100644 --- a/bin/reth/src/commands/recover/storage_tries.rs +++ b/bin/reth/src/commands/recover/storage_tries.rs @@ -9,7 +9,8 @@ use reth_db::{ init_db, tables, transaction::DbTx, }; -use reth_node_core::{args::DatabaseArgs, init::init_genesis}; +use reth_db_common::init::init_genesis; +use reth_node_core::args::DatabaseArgs; use reth_primitives::ChainSpec; use reth_provider::{BlockNumReader, HeaderProvider, ProviderError, ProviderFactory}; use reth_trie::StateRoot; diff --git a/bin/reth/src/commands/stage/drop.rs b/bin/reth/src/commands/stage/drop.rs index 73ac898c970..fc3ef5768da 100644 --- a/bin/reth/src/commands/stage/drop.rs +++ b/bin/reth/src/commands/stage/drop.rs @@ -11,8 +11,8 @@ use crate::{ use clap::Parser; use itertools::Itertools; use reth_db::{open_db, static_file::iter_static_files, tables, transaction::DbTxMut, DatabaseEnv}; +use reth_db_common::init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}; use reth_fs_util as fs; -use reth_node_core::init::{insert_genesis_header, insert_genesis_history, insert_genesis_state}; use reth_primitives::{ stage::StageId, static_file::find_fixed_range, ChainSpec, StaticFileSegment, }; diff --git a/bin/reth/src/utils.rs b/bin/reth/src/utils.rs index 025b059bcef..f312b2d1b77 100644 --- a/bin/reth/src/utils.rs +++ b/bin/reth/src/utils.rs @@ -36,6 +36,9 @@ pub struct DbTool { impl DbTool { /// Takes a DB where the tables have already been created. pub fn new(provider_factory: ProviderFactory, chain: Arc) -> eyre::Result { + // Disable timeout because we are entering a TUI which might read for a long time. We + // disable on the [`DbTool`] level since it's only used in the CLI. 
+ provider_factory.provider()?.disable_long_read_transaction_safety(); Ok(Self { provider_factory, chain }) } diff --git a/book/cli/help.py b/book/cli/help.py index 26ce5e69198..3f40a5e0b56 100755 --- a/book/cli/help.py +++ b/book/cli/help.py @@ -262,9 +262,13 @@ def preprocess_help(s: str): "default: ", s, ) - # Remove the commit SHA and target architecture triple + # Remove the commit SHA and target architecture triple or fourth + # rustup available targets: + # aarch64-apple-darwin + # x86_64-unknown-linux-gnu + # x86_64-pc-windows-gnu s = re.sub( - r"default: reth/.*-[0-9A-Fa-f]{6,10}/\w+-\w*-\w+", + r"default: reth/.*-[0-9A-Fa-f]{6,10}/([_\w]+)-(\w+)-(\w+)(-\w+)?", "default: reth/-/", s, ) @@ -275,6 +279,14 @@ def preprocess_help(s: str): s, ) + # Remove rpc.max-tracing-requests default value + s = re.sub( + r"(rpc.max-tracing-requests \n.*\n.*\n.*)\[default: \d+\]", + r"\1[default: ]", + s, + flags=re.MULTILINE, + ) + return s diff --git a/book/cli/reth/db/checksum.md b/book/cli/reth/db/checksum.md index a8147b04a4d..66f0a86a957 100644 --- a/book/cli/reth/db/checksum.md +++ b/book/cli/reth/db/checksum.md @@ -22,6 +22,9 @@ Options: [default: default] + --start-key + The start of the range to checksum + --chain The chain this node is running. Possible values are either a built-in chain or the path to a chain specification file. @@ -31,6 +34,12 @@ Options: [default: mainnet] + --end-key + The end of the range to checksum + + --limit + The maximum number of records that are queried and used to compute the checksum + --instance Add a new instance of a node. diff --git a/book/cli/reth/node.md b/book/cli/reth/node.md index c73b7dd32e6..999601f044b 100644 --- a/book/cli/reth/node.md +++ b/book/cli/reth/node.md @@ -287,7 +287,7 @@ RPC: --rpc.max-tracing-requests Maximum number of concurrent tracing requests - [default: 6] + [default: ] --rpc.max-blocks-per-filter Maximum number of blocks that could be scanned per filter request. 
(0 = entire chain) diff --git a/book/run/optimism.md b/book/run/optimism.md index 8a5392d631e..004cc8abb75 100644 --- a/book/run/optimism.md +++ b/book/run/optimism.md @@ -92,7 +92,7 @@ op-node \ --l1.trustrpc ``` -If you opted to build the `op-node` with the `rethdb` build tag, this "`RPCKind`" can be enabled via appending two extra flags to the `op-node` invocation: +If you opted to build the `op-node` with the `rethdb` build tag, this feature can be enabled by appending one extra flag to the `op-node` invocation: > Note, the `reth_db_path` is the path to the `db` folder inside of the reth datadir, not the `mdbx.dat` file itself. This can be fetched from `op-reth db path [--chain ]`, or if you are using a custom datadir location via the `--datadir` flag, > by appending `/db` to the end of the path. @@ -100,7 +100,6 @@ If you opted to build the `op-node` with the `rethdb` build tag, this "`RPCKind` ```sh op-node \ # ... - --l1.rpckind=reth_db \ --l1.rethdb= ``` diff --git a/book/run/private-testnet.md b/book/run/private-testnet.md index 958d769e348..c85ff7d5478 100644 --- a/book/run/private-testnet.md +++ b/book/run/private-testnet.md @@ -35,6 +35,10 @@ First, in your home directory, create a file with the name `network_params.json` "launch_additional_services": false } ``` + +> [!TIP] +> If you would like to use a modified reth node, you can build an image locally with a custom tag. The tag can then be used in the `el_image` field in the `network_params.json` file. 
+ ### Step 2: Spin up your network Next, run the following command from your command line: diff --git a/crates/blockchain-tree/Cargo.toml b/crates/blockchain-tree/Cargo.toml index dc9e13866e3..1b8a53394b6 100644 --- a/crates/blockchain-tree/Cargo.toml +++ b/crates/blockchain-tree/Cargo.toml @@ -14,6 +14,8 @@ workspace = true # reth reth-primitives.workspace = true reth-interfaces.workspace = true +reth-storage-errors.workspace = true +reth-execution-errors.workspace = true reth-db.workspace = true reth-evm.workspace = true reth-revm.workspace = true @@ -50,4 +52,4 @@ assert_matches.workspace = true [features] test-utils = [] -optimism = ["reth-primitives/optimism", "reth-interfaces/optimism", "reth-provider/optimism"] +optimism = ["reth-primitives/optimism", "reth-provider/optimism"] diff --git a/crates/blockchain-tree/src/blockchain_tree.rs b/crates/blockchain-tree/src/blockchain_tree.rs index 2a0bfb8bae6..c031a5749bf 100644 --- a/crates/blockchain-tree/src/blockchain_tree.rs +++ b/crates/blockchain-tree/src/blockchain_tree.rs @@ -8,14 +8,10 @@ use crate::{ use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; use reth_evm::execute::BlockExecutorProvider; -use reth_interfaces::{ - blockchain_tree::{ - error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, - BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk, - }, - executor::{BlockExecutionError, BlockValidationError}, - provider::RootMismatch, - RethResult, +use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +use reth_interfaces::blockchain_tree::{ + error::{BlockchainTreeError, CanonicalError, InsertBlockError, InsertBlockErrorKind}, + BlockAttachment, BlockStatus, BlockValidationKind, CanonicalOutcome, InsertPayloadOk, }; use reth_primitives::{ BlockHash, BlockNumHash, BlockNumber, ForkBlock, GotExpected, Hardfork, PruneModes, Receipt, @@ -29,6 +25,7 @@ use reth_provider::{ 
StaticFileProviderFactory, }; use reth_stages_api::{MetricEvent, MetricEventsSender}; +use reth_storage_errors::provider::{ProviderResult, RootMismatch}; use std::{ collections::{btree_map::Entry, BTreeMap, HashSet}, sync::Arc, @@ -120,7 +117,7 @@ where externals: TreeExternals, config: BlockchainTreeConfig, prune_modes: Option, - ) -> RethResult { + ) -> ProviderResult { let max_reorg_depth = config.max_reorg_depth() as usize; // The size of the broadcast is twice the maximum reorg depth, because at maximum reorg // depth at least N blocks must be sent at once. @@ -307,7 +304,7 @@ where *key_value.0 } else { debug!(target: "blockchain_tree", ?chain_id, "No blockhashes stored"); - return None; + return None }; let canonical_chain = canonical_chain .iter() @@ -730,7 +727,7 @@ where return Err(e) } - if let Err(e) = self.externals.consensus.validate_block(block) { + if let Err(e) = self.externals.consensus.validate_block_pre_execution(block) { error!(?block, "Failed to validate block {}: {e}", block.header.hash()); return Err(e) } @@ -843,7 +840,7 @@ where pub fn connect_buffered_blocks_to_canonical_hashes_and_finalize( &mut self, last_finalized_block: BlockNumber, - ) -> RethResult<()> { + ) -> ProviderResult<()> { self.finalize_block(last_finalized_block); let last_canonical_hashes = self.update_block_hashes()?; @@ -855,7 +852,7 @@ where /// Update all block hashes. iterate over present and new list of canonical hashes and compare /// them. Remove all mismatches, disconnect them and removes all chains. - pub fn update_block_hashes(&mut self) -> RethResult> { + pub fn update_block_hashes(&mut self) -> ProviderResult> { let last_canonical_hashes = self .externals .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?; @@ -878,7 +875,7 @@ where /// blocks before the tip. 
pub fn update_block_hashes_and_clear_buffered( &mut self, - ) -> RethResult> { + ) -> ProviderResult> { let chain = self.update_block_hashes()?; if let Some((block, _)) = chain.last_key_value() { @@ -893,7 +890,7 @@ where /// /// `N` is the maximum of `max_reorg_depth` and the number of block hashes needed to satisfy the /// `BLOCKHASH` opcode in the EVM. - pub fn connect_buffered_blocks_to_canonical_hashes(&mut self) -> RethResult<()> { + pub fn connect_buffered_blocks_to_canonical_hashes(&mut self) -> ProviderResult<()> { let last_canonical_hashes = self .externals .fetch_latest_canonical_hashes(self.config.num_of_canonical_hashes() as usize)?; @@ -905,7 +902,7 @@ where fn connect_buffered_blocks_to_hashes( &mut self, hashes: impl IntoIterator>, - ) -> RethResult<()> { + ) -> ProviderResult<()> { // check unconnected block buffer for children of the canonical hashes for added_block in hashes.into_iter() { self.try_connect_buffered_blocks(added_block.into()) @@ -1264,7 +1261,7 @@ where } /// Unwind tables and put it inside state - pub fn unwind(&mut self, unwind_to: BlockNumber) -> RethResult<()> { + pub fn unwind(&mut self, unwind_to: BlockNumber) -> Result<(), CanonicalError> { // nothing to be done if unwind_to is higher then the tip if self.block_indices().canonical_tip().number <= unwind_to { return Ok(()) diff --git a/crates/blockchain-tree/src/chain.rs b/crates/blockchain-tree/src/chain.rs index ce6487a060b..e73b1757666 100644 --- a/crates/blockchain-tree/src/chain.rs +++ b/crates/blockchain-tree/src/chain.rs @@ -8,12 +8,10 @@ use crate::BundleStateDataRef; use reth_consensus::{Consensus, ConsensusError}; use reth_db::database::Database; use reth_evm::execute::{BlockExecutionOutput, BlockExecutorProvider, Executor}; -use reth_interfaces::{ - blockchain_tree::{ - error::{BlockchainTreeError, InsertBlockErrorKind}, - BlockAttachment, BlockValidationKind, - }, - RethResult, +use reth_execution_errors::BlockExecutionError; +use 
reth_interfaces::blockchain_tree::{ + error::{BlockchainTreeError, InsertBlockErrorKind}, + BlockAttachment, BlockValidationKind, }; use reth_primitives::{ BlockHash, BlockNumber, ForkBlock, GotExpected, Receipts, SealedBlockWithSenders, SealedHeader, @@ -176,7 +174,7 @@ impl AppendableChain { externals: &TreeExternals, block_attachment: BlockAttachment, block_validation_kind: BlockValidationKind, - ) -> RethResult<(BundleStateWithReceipts, Option)> + ) -> Result<(BundleStateWithReceipts, Option), BlockExecutionError> where BSDP: FullBundleStateDataProvider, DB: Database + Clone, @@ -210,8 +208,11 @@ impl AppendableChain { let executor = externals.executor_factory.executor(db); let block_hash = block.hash(); let block = block.unseal(); + let state = executor.execute((&block, U256::MAX).into())?; let BlockExecutionOutput { state, receipts, .. } = state; + externals.consensus.validate_block_post_execution(&block, &receipts)?; + let bundle_state = BundleStateWithReceipts::new( state, Receipts::from_block_receipt(receipts), diff --git a/crates/blockchain-tree/src/externals.rs b/crates/blockchain-tree/src/externals.rs index a311281c942..439b9d4a9b0 100644 --- a/crates/blockchain-tree/src/externals.rs +++ b/crates/blockchain-tree/src/externals.rs @@ -4,9 +4,9 @@ use reth_consensus::Consensus; use reth_db::{ cursor::DbCursorRO, database::Database, static_file::HeaderMask, tables, transaction::DbTx, }; -use reth_interfaces::RethResult; use reth_primitives::{BlockHash, BlockNumber, StaticFileSegment}; use reth_provider::{ProviderFactory, StaticFileProviderFactory, StatsReader}; +use reth_storage_errors::provider::ProviderResult; use std::{collections::BTreeMap, sync::Arc}; /// A container for external components. 
@@ -46,7 +46,7 @@ impl TreeExternals { pub(crate) fn fetch_latest_canonical_hashes( &self, num_hashes: usize, - ) -> RethResult> { + ) -> ProviderResult> { // Fetch the latest canonical hashes from the database let mut hashes = self .provider_factory diff --git a/crates/blockchain-tree/src/shareable.rs b/crates/blockchain-tree/src/shareable.rs index 66f76b0916f..624dfd0e3af 100644 --- a/crates/blockchain-tree/src/shareable.rs +++ b/crates/blockchain-tree/src/shareable.rs @@ -80,7 +80,7 @@ where let res = tree.connect_buffered_blocks_to_canonical_hashes_and_finalize(last_finalized_block); tree.update_chains_metrics(); - res + Ok(res?) } fn update_block_hashes_and_clear_buffered( @@ -89,7 +89,7 @@ where let mut tree = self.tree.write(); let res = tree.update_block_hashes_and_clear_buffered(); tree.update_chains_metrics(); - res + Ok(res?) } fn connect_buffered_blocks_to_canonical_hashes(&self) -> RethResult<()> { @@ -97,7 +97,7 @@ where let mut tree = self.tree.write(); let res = tree.connect_buffered_blocks_to_canonical_hashes(); tree.update_chains_metrics(); - res + Ok(res?) } fn make_canonical(&self, block_hash: BlockHash) -> Result { diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index d9147d7b7a7..9e9aa48064c 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -13,17 +13,12 @@ workspace = true [dependencies] # reth reth-network.workspace = true -reth-net-nat.workspace = true -reth-discv4.workspace = true reth-primitives.workspace = true # serde serde.workspace = true humantime-serde.workspace = true -# crypto -secp256k1 = { workspace = true, features = ["global-context", "rand-std", "recovery"] } - # toml confy.workspace = true diff --git a/crates/config/src/config.rs b/crates/config/src/config.rs index aa8b7ee09ab..7847ae202e3 100644 --- a/crates/config/src/config.rs +++ b/crates/config/src/config.rs @@ -1,9 +1,7 @@ //! Configuration files. 
-use reth_discv4::Discv4Config; -use reth_network::{NetworkConfigBuilder, PeersConfig, SessionsConfig}; +use reth_network::{PeersConfig, SessionsConfig}; use reth_primitives::PruneModes; -use secp256k1::SecretKey; use serde::{Deserialize, Deserializer, Serialize}; use std::{ ffi::OsStr, @@ -30,25 +28,17 @@ pub struct Config { } impl Config { - /// Initializes network config from read data - pub fn network_config( + /// Returns the [PeersConfig] for the node. + /// + /// If a peers file is provided, the basic nodes from the file are added to the configuration. + pub fn peers_config_with_basic_nodes_from_file( &self, - nat_resolution_method: reth_net_nat::NatResolver, - peers_file: Option, - secret_key: SecretKey, - ) -> NetworkConfigBuilder { - let peer_config = self - .peers + peers_file: Option<&Path>, + ) -> PeersConfig { + self.peers .clone() .with_basic_nodes_from_file(peers_file) - .unwrap_or_else(|_| self.peers.clone()); - - let discv4 = - Discv4Config::builder().external_ip_resolver(Some(nat_resolution_method)).clone(); - NetworkConfigBuilder::new(secret_key) - .sessions_config(self.sessions.clone()) - .peer_config(peer_config) - .discovery(discv4) + .unwrap_or_else(|_| self.peers.clone()) } /// Save the configuration to toml file. 
@@ -57,7 +47,7 @@ impl Config { return Err(std::io::Error::new( std::io::ErrorKind::InvalidInput, format!("reth config file extension must be '{EXTENSION}'"), - )); + )) } confy::store_path(path, self).map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e)) } diff --git a/crates/config/src/lib.rs b/crates/config/src/lib.rs index 362e814632e..1e81e18ec42 100644 --- a/crates/config/src/lib.rs +++ b/crates/config/src/lib.rs @@ -5,6 +5,7 @@ html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" )] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] pub mod config; diff --git a/crates/consensus/auto-seal/Cargo.toml b/crates/consensus/auto-seal/Cargo.toml index 435ade53db3..ccbc1e06a32 100644 --- a/crates/consensus/auto-seal/Cargo.toml +++ b/crates/consensus/auto-seal/Cargo.toml @@ -25,6 +25,7 @@ reth-engine-primitives.workspace = true reth-consensus.workspace = true reth-rpc-types.workspace = true reth-network-types.workspace = true +reth-tokio-util.workspace = true # async futures-util.workspace = true diff --git a/crates/consensus/auto-seal/src/lib.rs b/crates/consensus/auto-seal/src/lib.rs index e954108c8c4..f318b7adea4 100644 --- a/crates/consensus/auto-seal/src/lib.rs +++ b/crates/consensus/auto-seal/src/lib.rs @@ -22,8 +22,9 @@ use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; use reth_primitives::{ constants::{EMPTY_TRANSACTIONS, ETHEREUM_BLOCK_GAS_LIMIT}, eip4844::calculate_excess_blob_gas, - proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Header, - Receipts, SealedBlock, SealedHeader, TransactionSigned, Withdrawals, B256, U256, + proofs, Block, BlockBody, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, + ChainSpec, Header, Receipt, Receipts, SealedBlock, SealedHeader, TransactionSigned, + Withdrawals, B256, U256, }; use 
reth_provider::{ BlockReaderIdExt, BundleStateWithReceipts, CanonStateNotificationSender, StateProviderFactory, @@ -84,7 +85,15 @@ impl Consensus for AutoSealConsensus { Ok(()) } - fn validate_block(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + Ok(()) + } + + fn validate_block_post_execution( + &self, + _block: &BlockWithSenders, + _receipts: &[Receipt], + ) -> Result<(), ConsensusError> { Ok(()) } } @@ -361,7 +370,7 @@ impl StorageInner { let header = self.build_header_template(&transactions, &ommers, withdrawals.as_ref(), chain_spec); - let mut block = Block { + let block = Block { header, body: transactions, ommers: ommers.clone(), @@ -376,27 +385,7 @@ impl StorageInner { provider.latest().map_err(BlockExecutionError::LatestBlock)?, ); - // TODO(mattsse): At this point we don't know certain fields of the header, so we first - // execute it and then update the header this can be improved by changing the executor - // input, for now we intercept the errors and retry - loop { - match executor.executor(&mut db).execute((&block, U256::ZERO).into()) { - Err(BlockExecutionError::Validation(BlockValidationError::BlockGasUsed { - gas, - .. - })) => { - block.block.header.gas_used = gas.got; - } - Err(BlockExecutionError::Validation(BlockValidationError::ReceiptRootDiff( - err, - ))) => { - block.block.header.receipts_root = err.got; - } - _ => break, - }; - } - - // now execute the block + // execute the block let BlockExecutionOutput { state, receipts, .. 
} = executor.executor(&mut db).execute((&block, U256::ZERO).into())?; let bundle_state = BundleStateWithReceipts::new( diff --git a/crates/consensus/auto-seal/src/task.rs b/crates/consensus/auto-seal/src/task.rs index 42f1268f331..2a5ec4433e4 100644 --- a/crates/consensus/auto-seal/src/task.rs +++ b/crates/consensus/auto-seal/src/task.rs @@ -9,6 +9,7 @@ use reth_primitives::{ use reth_provider::{CanonChainTracker, CanonStateNotificationSender, Chain, StateProviderFactory}; use reth_rpc_types::engine::ForkchoiceState; use reth_stages_api::PipelineEvent; +use reth_tokio_util::EventStream; use reth_transaction_pool::{TransactionPool, ValidPoolTransaction}; use std::{ collections::VecDeque, @@ -18,7 +19,6 @@ use std::{ task::{Context, Poll}, }; use tokio::sync::{mpsc::UnboundedSender, oneshot}; -use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, error, warn}; /// A Future that listens for new ready transactions and puts new blocks into storage @@ -30,7 +30,7 @@ pub struct MiningTask>>>, + insert_task: Option>>>, /// Shared storage to insert new blocks storage: Storage, /// Pool where transactions are stored @@ -42,7 +42,7 @@ pub struct MiningTask>, + pipe_line_events: Option>, /// The type used for block execution block_executor: Executor, } @@ -80,7 +80,7 @@ impl } /// Sets the pipeline events to listen on. 
- pub fn set_pipeline_events(&mut self, events: UnboundedReceiverStream) { + pub fn set_pipeline_events(&mut self, events: EventStream) { self.pipe_line_events = Some(events); } } diff --git a/crates/consensus/beacon/Cargo.toml b/crates/consensus/beacon/Cargo.toml index 659ef02c175..a5cef8e3427 100644 --- a/crates/consensus/beacon/Cargo.toml +++ b/crates/consensus/beacon/Cargo.toml @@ -67,7 +67,6 @@ assert_matches.workspace = true [features] optimism = [ "reth-primitives/optimism", - "reth-interfaces/optimism", "reth-provider/optimism", "reth-blockchain-tree/optimism", "reth-ethereum-consensus/optimism", diff --git a/crates/consensus/beacon/src/engine/handle.rs b/crates/consensus/beacon/src/engine/handle.rs index 121a8fac070..bec289bf4a7 100644 --- a/crates/consensus/beacon/src/engine/handle.rs +++ b/crates/consensus/beacon/src/engine/handle.rs @@ -10,28 +10,20 @@ use reth_interfaces::RethResult; use reth_rpc_types::engine::{ CancunPayloadFields, ExecutionPayload, ForkchoiceState, ForkchoiceUpdated, PayloadStatus, }; -use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot}; -use tokio_stream::wrappers::UnboundedReceiverStream; +use reth_tokio_util::{EventSender, EventStream}; +use tokio::sync::{mpsc::UnboundedSender, oneshot}; /// A _shareable_ beacon consensus frontend type. Used to interact with the spawned beacon consensus /// engine task. /// /// See also `BeaconConsensusEngine` -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct BeaconConsensusEngineHandle where Engine: EngineTypes, { pub(crate) to_engine: UnboundedSender>, -} - -impl Clone for BeaconConsensusEngineHandle -where - Engine: EngineTypes, -{ - fn clone(&self) -> Self { - Self { to_engine: self.to_engine.clone() } - } + event_sender: EventSender, } // === impl BeaconConsensusEngineHandle === @@ -41,8 +33,11 @@ where Engine: EngineTypes, { /// Creates a new beacon consensus engine handle. 
- pub fn new(to_engine: UnboundedSender>) -> Self { - Self { to_engine } + pub fn new( + to_engine: UnboundedSender>, + event_sender: EventSender, + ) -> Self { + Self { to_engine, event_sender } } /// Sends a new payload message to the beacon consensus engine and waits for a response. @@ -97,9 +92,7 @@ where } /// Creates a new [`BeaconConsensusEngineEvent`] listener stream. - pub fn event_listener(&self) -> UnboundedReceiverStream { - let (tx, rx) = mpsc::unbounded_channel(); - let _ = self.to_engine.send(BeaconEngineMessage::EventListener(tx)); - UnboundedReceiverStream::new(rx) + pub fn event_listener(&self) -> EventStream { + self.event_sender.new_listener() } } diff --git a/crates/consensus/beacon/src/engine/hooks/prune.rs b/crates/consensus/beacon/src/engine/hooks/prune.rs index a9bb4f05bd4..d2c2e2d33a1 100644 --- a/crates/consensus/beacon/src/engine/hooks/prune.rs +++ b/crates/consensus/beacon/src/engine/hooks/prune.rs @@ -78,10 +78,10 @@ impl PruneHook { /// This will try to spawn the pruner if it is idle: /// 1. Check if pruning is needed through [Pruner::is_pruning_needed]. - /// 2. - /// 1. If pruning is needed, pass tip block number to the [Pruner::run] and spawn it in a - /// separate task. Set pruner state to [PrunerState::Running]. - /// 2. If pruning is not needed, set pruner state back to [PrunerState::Idle]. + /// + /// 2.1. If pruning is needed, pass tip block number to the [Pruner::run] and spawn it in a + /// separate task. Set pruner state to [PrunerState::Running]. + /// 2.2. If pruning is not needed, set pruner state back to [PrunerState::Idle]. /// /// If pruner is already running, do nothing. 
fn try_spawn_pruner(&mut self, tip_block_number: BlockNumber) -> Option { diff --git a/crates/consensus/beacon/src/engine/hooks/static_file.rs b/crates/consensus/beacon/src/engine/hooks/static_file.rs index 2cff68e1d26..3786e29f87f 100644 --- a/crates/consensus/beacon/src/engine/hooks/static_file.rs +++ b/crates/consensus/beacon/src/engine/hooks/static_file.rs @@ -71,13 +71,13 @@ impl StaticFileHook { /// 1. Check if producing static files is needed through /// [StaticFileProducer::get_static_file_targets](reth_static_file::StaticFileProducerInner::get_static_file_targets) /// and then [StaticFileTargets::any](reth_static_file::StaticFileTargets::any). - /// 2. - /// 1. If producing static files is needed, pass static file request to the - /// [StaticFileProducer::run](reth_static_file::StaticFileProducerInner::run) and spawn - /// it in a separate task. Set static file producer state to - /// [StaticFileProducerState::Running]. - /// 2. If producing static files is not needed, set static file producer state back to - /// [StaticFileProducerState::Idle]. + /// + /// 2.1. If producing static files is needed, pass static file request to the + /// [StaticFileProducer::run](reth_static_file::StaticFileProducerInner::run) and + /// spawn it in a separate task. Set static file producer state to + /// [StaticFileProducerState::Running]. + /// 2.2. If producing static files is not needed, set static file producer state back to + /// [StaticFileProducerState::Idle]. /// /// If static_file_producer is already running, do nothing. 
fn try_spawn_static_file_producer( @@ -91,8 +91,7 @@ impl StaticFileHook { return Ok(None) }; - let Some(mut locked_static_file_producer) = static_file_producer.try_lock_arc() - else { + let Some(locked_static_file_producer) = static_file_producer.try_lock_arc() else { trace!(target: "consensus::engine::hooks::static_file", "StaticFileProducer lock is already taken"); return Ok(None) }; diff --git a/crates/consensus/beacon/src/engine/message.rs b/crates/consensus/beacon/src/engine/message.rs index f9f1a84d46f..108dab41eb0 100644 --- a/crates/consensus/beacon/src/engine/message.rs +++ b/crates/consensus/beacon/src/engine/message.rs @@ -1,7 +1,4 @@ -use crate::{ - engine::{error::BeaconOnNewPayloadError, forkchoice::ForkchoiceStatus}, - BeaconConsensusEngineEvent, -}; +use crate::engine::{error::BeaconOnNewPayloadError, forkchoice::ForkchoiceStatus}; use futures::{future::Either, FutureExt}; use reth_engine_primitives::EngineTypes; use reth_interfaces::RethResult; @@ -15,7 +12,7 @@ use std::{ pin::Pin, task::{ready, Context, Poll}, }; -use tokio::sync::{mpsc::UnboundedSender, oneshot}; +use tokio::sync::oneshot; /// Represents the outcome of forkchoice update. /// @@ -162,6 +159,4 @@ pub enum BeaconEngineMessage { }, /// Message with exchanged transition configuration. TransitionConfigurationExchanged, - /// Add a new listener for [`BeaconEngineMessage`]. - EventListener(UnboundedSender), } diff --git a/crates/consensus/beacon/src/engine/mod.rs b/crates/consensus/beacon/src/engine/mod.rs index 1057457c779..5f7f583902d 100644 --- a/crates/consensus/beacon/src/engine/mod.rs +++ b/crates/consensus/beacon/src/engine/mod.rs @@ -29,7 +29,7 @@ use reth_rpc_types::engine::{ }; use reth_stages_api::{ControlFlow, Pipeline}; use reth_tasks::TaskSpawner; -use reth_tokio_util::EventListeners; +use reth_tokio_util::EventSender; use std::{ pin::Pin, sync::Arc, @@ -202,8 +202,8 @@ where /// be used to download and execute the missing blocks. 
pipeline_run_threshold: u64, hooks: EngineHooksController, - /// Listeners for engine events. - listeners: EventListeners, + /// Sender for engine events. + event_sender: EventSender, /// Consensus engine metrics. metrics: EngineMetrics, } @@ -282,8 +282,8 @@ where engine_message_stream: BoxStream<'static, BeaconEngineMessage>, hooks: EngineHooks, ) -> RethResult<(Self, BeaconConsensusEngineHandle)> { - let handle = BeaconConsensusEngineHandle { to_engine }; - let listeners = EventListeners::default(); + let event_sender = EventSender::default(); + let handle = BeaconConsensusEngineHandle::new(to_engine, event_sender.clone()); let sync = EngineSyncController::new( pipeline, client, @@ -291,7 +291,7 @@ where run_pipeline_continuously, max_block, blockchain.chain_spec(), - listeners.clone(), + event_sender.clone(), ); let mut this = Self { sync, @@ -306,7 +306,7 @@ where blockchain_tree_action: None, pipeline_run_threshold, hooks: EngineHooksController::new(hooks), - listeners, + event_sender, metrics: EngineMetrics::default(), }; @@ -406,7 +406,7 @@ where if should_update_head { let head = outcome.header(); let _ = self.update_head(head.clone()); - self.listeners.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( + self.event_sender.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( Box::new(head.clone()), elapsed, )); @@ -543,7 +543,7 @@ where } // notify listeners about new processed FCU - self.listeners.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status)); + self.event_sender.notify(BeaconConsensusEngineEvent::ForkchoiceUpdated(state, status)); } /// Check if the pipeline is consistent (all stages have the checkpoint block numbers no less @@ -597,13 +597,6 @@ where self.handle.clone() } - /// Pushes an [UnboundedSender] to the engine's listeners. Also pushes an [UnboundedSender] to - /// the sync controller's listeners. 
- pub(crate) fn push_listener(&mut self, listener: UnboundedSender) { - self.listeners.push_listener(listener.clone()); - self.sync.push_listener(listener); - } - /// Returns true if the distance from the local tip to the block is greater than the configured /// threshold. /// @@ -710,13 +703,13 @@ where /// If validation fails, the response MUST contain the latest valid hash: /// /// - The block hash of the ancestor of the invalid payload satisfying the following two - /// conditions: + /// conditions: /// - It is fully validated and deemed VALID /// - Any other ancestor of the invalid payload with a higher blockNumber is INVALID /// - 0x0000000000000000000000000000000000000000000000000000000000000000 if the above - /// conditions are satisfied by a PoW block. + /// conditions are satisfied by a PoW block. /// - null if client software cannot determine the ancestor of the invalid payload satisfying - /// the above conditions. + /// the above conditions. fn latest_valid_hash_for_invalid_payload( &mut self, parent_hash: B256, @@ -1110,8 +1103,8 @@ where /// - invalid extra data /// - invalid transactions /// - incorrect hash - /// - the versioned hashes passed with the payload do not exactly match transaction - /// versioned hashes + /// - the versioned hashes passed with the payload do not exactly match transaction versioned + /// hashes /// - the block does not contain blob transactions if it is pre-cancun /// /// This validates the following engine API rule: @@ -1255,7 +1248,7 @@ where } else { BeaconConsensusEngineEvent::ForkBlockAdded(block) }; - self.listeners.notify(event); + self.event_sender.notify(event); PayloadStatusEnum::Valid } InsertPayloadOk::AlreadySeen(BlockStatus::Valid(_)) => { @@ -1429,7 +1422,7 @@ where match make_canonical_result { Ok(outcome) => { if let CanonicalOutcome::Committed { head } = &outcome { - self.listeners.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( + 
self.event_sender.notify(BeaconConsensusEngineEvent::CanonicalChainCommitted( Box::new(head.clone()), elapsed, )); @@ -1878,7 +1871,6 @@ where BeaconEngineMessage::TransitionConfigurationExchanged => { this.blockchain.on_transition_configuration_exchanged(); } - BeaconEngineMessage::EventListener(tx) => this.push_listener(tx), } continue } diff --git a/crates/consensus/beacon/src/engine/sync.rs b/crates/consensus/beacon/src/engine/sync.rs index 09c6d208b6e..441c3ce0362 100644 --- a/crates/consensus/beacon/src/engine/sync.rs +++ b/crates/consensus/beacon/src/engine/sync.rs @@ -14,14 +14,14 @@ use reth_interfaces::p2p::{ use reth_primitives::{stage::PipelineTarget, BlockNumber, ChainSpec, SealedBlock, B256}; use reth_stages_api::{ControlFlow, Pipeline, PipelineError, PipelineWithResult}; use reth_tasks::TaskSpawner; -use reth_tokio_util::EventListeners; +use reth_tokio_util::EventSender; use std::{ cmp::{Ordering, Reverse}, collections::{binary_heap::PeekMut, BinaryHeap}, sync::Arc, task::{ready, Context, Poll}, }; -use tokio::sync::{mpsc::UnboundedSender, oneshot}; +use tokio::sync::oneshot; use tracing::trace; /// Manages syncing under the control of the engine. @@ -49,8 +49,8 @@ where inflight_full_block_requests: Vec>, /// In-flight full block _range_ requests in progress. inflight_block_range_requests: Vec>, - /// Listeners for engine events. - listeners: EventListeners, + /// Sender for engine events. + event_sender: EventSender, /// Buffered blocks from downloads - this is a min-heap of blocks, using the block number for /// ordering. This means the blocks will be popped from the heap with ascending block numbers. 
range_buffered_blocks: BinaryHeap>, @@ -76,7 +76,7 @@ where run_pipeline_continuously: bool, max_block: Option, chain_spec: Arc, - listeners: EventListeners, + event_sender: EventSender, ) -> Self { Self { full_block_client: FullBlockClient::new( @@ -90,7 +90,7 @@ where inflight_block_range_requests: Vec::new(), range_buffered_blocks: BinaryHeap::new(), run_pipeline_continuously, - listeners, + event_sender, max_block, metrics: EngineSyncMetrics::default(), } @@ -127,11 +127,6 @@ where self.run_pipeline_continuously } - /// Pushes an [UnboundedSender] to the sync controller's listeners. - pub(crate) fn push_listener(&mut self, listener: UnboundedSender) { - self.listeners.push_listener(listener); - } - /// Returns `true` if a pipeline target is queued and will be triggered on the next `poll`. #[allow(dead_code)] pub(crate) fn is_pipeline_sync_pending(&self) -> bool { @@ -169,7 +164,7 @@ where ); // notify listeners that we're downloading a block range - self.listeners.notify(BeaconConsensusEngineEvent::LiveSyncProgress( + self.event_sender.notify(BeaconConsensusEngineEvent::LiveSyncProgress( ConsensusEngineLiveSyncProgress::DownloadingBlocks { remaining_blocks: count, target: hash, @@ -198,7 +193,7 @@ where ); // notify listeners that we're downloading a block - self.listeners.notify(BeaconConsensusEngineEvent::LiveSyncProgress( + self.event_sender.notify(BeaconConsensusEngineEvent::LiveSyncProgress( ConsensusEngineLiveSyncProgress::DownloadingBlocks { remaining_blocks: 1, target: hash, diff --git a/crates/consensus/common/src/validation.rs b/crates/consensus/common/src/validation.rs index 8a3d9588e92..ffa48e77152 100644 --- a/crates/consensus/common/src/validation.rs +++ b/crates/consensus/common/src/validation.rs @@ -61,7 +61,7 @@ pub fn validate_header_standalone( /// - Compares the transactions root in the block header to the block body /// - Pre-execution transaction validation /// - (Optionally) Compares the receipts root in the block header to the block body 
-pub fn validate_block_standalone( +pub fn validate_block_pre_execution( block: &SealedBlock, chain_spec: &ChainSpec, ) -> Result<(), ConsensusError> { @@ -366,13 +366,13 @@ mod tests { // Single withdrawal let block = create_block_with_withdrawals(&[1]); - assert_eq!(validate_block_standalone(&block, &chain_spec), Ok(())); + assert_eq!(validate_block_pre_execution(&block, &chain_spec), Ok(())); // Multiple increasing withdrawals let block = create_block_with_withdrawals(&[1, 2, 3]); - assert_eq!(validate_block_standalone(&block, &chain_spec), Ok(())); + assert_eq!(validate_block_pre_execution(&block, &chain_spec), Ok(())); let block = create_block_with_withdrawals(&[5, 6, 7, 8, 9]); - assert_eq!(validate_block_standalone(&block, &chain_spec), Ok(())); + assert_eq!(validate_block_pre_execution(&block, &chain_spec), Ok(())); let (_, parent) = mock_block(); // Withdrawal index should be the last withdrawal index + 1 @@ -428,7 +428,7 @@ mod tests { // validate blob, it should fail blob gas used validation assert_eq!( - validate_block_standalone(&block, &chain_spec), + validate_block_pre_execution(&block, &chain_spec), Err(ConsensusError::BlobGasUsedDiff(GotExpected { got: 1, expected: expected_blob_gas_used diff --git a/crates/consensus/consensus/Cargo.toml b/crates/consensus/consensus/Cargo.toml index 308a16f2026..43264872e1f 100644 --- a/crates/consensus/consensus/Cargo.toml +++ b/crates/consensus/consensus/Cargo.toml @@ -18,4 +18,4 @@ auto_impl.workspace = true thiserror.workspace = true [features] -test-utils = [] \ No newline at end of file +test-utils = [] diff --git a/crates/consensus/consensus/src/lib.rs b/crates/consensus/consensus/src/lib.rs index 2dee6b1245e..46fce6d02ee 100644 --- a/crates/consensus/consensus/src/lib.rs +++ b/crates/consensus/consensus/src/lib.rs @@ -9,8 +9,8 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use reth_primitives::{ - BlockHash, BlockNumber, GotExpected, GotExpectedBoxed, Header, HeaderValidationError, - 
InvalidTransactionError, SealedBlock, SealedHeader, B256, U256, + BlockHash, BlockNumber, BlockWithSenders, Bloom, GotExpected, GotExpectedBoxed, Header, + HeaderValidationError, InvalidTransactionError, Receipt, SealedBlock, SealedHeader, B256, U256, }; use std::fmt::Debug; @@ -83,7 +83,19 @@ pub trait Consensus: Debug + Send + Sync { /// **This should not be called for the genesis block**. /// /// Note: validating blocks does not include other validations of the Consensus - fn validate_block(&self, block: &SealedBlock) -> Result<(), ConsensusError>; + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError>; + + /// Validate a block considering world state, i.e. things that can not be checked before + /// execution. + /// + /// See the Yellow Paper sections 4.3.2 "Holistic Validity". + /// + /// Note: validating blocks does not include other validations of the Consensus + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + ) -> Result<(), ConsensusError>; } /// Consensus Errors @@ -98,6 +110,15 @@ pub enum ConsensusError { gas_limit: u64, }, + /// Error when block gas used doesn't match expected value + #[error("block gas used mismatch: {gas}; gas spent by each transaction: {gas_spent_by_tx:?}")] + BlockGasUsed { + /// The gas diff. + gas: GotExpected, + /// Gas spent by each transaction + gas_spent_by_tx: Vec<(u64, u64)>, + }, + /// Error when the hash of block ommer is different from the expected hash. #[error("mismatched block ommer hash: {0}")] BodyOmmersHashDiff(GotExpectedBoxed), @@ -111,6 +132,14 @@ pub enum ConsensusError { #[error("mismatched block transaction root: {0}")] BodyTransactionRootDiff(GotExpectedBoxed), + /// Error when the receipt root in the block is different from the expected receipt root. + #[error("receipt root mismatch: {0}")] + BodyReceiptRootDiff(GotExpectedBoxed), + + /// Error when header bloom filter is different from the expected bloom filter. 
+ #[error("header bloom filter mismatch: {0}")] + BodyBloomLogDiff(GotExpectedBoxed), + /// Error when the withdrawals root in the block is different from the expected withdrawals /// root. #[error("mismatched block withdrawals root: {0}")] diff --git a/crates/consensus/consensus/src/test_utils.rs b/crates/consensus/consensus/src/test_utils.rs index a8655661b8c..a616d4f43b8 100644 --- a/crates/consensus/consensus/src/test_utils.rs +++ b/crates/consensus/consensus/src/test_utils.rs @@ -1,5 +1,5 @@ use crate::{Consensus, ConsensusError}; -use reth_primitives::{Header, SealedBlock, SealedHeader, U256}; +use reth_primitives::{BlockWithSenders, Header, Receipt, SealedBlock, SealedHeader, U256}; use std::sync::atomic::{AtomicBool, Ordering}; /// Consensus engine implementation for testing @@ -60,7 +60,19 @@ impl Consensus for TestConsensus { } } - fn validate_block(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + fn validate_block_pre_execution(&self, _block: &SealedBlock) -> Result<(), ConsensusError> { + if self.fail_validation() { + Err(ConsensusError::BaseFeeMissing) + } else { + Ok(()) + } + } + + fn validate_block_post_execution( + &self, + _block: &BlockWithSenders, + _receipts: &[Receipt], + ) -> Result<(), ConsensusError> { if self.fail_validation() { Err(ConsensusError::BaseFeeMissing) } else { diff --git a/crates/e2e-test-utils/Cargo.toml b/crates/e2e-test-utils/Cargo.toml index 59424cac98f..4165044ae2e 100644 --- a/crates/e2e-test-utils/Cargo.toml +++ b/crates/e2e-test-utils/Cargo.toml @@ -20,6 +20,7 @@ reth-rpc-layer.workspace = true reth-payload-builder = { workspace = true, features = ["test-utils"] } reth-provider.workspace = true reth-node-builder.workspace = true +reth-tokio-util.workspace = true jsonrpsee.workspace = true diff --git a/crates/e2e-test-utils/src/network.rs b/crates/e2e-test-utils/src/network.rs index 92e9b316a9a..5b148b09f55 100644 --- a/crates/e2e-test-utils/src/network.rs +++ b/crates/e2e-test-utils/src/network.rs @@ 
-1,12 +1,12 @@ use futures_util::StreamExt; use reth::network::{NetworkEvent, NetworkEvents, NetworkHandle, PeersInfo}; use reth_primitives::NodeRecord; +use reth_tokio_util::EventStream; use reth_tracing::tracing::info; -use tokio_stream::wrappers::UnboundedReceiverStream; /// Helper for network operations pub struct NetworkTestContext { - network_events: UnboundedReceiverStream, + network_events: EventStream, network: NetworkHandle, } diff --git a/crates/e2e-test-utils/src/payload.rs b/crates/e2e-test-utils/src/payload.rs index 47f4134d7fe..828bc5f32c4 100644 --- a/crates/e2e-test-utils/src/payload.rs +++ b/crates/e2e-test-utils/src/payload.rs @@ -50,9 +50,9 @@ impl PayloadTestContext { let payload = self.payload_builder.best_payload(payload_id).await.unwrap().unwrap(); if payload.block().body.is_empty() { tokio::time::sleep(std::time::Duration::from_millis(20)).await; - continue; + continue } - break; + break } } diff --git a/crates/ethereum/consensus/Cargo.toml b/crates/ethereum/consensus/Cargo.toml index f3ff5d4d36e..984fb1ec6fc 100644 --- a/crates/ethereum/consensus/Cargo.toml +++ b/crates/ethereum/consensus/Cargo.toml @@ -17,4 +17,4 @@ reth-primitives.workspace = true reth-consensus.workspace = true [features] -optimism = ["reth-primitives/optimism"] \ No newline at end of file +optimism = ["reth-primitives/optimism"] diff --git a/crates/ethereum/consensus/src/lib.rs b/crates/ethereum/consensus/src/lib.rs index ed283f0262a..0264089475a 100644 --- a/crates/ethereum/consensus/src/lib.rs +++ b/crates/ethereum/consensus/src/lib.rs @@ -9,12 +9,18 @@ #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] use reth_consensus::{Consensus, ConsensusError}; -use reth_consensus_common::validation; +use reth_consensus_common::validation::{ + validate_block_pre_execution, validate_header_extradata, validate_header_standalone, +}; use reth_primitives::{ - Chain, ChainSpec, Hardfork, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256, + BlockWithSenders, Chain, 
ChainSpec, Hardfork, Header, Receipt, SealedBlock, SealedHeader, + EMPTY_OMMER_ROOT_HASH, U256, }; use std::{sync::Arc, time::SystemTime}; +mod validation; +pub use validation::validate_block_post_execution; + /// Ethereum beacon consensus /// /// This consensus engine does basic checks as outlined in the execution specs. @@ -33,7 +39,7 @@ impl EthBeaconConsensus { impl Consensus for EthBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { - validation::validate_header_standalone(header, &self.chain_spec)?; + validate_header_standalone(header, &self.chain_spec)?; Ok(()) } @@ -87,7 +93,7 @@ impl Consensus for EthBeaconConsensus { // is greater than its parent timestamp. // validate header extradata for all networks post merge - validation::validate_header_extradata(header)?; + validate_header_extradata(header)?; // mixHash is used instead of difficulty inside EVM // https://eips.ethereum.org/EIPS/eip-4399#using-mixhash-field-instead-of-difficulty @@ -111,14 +117,22 @@ impl Consensus for EthBeaconConsensus { // * If the network is goerli pre-merge, ignore the extradata check, since we do not // support clique. Same goes for OP blocks below Bedrock. 
if self.chain_spec.chain != Chain::goerli() && !self.chain_spec.is_optimism() { - validation::validate_header_extradata(header)?; + validate_header_extradata(header)?; } } Ok(()) } - fn validate_block(&self, block: &SealedBlock) -> Result<(), ConsensusError> { - validation::validate_block_standalone(block, &self.chain_spec) + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { + validate_block_pre_execution(block, &self.chain_spec) + } + + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec, receipts) } } diff --git a/crates/ethereum/consensus/src/validation.rs b/crates/ethereum/consensus/src/validation.rs new file mode 100644 index 00000000000..11fe54406af --- /dev/null +++ b/crates/ethereum/consensus/src/validation.rs @@ -0,0 +1,81 @@ +use reth_consensus::ConsensusError; +use reth_primitives::{ + gas_spent_by_transactions, BlockWithSenders, Bloom, ChainSpec, GotExpected, Receipt, B256, +}; + +/// Validate a block with regard to execution results: +/// +/// - Compares the receipts root in the block header to the block body +/// - Compares the gas used in the block header to the actual gas usage after execution +pub fn validate_block_post_execution( + block: &BlockWithSenders, + chain_spec: &ChainSpec, + receipts: &[Receipt], +) -> Result<(), ConsensusError> { + // Before Byzantium, receipts contained state root that would mean that expensive + // operation as hashing that is required for state root got calculated in every + // transaction This was replaced with is_success flag. + // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 + if chain_spec.is_byzantium_active_at_block(block.header.number) { + verify_receipts(block.header.receipts_root, block.header.logs_bloom, receipts)?; + } + + // Check if gas used matches the value set in header. 
+ let cumulative_gas_used = + receipts.last().map(|receipt| receipt.cumulative_gas_used).unwrap_or(0); + if block.gas_used != cumulative_gas_used { + return Err(ConsensusError::BlockGasUsed { + gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, + gas_spent_by_tx: gas_spent_by_transactions(receipts), + }) + } + + Ok(()) +} + +/// Calculate the receipts root, and compare it against against the expected receipts root and logs +/// bloom. +fn verify_receipts( + expected_receipts_root: B256, + expected_logs_bloom: Bloom, + receipts: &[Receipt], +) -> Result<(), ConsensusError> { + // Calculate receipts root. + let receipts_with_bloom = receipts.iter().map(Receipt::with_bloom_ref).collect::>(); + let receipts_root = reth_primitives::proofs::calculate_receipt_root_ref(&receipts_with_bloom); + + // Calculate header logs bloom. + let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + expected_receipts_root, + expected_logs_bloom, + )?; + + Ok(()) +} + +/// Compare the calculated receipts root with the expected receipts root, also compare +/// the calculated logs bloom with the expected logs bloom. 
+fn compare_receipts_root_and_logs_bloom( + calculated_receipts_root: B256, + calculated_logs_bloom: Bloom, + expected_receipts_root: B256, + expected_logs_bloom: Bloom, +) -> Result<(), ConsensusError> { + if calculated_receipts_root != expected_receipts_root { + return Err(ConsensusError::BodyReceiptRootDiff( + GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), + )) + } + + if calculated_logs_bloom != expected_logs_bloom { + return Err(ConsensusError::BodyBloomLogDiff( + GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), + )) + } + + Ok(()) +} diff --git a/crates/ethereum/engine-primitives/src/payload.rs b/crates/ethereum/engine-primitives/src/payload.rs index ed3f484b8d0..6e753dac962 100644 --- a/crates/ethereum/engine-primitives/src/payload.rs +++ b/crates/ethereum/engine-primitives/src/payload.rs @@ -112,7 +112,7 @@ impl From for ExecutionPayloadEnvelopeV3 { let EthBuiltPayload { block, fees, sidecars, .. } = value; ExecutionPayloadEnvelopeV3 { - execution_payload: block_to_payload_v3(block), + execution_payload: block_to_payload_v3(block).0, block_value: fees, // From the engine API spec: // diff --git a/crates/ethereum/evm/Cargo.toml b/crates/ethereum/evm/Cargo.toml index e9f8bc5ad31..88e5967e5d8 100644 --- a/crates/ethereum/evm/Cargo.toml +++ b/crates/ethereum/evm/Cargo.toml @@ -15,14 +15,11 @@ workspace = true reth-evm.workspace = true reth-primitives.workspace = true reth-revm.workspace = true -reth-interfaces.workspace = true +reth-ethereum-consensus.workspace = true # Ethereum revm-primitives.workspace = true -# misc -tracing.workspace = true - [dev-dependencies] reth-revm = { workspace = true, features = ["test-utils"] } alloy-eips.workspace = true diff --git a/crates/ethereum/evm/src/execute.rs b/crates/ethereum/evm/src/execute.rs index 15702ba7508..9998927061b 100644 --- a/crates/ethereum/evm/src/execute.rs +++ b/crates/ethereum/evm/src/execute.rs @@ -2,23 +2,19 @@ use crate::{ 
dao_fork::{DAO_HARDFORK_BENEFICIARY, DAO_HARDKFORK_ACCOUNTS}, - verify::verify_receipts, EthEvmConfig, }; +use reth_ethereum_consensus::validate_block_post_execution; use reth_evm::{ execute::{ - BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, - BlockExecutorProvider, Executor, + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionError, BlockExecutionInput, + BlockExecutionOutput, BlockExecutorProvider, BlockValidationError, Executor, ProviderError, }, ConfigureEvm, }; -use reth_interfaces::{ - executor::{BlockExecutionError, BlockValidationError}, - provider::ProviderError, -}; use reth_primitives::{ - BlockNumber, BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, - Receipts, Withdrawals, MAINNET, U256, + BlockNumber, BlockWithSenders, ChainSpec, Hardfork, Header, PruneModes, Receipt, Withdrawals, + MAINNET, U256, }; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, @@ -31,7 +27,6 @@ use revm_primitives::{ BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, }; use std::sync::Arc; -use tracing::debug; /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] @@ -187,16 +182,6 @@ where } drop(evm); - // Check if gas used matches the value set in header. - if block.gas_used != cumulative_gas_used { - let receipts = Receipts::from_block_receipt(receipts); - return Err(BlockValidationError::BlockGasUsed { - gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, - gas_spent_by_tx: receipts.gas_spent_by_tx()?, - } - .into()) - } - Ok((receipts, cumulative_gas_used)) } } @@ -260,8 +245,8 @@ where /// /// Returns the receipts of the transactions in the block and the total gas used. /// - /// Returns an error if execution fails or receipt verification fails. - fn execute_and_verify( + /// Returns an error if execution fails. 
+ fn execute_without_verification( &mut self, block: &BlockWithSenders, total_difficulty: U256, @@ -280,21 +265,6 @@ where // 3. apply post execution changes self.post_execution(block, total_difficulty)?; - // Before Byzantium, receipts contained state root that would mean that expensive - // operation as hashing that is required for state root got calculated in every - // transaction This was replaced with is_success flag. - // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if self.chain_spec().is_byzantium_active_at_block(block.header.number) { - if let Err(error) = verify_receipts( - block.header.receipts_root, - block.header.logs_bloom, - receipts.iter(), - ) { - debug!(target: "evm", %error, ?receipts, "receipts verification failed"); - return Err(error) - }; - } - Ok((receipts, gas_used)) } @@ -363,7 +333,7 @@ where /// State changes are committed to the database. fn execute(mut self, input: Self::Input<'_>) -> Result { let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; + let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?; // NOTE: we need to merge keep the reverts for the bundle retention self.state.merge_transitions(BundleRetention::Reverts); @@ -403,9 +373,12 @@ where type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; + let (receipts, _gas_used) = + self.executor.execute_without_verification(block, total_difficulty)?; + + validate_block_post_execution(block, self.executor.chain_spec(), &receipts)?; // prepare the state according to the prune mode let retention = 
self.batch_record.bundle_retention(block.number); @@ -523,7 +496,7 @@ mod tests { // Now execute a block with the fixed header, ensure that it does not fail executor - .execute_and_verify( + .execute_without_verification( &BlockWithSenders { block: Block { header: header.clone(), @@ -634,7 +607,7 @@ mod tests { // attempt to execute an empty block with parent beacon block root, this should not fail executor - .execute_and_verify( + .execute_without_verification( &BlockWithSenders { block: Block { header, body: vec![], ommers: vec![], withdrawals: None }, senders: vec![], @@ -672,7 +645,7 @@ mod tests { // attempt to execute the genesis block with non-zero parent beacon block root, expect err header.parent_beacon_block_root = Some(B256::with_last_byte(0x69)); let _err = executor - .execute_one( + .execute_and_verify_one( ( &BlockWithSenders { block: Block { @@ -698,7 +671,7 @@ mod tests { // now try to process the genesis block again, this time ensuring that a system contract // call does not occur executor - .execute_one( + .execute_and_verify_one( ( &BlockWithSenders { block: Block { header, body: vec![], ommers: vec![], withdrawals: None }, @@ -752,7 +725,7 @@ mod tests { // Now execute a block with the fixed header, ensure that it does not fail executor - .execute_one( + .execute_and_verify_one( ( &BlockWithSenders { block: Block { diff --git a/crates/ethereum/evm/src/lib.rs b/crates/ethereum/evm/src/lib.rs index 7799cf4107e..9e5db6bc25d 100644 --- a/crates/ethereum/evm/src/lib.rs +++ b/crates/ethereum/evm/src/lib.rs @@ -16,7 +16,6 @@ use reth_primitives::{ }; use reth_revm::{Database, EvmBuilder}; pub mod execute; -pub mod verify; /// Ethereum DAO hardfork state change data. pub mod dao_fork; diff --git a/crates/ethereum/evm/src/verify.rs b/crates/ethereum/evm/src/verify.rs deleted file mode 100644 index 6f552fe4242..00000000000 --- a/crates/ethereum/evm/src/verify.rs +++ /dev/null @@ -1,53 +0,0 @@ -//! Helpers for verifying the receipts. 
- -use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{Bloom, GotExpected, Receipt, ReceiptWithBloom, B256}; - -/// Calculate the receipts root, and compare it against against the expected receipts root and logs -/// bloom. -pub fn verify_receipts<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = reth_primitives::proofs::calculate_receipt_root(&receipts_with_bloom); - - // Create header log bloom. - let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) -} - -/// Compare the calculated receipts root with the expected receipts root, also compare -/// the calculated logs bloom with the expected logs bloom. 
-pub fn compare_receipts_root_and_logs_bloom( - calculated_receipts_root: B256, - calculated_logs_bloom: Bloom, - expected_receipts_root: B256, - expected_logs_bloom: Bloom, -) -> Result<(), BlockExecutionError> { - if calculated_receipts_root != expected_receipts_root { - return Err(BlockValidationError::ReceiptRootDiff( - GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), - ) - .into()) - } - - if calculated_logs_bloom != expected_logs_bloom { - return Err(BlockValidationError::BloomLogDiff( - GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), - ) - .into()) - } - - Ok(()) -} diff --git a/crates/evm/Cargo.toml b/crates/evm/Cargo.toml index 854dcd95a20..183d9f694c5 100644 --- a/crates/evm/Cargo.toml +++ b/crates/evm/Cargo.toml @@ -12,10 +12,12 @@ workspace = true [dependencies] # reth +reth-execution-errors.workspace = true reth-primitives.workspace = true revm-primitives.workspace = true +reth-storage-errors.workspace = true + revm.workspace = true -reth-interfaces.workspace = true futures-util.workspace = true parking_lot = { workspace = true, optional = true } diff --git a/crates/evm/execution-errors/Cargo.toml b/crates/evm/execution-errors/Cargo.toml new file mode 100644 index 00000000000..c04b2b1224f --- /dev/null +++ b/crates/evm/execution-errors/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "reth-execution-errors" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-consensus.workspace = true +reth-primitives.workspace = true +reth-storage-errors.workspace = true + +thiserror.workspace = true diff --git a/crates/interfaces/src/executor.rs b/crates/evm/execution-errors/src/lib.rs similarity index 85% rename from crates/interfaces/src/executor.rs rename to crates/evm/execution-errors/src/lib.rs index 04b9832f092..5013e538759 
100644 --- a/crates/interfaces/src/executor.rs +++ b/crates/evm/execution-errors/src/lib.rs @@ -1,10 +1,21 @@ -use crate::{provider::ProviderError, trie::StateRootError}; -use reth_primitives::{ - revm_primitives::EVMError, BlockNumHash, Bloom, GotExpected, GotExpectedBoxed, - PruneSegmentError, B256, -}; +//! Commonly used error types used when doing block execution. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +use reth_consensus::ConsensusError; +use reth_primitives::{revm_primitives::EVMError, BlockNumHash, PruneSegmentError, B256}; +use reth_storage_errors::provider::ProviderError; use thiserror::Error; +pub mod trie; +pub use trie::{StateRootError, StorageRootError}; + /// Transaction validation errors #[derive(Error, Debug, Clone, PartialEq, Eq)] pub enum BlockValidationError { @@ -23,12 +34,6 @@ pub enum BlockValidationError { /// Error when incrementing balance in post execution #[error("incrementing balance in post execution failed")] IncrementBalanceFailed, - /// Error when receipt root doesn't match expected value - #[error("receipt root mismatch: {0}")] - ReceiptRootDiff(GotExpectedBoxed), - /// Error when header bloom filter doesn't match expected value - #[error("header bloom filter mismatch: {0}")] - BloomLogDiff(GotExpectedBoxed), /// Error when the state root does not match the expected value. 
#[error(transparent)] StateRoot(#[from] StateRootError), @@ -40,14 +45,6 @@ pub enum BlockValidationError { /// The available block gas block_available_gas: u64, }, - /// Error when block gas used doesn't match expected value - #[error("block gas used mismatch: {gas}; gas spent by each transaction: {gas_spent_by_tx:?}")] - BlockGasUsed { - /// The gas diff. - gas: GotExpected, - /// Gas spent by each transaction - gas_spent_by_tx: Vec<(u64, u64)>, - }, /// Error for pre-merge block #[error("block {hash} is pre merge")] BlockPreMerge { @@ -88,6 +85,9 @@ pub enum BlockExecutionError { /// Pruning error, transparently wrapping `PruneSegmentError` #[error(transparent)] Pruning(#[from] PruneSegmentError), + /// Consensus error, transparently wrapping `ConsensusError` + #[error(transparent)] + Consensus(#[from] ConsensusError), /// Transaction error on revert with inner details #[error("transaction error on revert: {inner}")] CanonicalRevert { diff --git a/crates/interfaces/src/trie/mod.rs b/crates/evm/execution-errors/src/trie.rs similarity index 86% rename from crates/interfaces/src/trie/mod.rs rename to crates/evm/execution-errors/src/trie.rs index d2dba7c272a..146f72f4814 100644 --- a/crates/interfaces/src/trie/mod.rs +++ b/crates/evm/execution-errors/src/trie.rs @@ -1,7 +1,9 @@ -use crate::db::DatabaseError; +//! Errors when computing the state root. + +use reth_storage_errors::db::DatabaseError; use thiserror::Error; -/// State root error. +/// State root errors. #[derive(Error, Debug, PartialEq, Eq, Clone)] pub enum StateRootError { /// Internal database error. 
diff --git a/crates/evm/src/either.rs b/crates/evm/src/either.rs index d1ae4ed78ff..7d8320c315c 100644 --- a/crates/evm/src/either.rs +++ b/crates/evm/src/either.rs @@ -4,8 +4,9 @@ use crate::execute::{ BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, BlockExecutorProvider, Executor, }; -use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; +use reth_execution_errors::BlockExecutionError; use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt}; +use reth_storage_errors::provider::ProviderError; use revm_primitives::db::Database; // re-export Either @@ -89,10 +90,10 @@ where type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { match self { - Either::Left(a) => a.execute_one(input), - Either::Right(b) => b.execute_one(input), + Either::Left(a) => a.execute_and_verify_one(input), + Either::Right(b) => b.execute_and_verify_one(input), } } diff --git a/crates/evm/src/execute.rs b/crates/evm/src/execute.rs index e7ce09e7980..6fdd6ebfd0c 100644 --- a/crates/evm/src/execute.rs +++ b/crates/evm/src/execute.rs @@ -1,12 +1,16 @@ //! Traits for execution. -use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt, Receipts, U256}; use revm::db::BundleState; use revm_primitives::db::Database; -/// A general purpose executor trait that executes on an input (e.g. blocks) and produces an output +pub use reth_execution_errors::{BlockExecutionError, BlockValidationError}; +pub use reth_storage_errors::provider::ProviderError; + +/// A general purpose executor trait that executes an input (e.g. block) and produces an output /// (e.g. state changes and receipts). 
+/// +/// This executor does not validate the output, see [BatchExecutor] for that. pub trait Executor { /// The input type for the executor. type Input<'a>; @@ -17,12 +21,17 @@ pub trait Executor { /// Consumes the type and executes the block. /// - /// Returns the output of the block execution. + /// # Note + /// Execution happens without any validation of the output. To validate the output, use the + /// [BatchExecutor]. + /// + /// # Returns + /// The output of the block execution. fn execute(self, input: Self::Input<'_>) -> Result; } -/// A general purpose executor that can execute multiple inputs in sequence and keep track of the -/// state over the entire batch. +/// A general purpose executor that can execute multiple inputs in sequence, validate the outputs, +/// and keep track of the state over the entire batch. pub trait BatchExecutor { /// The input type for the executor. type Input<'a>; @@ -31,27 +40,34 @@ pub trait BatchExecutor { /// The error type returned by the executor. type Error; - /// Executes the next block in the batch and update the state internally. - fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error>; + /// Executes the next block in the batch, verifies the output and updates the state internally. + fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error>; - /// Executes multiple inputs in the batch and update the state internally. - fn execute_many<'a, I>(&mut self, inputs: I) -> Result<(), Self::Error> + /// Executes multiple inputs in the batch, verifies the output, and updates the state + /// internally. + /// + /// This method is a convenience function for calling [`BatchExecutor::execute_and_verify_one`] + /// for each input. 
+ fn execute_and_verify_many<'a, I>(&mut self, inputs: I) -> Result<(), Self::Error> where I: IntoIterator>, { for input in inputs { - self.execute_one(input)?; + self.execute_and_verify_one(input)?; } Ok(()) } - /// Executes the entire batch and return the final state. - fn execute_batch<'a, I>(mut self, batch: I) -> Result + /// Executes the entire batch, verifies the output, and returns the final state. + /// + /// This method is a convenience function for calling [`BatchExecutor::execute_and_verify_many`] + /// and [`BatchExecutor::finalize`]. + fn execute_and_verify_batch<'a, I>(mut self, batch: I) -> Result where I: IntoIterator>, Self: Sized, { - self.execute_many(batch)?; + self.execute_and_verify_many(batch)?; Ok(self.finalize()) } @@ -222,7 +238,7 @@ mod tests { type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, _input: Self::Input<'_>) -> Result<(), Self::Error> { + fn execute_and_verify_one(&mut self, _input: Self::Input<'_>) -> Result<(), Self::Error> { Ok(()) } diff --git a/crates/evm/src/test_utils.rs b/crates/evm/src/test_utils.rs index e0ee4691704..910f9d08b2e 100644 --- a/crates/evm/src/test_utils.rs +++ b/crates/evm/src/test_utils.rs @@ -5,8 +5,9 @@ use crate::execute::{ BlockExecutorProvider, Executor, }; use parking_lot::Mutex; -use reth_interfaces::{executor::BlockExecutionError, provider::ProviderError}; +use reth_execution_errors::BlockExecutionError; use reth_primitives::{BlockNumber, BlockWithSenders, PruneModes, Receipt}; +use reth_storage_errors::provider::ProviderError; use revm_primitives::db::Database; use std::sync::Arc; @@ -64,7 +65,7 @@ impl BatchExecutor for MockExecutorProvider { type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, _: Self::Input<'_>) -> Result<(), Self::Error> { + fn execute_and_verify_one(&mut self, _: Self::Input<'_>) -> Result<(), Self::Error> { Ok(()) } diff --git a/crates/exex/src/manager.rs 
b/crates/exex/src/manager.rs index 1de8c102e3b..088328c8605 100644 --- a/crates/exex/src/manager.rs +++ b/crates/exex/src/manager.rs @@ -160,7 +160,7 @@ pub struct ExExManagerMetrics { /// The manager is responsible for: /// /// - Receiving relevant events from the rest of the node, and sending these to the execution -/// extensions +/// extensions /// - Backpressure /// - Error handling /// - Monitoring diff --git a/crates/interfaces/Cargo.toml b/crates/interfaces/Cargo.toml index 27e2d8f390e..1d7483691bf 100644 --- a/crates/interfaces/Cargo.toml +++ b/crates/interfaces/Cargo.toml @@ -11,39 +11,17 @@ repository.workspace = true workspace = true [dependencies] -reth-primitives.workspace = true +reth-consensus.workspace = true +reth-execution-errors.workspace = true reth-fs-util.workspace = true reth-network-api.workspace = true -reth-eth-wire-types.workspace = true -reth-consensus.workspace = true -reth-network-types.workspace = true - -# async -futures.workspace = true -tokio = { workspace = true, features = ["sync"] } +reth-network-p2p.workspace = true +reth-primitives.workspace = true +reth-storage-errors.workspace = true # misc -auto_impl.workspace = true thiserror.workspace = true -tracing.workspace = true -secp256k1 = { workspace = true, default-features = false, features = [ - "alloc", - "recovery", - "rand", -], optional = true } -clap = { workspace = true, features = ["derive"], optional = true } -parking_lot = { workspace = true, optional = true } -rand = { workspace = true, optional = true } - -[dev-dependencies] -reth-consensus = { workspace = true, features = ["test-utils"] } - -parking_lot.workspace = true -rand.workspace = true -tokio = { workspace = true, features = ["full"] } -secp256k1 = { workspace = true, features = ["alloc", "recovery", "rand"] } [features] -test-utils = ["reth-consensus/test-utils", "secp256k1", "rand", "parking_lot"] -cli = ["clap"] -optimism = ["reth-eth-wire-types/optimism"] +test-utils = ["reth-consensus/test-utils", 
"reth-network-p2p/test-utils"] +clap = ["reth-storage-errors/clap"] \ No newline at end of file diff --git a/crates/interfaces/src/blockchain_tree/error.rs b/crates/interfaces/src/blockchain_tree/error.rs index a98d765014b..122b857437e 100644 --- a/crates/interfaces/src/blockchain_tree/error.rs +++ b/crates/interfaces/src/blockchain_tree/error.rs @@ -1,12 +1,10 @@ //! Error handling for the blockchain tree -use crate::{ - executor::{BlockExecutionError, BlockValidationError}, - provider::ProviderError, - RethError, -}; +use crate::RethError; use reth_consensus::ConsensusError; +use reth_execution_errors::{BlockExecutionError, BlockValidationError}; use reth_primitives::{BlockHash, BlockNumber, SealedBlock}; +use reth_storage_errors::provider::ProviderError; /// Various error cases that can occur when a block violates tree assumptions. #[derive(Debug, Clone, Copy, thiserror::Error, Eq, PartialEq)] @@ -297,7 +295,7 @@ impl InsertBlockErrorKind { // other execution errors that are considered internal errors InsertBlockErrorKind::Execution(err) => { match err { - BlockExecutionError::Validation(_) => { + BlockExecutionError::Validation(_) | BlockExecutionError::Consensus(_) => { // this is caused by an invalid block true } diff --git a/crates/interfaces/src/blockchain_tree/mod.rs b/crates/interfaces/src/blockchain_tree/mod.rs index 7d2b50e418e..0c1a9553dc3 100644 --- a/crates/interfaces/src/blockchain_tree/mod.rs +++ b/crates/interfaces/src/blockchain_tree/mod.rs @@ -210,6 +210,7 @@ pub enum BlockStatus { /// This is required to: /// - differentiate whether trie state updates should be cached. /// - inform other +/// /// This is required because the state root check can only be performed if the targeted block can be /// traced back to the canonical __head__. 
#[derive(Debug, Clone, Copy, PartialEq, Eq)] diff --git a/crates/interfaces/src/error.rs b/crates/interfaces/src/error.rs index ec3da8ad01b..f38742ab51f 100644 --- a/crates/interfaces/src/error.rs +++ b/crates/interfaces/src/error.rs @@ -1,12 +1,9 @@ -use crate::{ - blockchain_tree::error::{BlockchainTreeError, CanonicalError}, - db::DatabaseError, - executor::BlockExecutionError, - provider::ProviderError, -}; +use crate::blockchain_tree::error::{BlockchainTreeError, CanonicalError}; use reth_consensus::ConsensusError; +use reth_execution_errors::BlockExecutionError; use reth_fs_util::FsPathError; use reth_network_api::NetworkError; +use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; /// Result alias for [`RethError`]. pub type RethResult = Result; diff --git a/crates/interfaces/src/lib.rs b/crates/interfaces/src/lib.rs index e60d4a62164..461413a1e2f 100644 --- a/crates/interfaces/src/lib.rs +++ b/crates/interfaces/src/lib.rs @@ -12,31 +12,28 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -/// Database error -pub mod db; +/// Storage error types +pub use reth_storage_errors::{db, provider}; /// Block Execution traits. -pub mod executor; +pub use reth_execution_errors as executor; /// Possible errors when interacting with the chain. mod error; pub use error::{RethError, RethResult}; /// P2P traits. -pub mod p2p; +pub use reth_network_p2p as p2p; /// Trie error -pub mod trie; - -/// Provider error -pub mod provider; +pub use reth_execution_errors::trie; /// Syncing related traits. -pub mod sync; +pub use reth_network_p2p::sync; /// BlockchainTree related traits. pub mod blockchain_tree; -#[cfg(any(test, feature = "test-utils"))] /// Common test helpers for mocking out Consensus, Downloaders and Header Clients. 
-pub mod test_utils; +#[cfg(feature = "test-utils")] +pub use reth_network_p2p::test_utils; diff --git a/crates/net/common/Cargo.toml b/crates/net/common/Cargo.toml index 0c3b253a50a..3d73f480f57 100644 --- a/crates/net/common/Cargo.toml +++ b/crates/net/common/Cargo.toml @@ -12,8 +12,8 @@ description = "Types shared across network code" workspace = true [dependencies] -# reth -reth-network-types.workspace = true +# ethereum +alloy-primitives.workspace = true # async pin-project.workspace = true diff --git a/crates/net/common/src/ban_list.rs b/crates/net/common/src/ban_list.rs index 11d4c6049b4..1cde15ef2b9 100644 --- a/crates/net/common/src/ban_list.rs +++ b/crates/net/common/src/ban_list.rs @@ -1,6 +1,7 @@ //! Support for banning peers. -use reth_network_types::PeerId; +type PeerId = alloy_primitives::B512; + use std::{collections::HashMap, net::IpAddr, time::Instant}; /// Determines whether or not the IP is globally routable. diff --git a/crates/net/discv4/src/lib.rs b/crates/net/discv4/src/lib.rs index 77cc309ebf9..2019f58ee16 100644 --- a/crates/net/discv4/src/lib.rs +++ b/crates/net/discv4/src/lib.rs @@ -2266,7 +2266,7 @@ mod tests { assert!(service.pending_pings.contains_key(&node.id)); assert_eq!(service.pending_pings.len(), num_inserted); if num_inserted == MAX_NODES_PING { - break; + break } } } diff --git a/crates/net/downloaders/src/bodies/bodies.rs b/crates/net/downloaders/src/bodies/bodies.rs index 8f97e09c7dd..a806f2fa62e 100644 --- a/crates/net/downloaders/src/bodies/bodies.rs +++ b/crates/net/downloaders/src/bodies/bodies.rs @@ -95,7 +95,7 @@ where max_non_empty: u64, ) -> DownloadResult>> { if range.is_empty() || max_non_empty == 0 { - return Ok(None); + return Ok(None) } // Collect headers while @@ -144,7 +144,7 @@ where // if we're only connected to a few peers, we keep it low if num_peers < *self.concurrent_requests_range.start() { - return max_requests; + return max_requests } max_requests.min(*self.concurrent_requests_range.end()) @@ -238,7 
+238,7 @@ where .skip_while(|b| b.block_number() < expected) .take_while(|b| self.download_range.contains(&b.block_number())) .collect() - }); + }) } // Drop buffered response since we passed that range @@ -257,7 +257,7 @@ where self.queued_bodies.shrink_to_fit(); self.metrics.total_flushed.increment(next_batch.len() as u64); self.metrics.queued_blocks.set(self.queued_bodies.len() as f64); - return Some(next_batch); + return Some(next_batch) } None } @@ -354,13 +354,13 @@ where fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { let this = self.get_mut(); if this.is_terminated() { - return Poll::Ready(None); + return Poll::Ready(None) } // Submit new requests and poll any in progress loop { // Yield next batch if ready if let Some(next_batch) = this.try_split_next_batch() { - return Poll::Ready(Some(Ok(next_batch))); + return Poll::Ready(Some(Ok(next_batch))) } // Poll requests @@ -373,7 +373,7 @@ where Err(error) => { tracing::debug!(target: "downloaders::bodies", %error, "Request failed"); this.clear(); - return Poll::Ready(Some(Err(error))); + return Poll::Ready(Some(Err(error))) } }; } @@ -396,7 +396,7 @@ where Err(error) => { tracing::error!(target: "downloaders::bodies", %error, "Failed to download from next request"); this.clear(); - return Poll::Ready(Some(Err(error))); + return Poll::Ready(Some(Err(error))) } }; } @@ -409,21 +409,21 @@ where this.buffered_responses.shrink_to_fit(); if !new_request_submitted { - break; + break } } // All requests are handled, stream is finished if this.in_progress_queue.is_empty() { if this.queued_bodies.is_empty() { - return Poll::Ready(None); + return Poll::Ready(None) } let batch_size = this.stream_batch_size.min(this.queued_bodies.len()); let next_batch = this.queued_bodies.drain(..batch_size).collect::>(); this.queued_bodies.shrink_to_fit(); this.metrics.total_flushed.increment(next_batch.len() as u64); this.metrics.queued_blocks.set(this.queued_bodies.len() as f64); - return 
Poll::Ready(Some(Ok(next_batch))); + return Poll::Ready(Some(Ok(next_batch))) } Poll::Pending diff --git a/crates/net/downloaders/src/bodies/request.rs b/crates/net/downloaders/src/bodies/request.rs index dfe877a0b91..593c738e0bb 100644 --- a/crates/net/downloaders/src/bodies/request.rs +++ b/crates/net/downloaders/src/bodies/request.rs @@ -180,7 +180,7 @@ where let block = SealedBlock::new(next_header, next_body); - if let Err(error) = self.consensus.validate_block(&block) { + if let Err(error) = self.consensus.validate_block_pre_execution(&block) { // Body is invalid, put the header back and return an error let hash = block.hash(); let number = block.number; diff --git a/crates/net/ecies/src/algorithm.rs b/crates/net/ecies/src/algorithm.rs index 52398de4fe4..65d74627e27 100644 --- a/crates/net/ecies/src/algorithm.rs +++ b/crates/net/ecies/src/algorithm.rs @@ -45,8 +45,8 @@ fn ecdh_x(public_key: &PublicKey, secret_key: &SecretKey) -> B256 { /// # Panics /// * If the `dest` is empty /// * If the `dest` len is greater than or equal to the hash output len * the max counter value. In -/// this case, the hash output len is 32 bytes, and the max counter value is 2^32 - 1. So the dest -/// cannot have a len greater than 32 * 2^32 - 1. +/// this case, the hash output len is 32 bytes, and the max counter value is 2^32 - 1. So the dest +/// cannot have a len greater than 32 * 2^32 - 1. 
fn kdf(secret: B256, s1: &[u8], dest: &mut [u8]) { concat_kdf::derive_key_into::(secret.as_slice(), s1, dest).unwrap(); } diff --git a/crates/net/eth-wire-types/Cargo.toml b/crates/net/eth-wire-types/Cargo.toml index d68fbbd1f0c..9954dba10fc 100644 --- a/crates/net/eth-wire-types/Cargo.toml +++ b/crates/net/eth-wire-types/Cargo.toml @@ -13,7 +13,7 @@ workspace = true [dependencies] # reth -reth-codecs.workspace = true +reth-codecs-derive.workspace = true reth-primitives.workspace = true alloy-rlp = { workspace = true, features = ["derive"] } @@ -55,4 +55,3 @@ arbitrary = [ "dep:proptest", "dep:proptest-derive", ] -optimism = ["reth-primitives/optimism"] diff --git a/crates/net/eth-wire-types/src/blocks.rs b/crates/net/eth-wire-types/src/blocks.rs index 36b8e6e8ca9..d8c13062d26 100644 --- a/crates/net/eth-wire-types/src/blocks.rs +++ b/crates/net/eth-wire-types/src/blocks.rs @@ -2,7 +2,7 @@ //! types. use alloy_rlp::{RlpDecodable, RlpDecodableWrapper, RlpEncodable, RlpEncodableWrapper}; -use reth_codecs::{add_arbitrary_tests, derive_arbitrary}; +use reth_codecs_derive::{add_arbitrary_tests, derive_arbitrary}; use reth_primitives::{BlockBody, BlockHashOrNumber, Header, HeadersDirection, B256}; #[cfg(any(test, feature = "arbitrary"))] diff --git a/crates/net/eth-wire-types/src/broadcast.rs b/crates/net/eth-wire-types/src/broadcast.rs index 69362523922..b648f5a22d6 100644 --- a/crates/net/eth-wire-types/src/broadcast.rs +++ b/crates/net/eth-wire-types/src/broadcast.rs @@ -6,7 +6,7 @@ use alloy_rlp::{ }; use derive_more::{Constructor, Deref, DerefMut, From, IntoIterator}; -use reth_codecs::derive_arbitrary; +use reth_codecs_derive::derive_arbitrary; use reth_primitives::{ Block, Bytes, PooledTransactionsElement, TransactionSigned, TxHash, B256, U128, }; diff --git a/crates/net/eth-wire-types/src/lib.rs b/crates/net/eth-wire-types/src/lib.rs index 18c1bd59fdd..a60fa4c8c1e 100644 --- a/crates/net/eth-wire-types/src/lib.rs +++ b/crates/net/eth-wire-types/src/lib.rs @@ -8,6 
+8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] // TODO: remove when https://github.com/proptest-rs/proptest/pull/427 is merged #![allow(unknown_lints, non_local_definitions)] +#![allow(clippy::needless_lifetimes)] // side effect of optimism fields #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod status; diff --git a/crates/net/eth-wire-types/src/message.rs b/crates/net/eth-wire-types/src/message.rs index dc8011879ba..c4101e852d2 100644 --- a/crates/net/eth-wire-types/src/message.rs +++ b/crates/net/eth-wire-types/src/message.rs @@ -169,7 +169,7 @@ impl From for ProtocolBroadcastMessage { /// The ethereum wire protocol is a set of messages that are broadcast to the network in two /// styles: /// * A request message sent by a peer (such as [`GetPooledTransactions`]), and an associated -/// response message (such as [`PooledTransactions`]). +/// response message (such as [`PooledTransactions`]). /// * A message that is broadcast to the network, without a corresponding request. /// /// The newer `eth/66` is an efficiency upgrade on top of `eth/65`, introducing a request id to diff --git a/crates/net/eth-wire-types/src/receipts.rs b/crates/net/eth-wire-types/src/receipts.rs index 87a0e10deac..3d653b594be 100644 --- a/crates/net/eth-wire-types/src/receipts.rs +++ b/crates/net/eth-wire-types/src/receipts.rs @@ -1,7 +1,7 @@ //! Implements the `GetReceipts` and `Receipts` message types. 
use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; -use reth_codecs::derive_arbitrary; +use reth_codecs_derive::derive_arbitrary; use reth_primitives::{ReceiptWithBloom, B256}; #[cfg(feature = "serde")] @@ -41,16 +41,7 @@ mod tests { #[test] fn roundtrip_eip1559() { let receipts = Receipts(vec![vec![ReceiptWithBloom { - receipt: Receipt { - tx_type: TxType::Eip1559, - success: false, - cumulative_gas_used: 0, - logs: vec![], - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, - }, + receipt: Receipt { tx_type: TxType::Eip1559, ..Default::default() }, bloom: Default::default(), }]]); @@ -119,10 +110,7 @@ mod tests { ), ], success: false, - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, + ..Default::default() }, bloom: hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), }, @@ -158,10 +146,7 @@ mod tests { ), ], success: false, - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, + ..Default::default() }, bloom: 
hex!("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000").into(), }, diff --git a/crates/net/eth-wire-types/src/state.rs b/crates/net/eth-wire-types/src/state.rs index 334184b3b72..5f3dc833950 100644 --- a/crates/net/eth-wire-types/src/state.rs +++ b/crates/net/eth-wire-types/src/state.rs @@ -1,7 +1,7 @@ //! Implements the `GetNodeData` and `NodeData` message types. use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; -use reth_codecs::derive_arbitrary; +use reth_codecs_derive::derive_arbitrary; use reth_primitives::{Bytes, B256}; #[cfg(feature = "serde")] diff --git a/crates/net/eth-wire-types/src/status.rs b/crates/net/eth-wire-types/src/status.rs index 6dd3f7eb42a..fc6f7fd2c7d 100644 --- a/crates/net/eth-wire-types/src/status.rs +++ b/crates/net/eth-wire-types/src/status.rs @@ -1,6 +1,6 @@ use crate::EthVersion; use alloy_rlp::{RlpDecodable, RlpEncodable}; -use reth_codecs::derive_arbitrary; +use reth_codecs_derive::derive_arbitrary; use reth_primitives::{ hex, Chain, ChainSpec, ForkId, Genesis, Hardfork, Head, NamedChain, B256, MAINNET, U256, }; diff --git a/crates/net/eth-wire-types/src/transactions.rs b/crates/net/eth-wire-types/src/transactions.rs index f19bbdcc744..2a7313ad1f3 100644 --- a/crates/net/eth-wire-types/src/transactions.rs +++ b/crates/net/eth-wire-types/src/transactions.rs @@ -2,7 +2,7 @@ use alloy_rlp::{RlpDecodableWrapper, RlpEncodableWrapper}; use derive_more::{Constructor, Deref, IntoIterator}; -use reth_codecs::derive_arbitrary; +use reth_codecs_derive::derive_arbitrary; use 
reth_primitives::{ transaction::TransactionConversionError, PooledTransactionsElement, TransactionSigned, B256, }; diff --git a/crates/net/eth-wire/src/multiplex.rs b/crates/net/eth-wire/src/multiplex.rs index 04b7cda37e5..82172f8d5c7 100644 --- a/crates/net/eth-wire/src/multiplex.rs +++ b/crates/net/eth-wire/src/multiplex.rs @@ -479,9 +479,9 @@ where if let Err(disconnect_err) = this.inner.conn.start_disconnect(DisconnectReason::DisconnectRequested) { - return Poll::Ready(Some(Err(disconnect_err.into()))); + return Poll::Ready(Some(Err(disconnect_err.into()))) } - return Poll::Ready(Some(Err(err.into()))); + return Poll::Ready(Some(Err(err.into()))) } Poll::Pending => { conn_ready = false; diff --git a/crates/net/network-api/Cargo.toml b/crates/net/network-api/Cargo.toml index 81536aad985..3e8ed584fc3 100644 --- a/crates/net/network-api/Cargo.toml +++ b/crates/net/network-api/Cargo.toml @@ -6,19 +6,20 @@ rust-version.workspace = true license.workspace = true homepage.workspace = true repository.workspace = true -description = "Network interfaces" +description = "Network interfaces and commonly used types" [lints] workspace = true [dependencies] # reth -reth-primitives.workspace = true reth-eth-wire.workspace = true reth-rpc-types.workspace = true -reth-discv4.workspace = true reth-network-types.workspace = true +# ethereum +alloy-primitives.workspace = true + # eth enr = { workspace = true, default-features = false, features = ["rust-secp256k1"] } diff --git a/crates/net/network-api/src/lib.rs b/crates/net/network-api/src/lib.rs index 6c3040bd9b8..10ffddb6a30 100644 --- a/crates/net/network-api/src/lib.rs +++ b/crates/net/network-api/src/lib.rs @@ -1,4 +1,4 @@ -//! Reth network interface definitions. +//! Reth interface definitions and commonly used types for the reth-network crate. //! //! Provides abstractions for the reth-network crate. //! 
@@ -13,15 +13,16 @@ )] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -use reth_eth_wire::{DisconnectReason, EthVersion, Status}; -use reth_network_types::PeerId; -use reth_primitives::NodeRecord; +use reth_eth_wire::{capability::Capabilities, DisconnectReason, EthVersion, Status}; +use reth_rpc_types::NetworkStatus; use std::{future::Future, net::SocketAddr, sync::Arc, time::Instant}; pub use error::NetworkError; pub use reputation::{Reputation, ReputationChangeKind}; -use reth_eth_wire::capability::Capabilities; -use reth_rpc_types::NetworkStatus; +use reth_network_types::NodeRecord; + +/// The PeerId type. +pub type PeerId = alloy_primitives::B512; /// Network Error pub mod error; diff --git a/crates/net/network-api/src/noop.rs b/crates/net/network-api/src/noop.rs index 2ace603e348..b022ced4bc3 100644 --- a/crates/net/network-api/src/noop.rs +++ b/crates/net/network-api/src/noop.rs @@ -4,14 +4,12 @@ //! generic over it. use crate::{ - NetworkError, NetworkInfo, PeerInfo, PeerKind, Peers, PeersInfo, Reputation, + NetworkError, NetworkInfo, PeerId, PeerInfo, PeerKind, Peers, PeersInfo, Reputation, ReputationChangeKind, }; use enr::{secp256k1::SecretKey, Enr}; -use reth_discv4::DEFAULT_DISCOVERY_PORT; use reth_eth_wire::{DisconnectReason, ProtocolVersion}; -use reth_network_types::PeerId; -use reth_primitives::{Chain, NodeRecord}; +use reth_network_types::NodeRecord; use reth_rpc_types::{admin::EthProtocolInfo, NetworkStatus}; use std::net::{IpAddr, SocketAddr}; @@ -24,7 +22,7 @@ pub struct NoopNetwork; impl NetworkInfo for NoopNetwork { fn local_addr(&self) -> SocketAddr { - (IpAddr::from(std::net::Ipv4Addr::UNSPECIFIED), DEFAULT_DISCOVERY_PORT).into() + (IpAddr::from(std::net::Ipv4Addr::UNSPECIFIED), 30303).into() } async fn network_status(&self) -> Result { @@ -42,7 +40,8 @@ impl NetworkInfo for NoopNetwork { } fn chain_id(&self) -> u64 { - Chain::mainnet().into() + // mainnet + 1 } fn is_syncing(&self) -> bool { diff --git 
a/crates/net/network/src/config.rs b/crates/net/network/src/config.rs index 4bd1dab8835..368f958b2a3 100644 --- a/crates/net/network/src/config.rs +++ b/crates/net/network/src/config.rs @@ -8,7 +8,7 @@ use crate::{ transactions::TransactionsManagerConfig, NetworkHandle, NetworkManager, }; -use reth_discv4::{Discv4Config, Discv4ConfigBuilder, DEFAULT_DISCOVERY_ADDRESS}; +use reth_discv4::{Discv4Config, Discv4ConfigBuilder, NatResolver, DEFAULT_DISCOVERY_ADDRESS}; use reth_discv5::NetworkStackId; use reth_dns_discovery::DnsDiscoveryConfig; use reth_eth_wire::{HelloMessage, HelloMessageWithProtocols, Status}; @@ -314,6 +314,19 @@ impl NetworkConfigBuilder { self } + /// Sets the external ip resolver to use for discovery v4. + /// + /// If no [Discv4ConfigBuilder] is set via [Self::discovery], this will create a new one. + /// + /// This is a convenience function for setting the external ip resolver on the default + /// [Discv4Config] config. + pub fn external_ip_resolver(mut self, resolver: NatResolver) -> Self { + self.discovery_v4_builder + .get_or_insert_with(Discv4Config::builder) + .external_ip_resolver(Some(resolver)); + self + } + /// Sets the discv4 config to use. pub fn discovery(mut self, builder: Discv4ConfigBuilder) -> Self { self.discovery_v4_builder = Some(builder); diff --git a/crates/net/network/src/manager.rs b/crates/net/network/src/manager.rs index d516625c640..b6b1d4d1ecb 100644 --- a/crates/net/network/src/manager.rs +++ b/crates/net/network/src/manager.rs @@ -49,7 +49,7 @@ use reth_primitives::{ForkId, NodeRecord}; use reth_provider::{BlockNumReader, BlockReader}; use reth_rpc_types::{admin::EthProtocolInfo, NetworkStatus}; use reth_tasks::shutdown::GracefulShutdown; -use reth_tokio_util::EventListeners; +use reth_tokio_util::EventSender; use secp256k1::SecretKey; use std::{ net::SocketAddr, @@ -84,8 +84,8 @@ pub struct NetworkManager { from_handle_rx: UnboundedReceiverStream, /// Handles block imports according to the `eth` protocol. 
block_import: Box, - /// All listeners for high level network events. - event_listeners: EventListeners, + /// Sender for high level network events. + event_sender: EventSender, /// Sender half to send events to the /// [`TransactionsManager`](crate::transactions::TransactionsManager) task, if configured. to_transactions_manager: Option>, @@ -246,6 +246,8 @@ where let (to_manager_tx, from_handle_rx) = mpsc::unbounded_channel(); + let event_sender: EventSender = Default::default(); + let handle = NetworkHandle::new( Arc::clone(&num_active_peers), listener_address, @@ -258,6 +260,7 @@ where Arc::new(AtomicU64::new(chain_spec.chain.id())), tx_gossip_disabled, discv4, + event_sender.clone(), ); Ok(Self { @@ -265,7 +268,7 @@ where handle, from_handle_rx: UnboundedReceiverStream::new(from_handle_rx), block_import, - event_listeners: Default::default(), + event_sender, to_transactions_manager: None, to_eth_request_handler: None, num_active_peers, @@ -528,9 +531,6 @@ where /// Handler for received messages from a handle fn on_handle_message(&mut self, msg: NetworkHandleMessage) { match msg { - NetworkHandleMessage::EventListener(tx) => { - self.event_listeners.push_listener(tx); - } NetworkHandleMessage::DiscoveryListener(tx) => { self.swarm.state_mut().discovery_mut().add_listener(tx); } @@ -690,7 +690,7 @@ where self.update_active_connection_metrics(); - self.event_listeners.notify(NetworkEvent::SessionEstablished { + self.event_sender.notify(NetworkEvent::SessionEstablished { peer_id, remote_addr, client_version, @@ -702,12 +702,12 @@ where } SwarmEvent::PeerAdded(peer_id) => { trace!(target: "net", ?peer_id, "Peer added"); - self.event_listeners.notify(NetworkEvent::PeerAdded(peer_id)); + self.event_sender.notify(NetworkEvent::PeerAdded(peer_id)); self.metrics.tracked_peers.set(self.swarm.state().peers().num_known_peers() as f64); } SwarmEvent::PeerRemoved(peer_id) => { trace!(target: "net", ?peer_id, "Peer dropped"); - 
self.event_listeners.notify(NetworkEvent::PeerRemoved(peer_id)); + self.event_sender.notify(NetworkEvent::PeerRemoved(peer_id)); self.metrics.tracked_peers.set(self.swarm.state().peers().num_known_peers() as f64); } SwarmEvent::SessionClosed { peer_id, remote_addr, error } => { @@ -750,7 +750,7 @@ where .saturating_sub(1) as f64, ); - self.event_listeners.notify(NetworkEvent::SessionClosed { peer_id, reason }); + self.event_sender.notify(NetworkEvent::SessionClosed { peer_id, reason }); } SwarmEvent::IncomingPendingSessionClosed { remote_addr, error } => { trace!( diff --git a/crates/net/network/src/network.rs b/crates/net/network/src/network.rs index 86669bf19f4..8d9b277f419 100644 --- a/crates/net/network/src/network.rs +++ b/crates/net/network/src/network.rs @@ -16,6 +16,7 @@ use reth_network_api::{ use reth_network_types::PeerId; use reth_primitives::{Head, NodeRecord, TransactionSigned, B256}; use reth_rpc_types::NetworkStatus; +use reth_tokio_util::{EventSender, EventStream}; use secp256k1::SecretKey; use std::{ net::SocketAddr, @@ -24,7 +25,10 @@ use std::{ Arc, }, }; -use tokio::sync::{mpsc, mpsc::UnboundedSender, oneshot}; +use tokio::sync::{ + mpsc::{self, UnboundedSender}, + oneshot, +}; use tokio_stream::wrappers::UnboundedReceiverStream; /// A _shareable_ network frontend. Used to interact with the network. 
@@ -53,6 +57,7 @@ impl NetworkHandle { chain_id: Arc, tx_gossip_disabled: bool, discv4: Option, + event_sender: EventSender, ) -> Self { let inner = NetworkInner { num_active_peers, @@ -68,6 +73,7 @@ impl NetworkHandle { chain_id, tx_gossip_disabled, discv4, + event_sender, }; Self { inner: Arc::new(inner) } } @@ -196,10 +202,8 @@ impl NetworkHandle { // === API Implementations === impl NetworkEvents for NetworkHandle { - fn event_listener(&self) -> UnboundedReceiverStream { - let (tx, rx) = mpsc::unbounded_channel(); - let _ = self.manager().send(NetworkHandleMessage::EventListener(tx)); - UnboundedReceiverStream::new(rx) + fn event_listener(&self) -> EventStream { + self.inner.event_sender.new_listener() } fn discovery_listener(&self) -> UnboundedReceiverStream { @@ -401,12 +405,14 @@ struct NetworkInner { tx_gossip_disabled: bool, /// The instance of the discv4 service discv4: Option, + /// Sender for high level network events. + event_sender: EventSender, } /// Provides event subscription for the network. pub trait NetworkEvents: Send + Sync { /// Creates a new [`NetworkEvent`] listener channel. - fn event_listener(&self) -> UnboundedReceiverStream; + fn event_listener(&self) -> EventStream; /// Returns a new [`DiscoveryEvent`] stream. /// /// This stream yields [`DiscoveryEvent`]s for each peer that is discovered. @@ -430,8 +436,6 @@ pub(crate) enum NetworkHandleMessage { RemovePeer(PeerId, PeerKind), /// Disconnects a connection to a peer if it exists, optionally providing a disconnect reason. DisconnectPeer(PeerId, Option), - /// Adds a new listener for `NetworkEvent`. - EventListener(UnboundedSender), /// Broadcasts an event to announce a new block to all nodes. AnnounceBlock(NewBlock, B256), /// Sends a list of transactions to the given peer. 
diff --git a/crates/net/network/src/test_utils/testnet.rs b/crates/net/network/src/test_utils/testnet.rs index a92934c0cbc..99c98db55d5 100644 --- a/crates/net/network/src/test_utils/testnet.rs +++ b/crates/net/network/src/test_utils/testnet.rs @@ -20,6 +20,7 @@ use reth_provider::{ test_utils::NoopProvider, BlockReader, BlockReaderIdExt, HeaderProvider, StateProviderFactory, }; use reth_tasks::TokioTaskExecutor; +use reth_tokio_util::EventStream; use reth_transaction_pool::{ blobstore::InMemoryBlobStore, test_utils::{TestPool, TestPoolBuilder}, @@ -40,7 +41,6 @@ use tokio::{ }, task::JoinHandle, }; -use tokio_stream::wrappers::UnboundedReceiverStream; /// A test network consisting of multiple peers. pub struct Testnet { @@ -503,7 +503,7 @@ impl PeerHandle { } /// Creates a new [`NetworkEvent`] listener channel. - pub fn event_listener(&self) -> UnboundedReceiverStream { + pub fn event_listener(&self) -> EventStream { self.network.event_listener() } @@ -591,14 +591,14 @@ impl Default for PeerConfig { /// This makes it easier to await established connections #[derive(Debug)] pub struct NetworkEventStream { - inner: UnboundedReceiverStream, + inner: EventStream, } // === impl NetworkEventStream === impl NetworkEventStream { /// Create a new [`NetworkEventStream`] from the given network event receiver stream. 
- pub fn new(inner: UnboundedReceiverStream) -> Self { + pub fn new(inner: EventStream) -> Self { Self { inner } } diff --git a/crates/net/network/src/transactions/mod.rs b/crates/net/network/src/transactions/mod.rs index 070b9c7a147..b6b2328e4f8 100644 --- a/crates/net/network/src/transactions/mod.rs +++ b/crates/net/network/src/transactions/mod.rs @@ -30,6 +30,7 @@ use reth_network_types::PeerId; use reth_primitives::{ FromRecoveredPooledTransaction, PooledTransactionsElement, TransactionSigned, TxHash, B256, }; +use reth_tokio_util::EventStream; use reth_transaction_pool::{ error::{PoolError, PoolResult}, GetPooledTransactionLimit, PoolTransaction, PropagateKind, PropagatedTransactions, @@ -197,7 +198,7 @@ pub struct TransactionsManager { /// Subscriptions to all network related events. /// /// From which we get all new incoming transaction related messages. - network_events: UnboundedReceiverStream, + network_events: EventStream, /// Transaction fetcher to handle inflight and missing transaction requests. transaction_fetcher: TransactionFetcher, /// All currently pending transactions grouped by peers. @@ -880,8 +881,8 @@ where } /// Handles a received event related to common network events. - fn on_network_event(&mut self, event: NetworkEvent) { - match event { + fn on_network_event(&mut self, event_result: NetworkEvent) { + match event_result { NetworkEvent::SessionClosed { peer_id, .. 
} => { // remove the peer self.peers.remove(&peer_id); @@ -1626,6 +1627,7 @@ mod tests { use secp256k1::SecretKey; use std::{fmt, future::poll_fn, hash}; use tests::fetcher::TxFetchMetadata; + use tracing::error; async fn new_tx_manager() -> TransactionsManager { let secret_key = SecretKey::new(&mut rand::thread_rng()); @@ -1734,7 +1736,7 @@ mod tests { } NetworkEvent::PeerAdded(_peer_id) => continue, ev => { - panic!("unexpected event {ev:?}") + error!("unexpected event {ev:?}") } } } @@ -1820,7 +1822,7 @@ mod tests { } NetworkEvent::PeerAdded(_peer_id) => continue, ev => { - panic!("unexpected event {ev:?}") + error!("unexpected event {ev:?}") } } } @@ -1904,7 +1906,7 @@ mod tests { } NetworkEvent::PeerAdded(_peer_id) => continue, ev => { - panic!("unexpected event {ev:?}") + error!("unexpected event {ev:?}") } } } @@ -1992,7 +1994,7 @@ mod tests { }), NetworkEvent::PeerAdded(_peer_id) => continue, ev => { - panic!("unexpected event {ev:?}") + error!("unexpected event {ev:?}") } } } diff --git a/crates/net/p2p/Cargo.toml b/crates/net/p2p/Cargo.toml new file mode 100644 index 00000000000..34705d78e52 --- /dev/null +++ b/crates/net/p2p/Cargo.toml @@ -0,0 +1,48 @@ +[package] +name = "reth-network-p2p" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "traits and commonly used types for p2p and network communication" + +[lints] +workspace = true + +[dependencies] +reth-primitives.workspace = true +reth-network-api.workspace = true +reth-eth-wire-types.workspace = true +reth-consensus.workspace = true +reth-network-types.workspace = true +reth-storage-errors.workspace = true + +# async +futures.workspace = true +tokio = { workspace = true, features = ["sync"] } + +# misc +auto_impl.workspace = true +thiserror.workspace = true +tracing.workspace = true + +secp256k1 = { workspace = true, default-features = false, features = [ + "alloc", + 
"recovery", + "rand", +], optional = true } +parking_lot = { workspace = true, optional = true } +rand = { workspace = true, optional = true } + +[dev-dependencies] +reth-consensus = { workspace = true, features = ["test-utils"] } + +parking_lot.workspace = true +rand.workspace = true +tokio = { workspace = true, features = ["full"] } +secp256k1 = { workspace = true, features = ["alloc", "recovery", "rand"] } + +[features] +test-utils = ["reth-consensus/test-utils", "secp256k1", "rand", "parking_lot"] diff --git a/crates/interfaces/src/p2p/bodies/client.rs b/crates/net/p2p/src/bodies/client.rs similarity index 95% rename from crates/interfaces/src/p2p/bodies/client.rs rename to crates/net/p2p/src/bodies/client.rs index 4b7f3366a24..3a36da50016 100644 --- a/crates/interfaces/src/p2p/bodies/client.rs +++ b/crates/net/p2p/src/bodies/client.rs @@ -3,7 +3,7 @@ use std::{ task::{ready, Context, Poll}, }; -use crate::p2p::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; +use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; use futures::{Future, FutureExt}; use reth_primitives::{BlockBody, B256}; diff --git a/crates/interfaces/src/p2p/bodies/downloader.rs b/crates/net/p2p/src/bodies/downloader.rs similarity index 82% rename from crates/interfaces/src/p2p/bodies/downloader.rs rename to crates/net/p2p/src/bodies/downloader.rs index 86a7698ae84..f7f5e9c92eb 100644 --- a/crates/interfaces/src/p2p/bodies/downloader.rs +++ b/crates/net/p2p/src/bodies/downloader.rs @@ -1,5 +1,5 @@ use super::response::BlockResponse; -use crate::p2p::error::DownloadResult; +use crate::error::DownloadResult; use futures::Stream; use reth_primitives::BlockNumber; use std::ops::RangeInclusive; @@ -10,7 +10,7 @@ pub type BodyDownloaderResult = DownloadResult>; /// A downloader capable of fetching and yielding block bodies from block headers. 
/// /// A downloader represents a distinct strategy for submitting requests to download block bodies, -/// while a [BodiesClient][crate::p2p::bodies::client::BodiesClient] represents a client capable of +/// while a [BodiesClient][crate::bodies::client::BodiesClient] represents a client capable of /// fulfilling these requests. pub trait BodyDownloader: Send + Sync + Stream + Unpin { /// Method for setting the download range. diff --git a/crates/interfaces/src/p2p/bodies/mod.rs b/crates/net/p2p/src/bodies/mod.rs similarity index 100% rename from crates/interfaces/src/p2p/bodies/mod.rs rename to crates/net/p2p/src/bodies/mod.rs diff --git a/crates/interfaces/src/p2p/bodies/response.rs b/crates/net/p2p/src/bodies/response.rs similarity index 100% rename from crates/interfaces/src/p2p/bodies/response.rs rename to crates/net/p2p/src/bodies/response.rs diff --git a/crates/interfaces/src/p2p/download.rs b/crates/net/p2p/src/download.rs similarity index 100% rename from crates/interfaces/src/p2p/download.rs rename to crates/net/p2p/src/download.rs diff --git a/crates/interfaces/src/p2p/either.rs b/crates/net/p2p/src/either.rs similarity index 99% rename from crates/interfaces/src/p2p/either.rs rename to crates/net/p2p/src/either.rs index ed9d50c736f..36e95d487a6 100644 --- a/crates/interfaces/src/p2p/either.rs +++ b/crates/net/p2p/src/either.rs @@ -1,6 +1,6 @@ //! Support for different download types. 
-use crate::p2p::{ +use crate::{ bodies::client::BodiesClient, download::DownloadClient, headers::client::{HeadersClient, HeadersRequest}, diff --git a/crates/interfaces/src/p2p/error.rs b/crates/net/p2p/src/error.rs similarity index 99% rename from crates/interfaces/src/p2p/error.rs rename to crates/net/p2p/src/error.rs index 1a847b64949..3bd469e6056 100644 --- a/crates/interfaces/src/p2p/error.rs +++ b/crates/net/p2p/src/error.rs @@ -1,11 +1,11 @@ use super::headers::client::HeadersRequest; -use crate::{db::DatabaseError, provider::ProviderError}; use reth_consensus::ConsensusError; use reth_network_api::ReputationChangeKind; use reth_network_types::WithPeerId; use reth_primitives::{ BlockHashOrNumber, BlockNumber, GotExpected, GotExpectedBoxed, Header, B256, }; +use reth_storage_errors::{db::DatabaseError, provider::ProviderError}; use std::ops::RangeInclusive; use thiserror::Error; use tokio::sync::{mpsc, oneshot}; diff --git a/crates/interfaces/src/p2p/full_block.rs b/crates/net/p2p/src/full_block.rs similarity index 99% rename from crates/interfaces/src/p2p/full_block.rs rename to crates/net/p2p/src/full_block.rs index dd8cfff4d4c..997ab74bb89 100644 --- a/crates/interfaces/src/p2p/full_block.rs +++ b/crates/net/p2p/src/full_block.rs @@ -1,5 +1,5 @@ use super::headers::client::HeadersRequest; -use crate::p2p::{ +use crate::{ bodies::client::{BodiesClient, SingleBodyRequest}, error::PeerRequestResult, headers::client::{HeadersClient, SingleHeaderRequest}, @@ -727,11 +727,10 @@ enum RangeResponseResult { #[cfg(test)] mod tests { - use std::ops::Range; - use super::*; use crate::test_utils::TestFullBlockClient; use futures::StreamExt; + use std::ops::Range; #[tokio::test] async fn download_single_full_block() { diff --git a/crates/interfaces/src/p2p/headers/client.rs b/crates/net/p2p/src/headers/client.rs similarity index 96% rename from crates/interfaces/src/p2p/headers/client.rs rename to crates/net/p2p/src/headers/client.rs index cf535530869..5b70aa1e528 
100644 --- a/crates/interfaces/src/p2p/headers/client.rs +++ b/crates/net/p2p/src/headers/client.rs @@ -1,4 +1,4 @@ -use crate::p2p::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; +use crate::{download::DownloadClient, error::PeerRequestResult, priority::Priority}; use futures::{Future, FutureExt}; pub use reth_eth_wire_types::BlockHeaders; use reth_primitives::{BlockHashOrNumber, Header, HeadersDirection}; diff --git a/crates/interfaces/src/p2p/headers/downloader.rs b/crates/net/p2p/src/headers/downloader.rs similarity index 95% rename from crates/interfaces/src/p2p/headers/downloader.rs rename to crates/net/p2p/src/headers/downloader.rs index 500a1a1bc84..b52a8487710 100644 --- a/crates/interfaces/src/p2p/headers/downloader.rs +++ b/crates/net/p2p/src/headers/downloader.rs @@ -1,12 +1,12 @@ use super::error::HeadersDownloaderResult; -use crate::p2p::error::{DownloadError, DownloadResult}; +use crate::error::{DownloadError, DownloadResult}; use futures::Stream; use reth_consensus::Consensus; use reth_primitives::{BlockHashOrNumber, SealedHeader, B256}; /// A downloader capable of fetching and yielding block headers. /// /// A downloader represents a distinct strategy for submitting requests to download block headers, -/// while a [HeadersClient][crate::p2p::headers::client::HeadersClient] represents a client capable +/// while a [HeadersClient][crate::headers::client::HeadersClient] represents a client capable /// of fulfilling these requests. /// /// A [HeaderDownloader] is a [Stream] that returns batches of headers. 
diff --git a/crates/interfaces/src/p2p/headers/error.rs b/crates/net/p2p/src/headers/error.rs similarity index 100% rename from crates/interfaces/src/p2p/headers/error.rs rename to crates/net/p2p/src/headers/error.rs diff --git a/crates/interfaces/src/p2p/headers/mod.rs b/crates/net/p2p/src/headers/mod.rs similarity index 100% rename from crates/interfaces/src/p2p/headers/mod.rs rename to crates/net/p2p/src/headers/mod.rs diff --git a/crates/interfaces/src/p2p/mod.rs b/crates/net/p2p/src/lib.rs similarity index 51% rename from crates/interfaces/src/p2p/mod.rs rename to crates/net/p2p/src/lib.rs index 75f3a8fc4c5..310afc79981 100644 --- a/crates/interfaces/src/p2p/mod.rs +++ b/crates/net/p2p/src/lib.rs @@ -1,3 +1,16 @@ +//! Provides abstractions and commonly used types for p2p. +//! +//! ## Feature Flags +//! +//! - `test-utils`: Export utilities for testing +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + /// Shared abstractions for downloader implementations. pub mod download; @@ -15,7 +28,7 @@ pub mod full_block; /// [`HeadersClient`]. /// /// [`Consensus`]: reth_consensus::Consensus -/// [`HeadersClient`]: crate::p2p::headers::client::HeadersClient +/// [`HeadersClient`]: crate::headers::client::HeadersClient pub mod headers; /// Error types broadly used by p2p interfaces for any operation which may produce an error when @@ -24,3 +37,10 @@ pub mod error; /// Priority enum for BlockHeader and BlockBody requests pub mod priority; + +/// Syncing related traits. +pub mod sync; + +/// Common test helpers for mocking out Consensus, Downloaders and Header Clients. 
+#[cfg(any(test, feature = "test-utils"))] +pub mod test_utils; diff --git a/crates/interfaces/src/p2p/priority.rs b/crates/net/p2p/src/priority.rs similarity index 100% rename from crates/interfaces/src/p2p/priority.rs rename to crates/net/p2p/src/priority.rs diff --git a/crates/interfaces/src/sync.rs b/crates/net/p2p/src/sync.rs similarity index 100% rename from crates/interfaces/src/sync.rs rename to crates/net/p2p/src/sync.rs diff --git a/crates/interfaces/src/test_utils/bodies.rs b/crates/net/p2p/src/test_utils/bodies.rs similarity index 98% rename from crates/interfaces/src/test_utils/bodies.rs rename to crates/net/p2p/src/test_utils/bodies.rs index 8f0bfcef09f..46bd3ec9b88 100644 --- a/crates/interfaces/src/test_utils/bodies.rs +++ b/crates/net/p2p/src/test_utils/bodies.rs @@ -1,4 +1,4 @@ -use crate::p2p::{ +use crate::{ bodies::client::{BodiesClient, BodiesFut}, download::DownloadClient, error::PeerRequestResult, diff --git a/crates/interfaces/src/test_utils/full_block.rs b/crates/net/p2p/src/test_utils/full_block.rs similarity index 99% rename from crates/interfaces/src/test_utils/full_block.rs rename to crates/net/p2p/src/test_utils/full_block.rs index 95c1c2b3a0f..c0a26539f87 100644 --- a/crates/interfaces/src/test_utils/full_block.rs +++ b/crates/net/p2p/src/test_utils/full_block.rs @@ -1,4 +1,4 @@ -use crate::p2p::{ +use crate::{ bodies::client::BodiesClient, download::DownloadClient, error::PeerRequestResult, diff --git a/crates/interfaces/src/test_utils/generators.rs b/crates/net/p2p/src/test_utils/generators.rs similarity index 98% rename from crates/interfaces/src/test_utils/generators.rs rename to crates/net/p2p/src/test_utils/generators.rs index 506358276c7..9da1429ea51 100644 --- a/crates/interfaces/src/test_utils/generators.rs +++ b/crates/net/p2p/src/test_utils/generators.rs @@ -14,9 +14,6 @@ use std::{ ops::{Range, RangeInclusive}, }; -// TODO(onbjerg): Maybe we should split this off to its own crate, or move the helpers to the -// relevant 
crates? - /// Returns a random number generator that can be seeded using the `SEED` environment variable. /// /// If `SEED` is not set, a random seed is used. @@ -353,6 +350,7 @@ pub fn random_receipt( ) -> Receipt { let success = rng.gen::(); let logs_count = logs_count.unwrap_or_else(|| rng.gen::()); + #[allow(clippy::needless_update)] // side-effect of optimism fields Receipt { tx_type: transaction.tx_type(), success, @@ -362,10 +360,7 @@ pub fn random_receipt( } else { vec![] }, - #[cfg(feature = "optimism")] - deposit_nonce: None, - #[cfg(feature = "optimism")] - deposit_receipt_version: None, + ..Default::default() } } diff --git a/crates/interfaces/src/test_utils/headers.rs b/crates/net/p2p/src/test_utils/headers.rs similarity index 99% rename from crates/interfaces/src/test_utils/headers.rs rename to crates/net/p2p/src/test_utils/headers.rs index 0272c68d304..354732c2d2f 100644 --- a/crates/interfaces/src/test_utils/headers.rs +++ b/crates/net/p2p/src/test_utils/headers.rs @@ -1,19 +1,6 @@ //! Testing support for headers related interfaces. -use std::{ - fmt, - pin::Pin, - sync::{ - atomic::{AtomicU64, Ordering}, - Arc, - }, - task::{ready, Context, Poll}, -}; - -use futures::{Future, FutureExt, Stream, StreamExt}; -use tokio::sync::Mutex; - -use crate::p2p::{ +use crate::{ download::DownloadClient, error::{DownloadError, DownloadResult, PeerRequestResult, RequestError}, headers::{ @@ -23,9 +10,20 @@ use crate::p2p::{ }, priority::Priority, }; +use futures::{Future, FutureExt, Stream, StreamExt}; use reth_consensus::{test_utils::TestConsensus, Consensus}; use reth_network_types::{PeerId, WithPeerId}; use reth_primitives::{Header, HeadersDirection, SealedHeader}; +use std::{ + fmt, + pin::Pin, + sync::{ + atomic::{AtomicU64, Ordering}, + Arc, + }, + task::{ready, Context, Poll}, +}; +use tokio::sync::Mutex; /// A test downloader which just returns the values that have been pushed to it. 
#[derive(Debug)] diff --git a/crates/interfaces/src/test_utils/mod.rs b/crates/net/p2p/src/test_utils/mod.rs similarity index 100% rename from crates/interfaces/src/test_utils/mod.rs rename to crates/net/p2p/src/test_utils/mod.rs diff --git a/crates/node-core/Cargo.toml b/crates/node-core/Cargo.toml index b0ed1fae159..787c68c9d1b 100644 --- a/crates/node-core/Cargo.toml +++ b/crates/node-core/Cargo.toml @@ -18,28 +18,24 @@ reth-db = { workspace = true, features = ["mdbx"] } reth-interfaces = { workspace = true, features = ["clap"] } reth-provider.workspace = true reth-network = { workspace = true, features = ["serde"] } -reth-rpc-engine-api.workspace = true reth-rpc-builder.workspace = true reth-rpc.workspace = true reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-rpc-api = { workspace = true, features = ["client"] } -reth-rpc-layer.workspace = true reth-transaction-pool.workspace = true reth-tracing.workspace = true reth-config.workspace = true reth-discv4.workspace = true reth-discv5.workspace = true reth-net-nat.workspace = true -reth-network-api.workspace = true -reth-evm.workspace = true reth-engine-primitives.workspace = true reth-tasks.workspace = true -reth-trie.workspace = true reth-consensus-common.workspace = true reth-beacon-consensus.workspace = true -reth-etl.workspace = true -reth-codecs.workspace = true + +# ethereum +alloy-rpc-types-engine.workspace = true # ethereum discv5.workspace = true @@ -104,9 +100,7 @@ assert_matches.workspace = true [features] optimism = [ "reth-primitives/optimism", - "reth-interfaces/optimism", "reth-rpc/optimism", - "reth-rpc-engine-api/optimism", "reth-provider/optimism", "reth-rpc-types-compat/optimism", "reth-beacon-consensus/optimism", diff --git a/crates/node-core/src/args/network.rs b/crates/node-core/src/args/network.rs index 350e7c4a1b1..115ec8517ab 100644 --- a/crates/node-core/src/args/network.rs +++ b/crates/node-core/src/args/network.rs @@ -145,8 +145,11 @@ impl NetworkArgs { ), }; 
// Configure basic network stack - let mut network_config_builder = config - .network_config(self.nat, self.persistent_peers_file(peers_file), secret_key) + let mut network_config_builder = NetworkConfigBuilder::new(secret_key) + .peer_config(config.peers_config_with_basic_nodes_from_file( + self.persistent_peers_file(peers_file).as_deref(), + )) + .external_ip_resolver(self.nat) .sessions_config( SessionsConfig::default().with_upscaled_event_buffer(peers_config.max_peers()), ) diff --git a/crates/node-core/src/args/pruning.rs b/crates/node-core/src/args/pruning.rs index 4adc721586b..e585a216dc7 100644 --- a/crates/node-core/src/args/pruning.rs +++ b/crates/node-core/src/args/pruning.rs @@ -20,7 +20,7 @@ impl PruningArgs { /// Returns pruning configuration. pub fn prune_config(&self, chain_spec: &ChainSpec) -> Option { if !self.full { - return None; + return None } Some(PruneConfig { block_interval: 5, diff --git a/crates/node-core/src/args/rpc_server.rs b/crates/node-core/src/args/rpc_server.rs index f67ef6acb74..9bf433b7aa4 100644 --- a/crates/node-core/src/args/rpc_server.rs +++ b/crates/node-core/src/args/rpc_server.rs @@ -8,32 +8,20 @@ use crate::{ cli::config::RethRpcConfig, utils::get_or_create_jwt_secret_from_path, }; +use alloy_rpc_types_engine::{JwtError, JwtSecret}; use clap::{ builder::{PossibleValue, RangedU64ValueParser, TypedValueParser}, Arg, Args, Command, }; use rand::Rng; -use reth_engine_primitives::EngineTypes; -use reth_evm::ConfigureEvm; -use reth_network_api::{NetworkInfo, Peers}; -use reth_provider::{ - AccountReader, BlockReaderIdExt, CanonStateSubscriptions, ChainSpecProvider, ChangeSetReader, - EvmEnvProvider, HeaderProvider, StateProviderFactory, -}; use reth_rpc::eth::{ cache::EthStateCacheConfig, gas_oracle::GasPriceOracleConfig, RPC_DEFAULT_GAS_CAP, }; use reth_rpc_builder::{ - auth::{AuthServerConfig, AuthServerHandle}, - constants, - error::RpcError, - EthConfig, Identity, IpcServerBuilder, RethRpcModule, RpcModuleConfig, 
RpcModuleSelection, - RpcServerConfig, RpcServerHandle, ServerBuilder, TransportRpcModuleConfig, + auth::AuthServerConfig, constants, error::RpcError, EthConfig, Identity, IpcServerBuilder, + RethRpcModule, RpcModuleConfig, RpcModuleSelection, RpcServerConfig, ServerBuilder, + TransportRpcModuleConfig, }; -use reth_rpc_engine_api::EngineApi; -use reth_rpc_layer::{JwtError, JwtSecret}; -use reth_tasks::TaskSpawner; -use reth_transaction_pool::TransactionPool; use std::{ ffi::OsStr, net::{IpAddr, Ipv4Addr, SocketAddr}, @@ -277,88 +265,6 @@ impl RpcServerArgs { self = self.with_ipc_random_path(); self } - - /// Convenience function for starting a rpc server with configs which extracted from cli args. - pub async fn start_rpc_server( - &self, - provider: Provider, - pool: Pool, - network: Network, - executor: Tasks, - events: Events, - evm_config: EvmConfig, - ) -> Result - where - Provider: BlockReaderIdExt - + AccountReader - + HeaderProvider - + StateProviderFactory - + EvmEnvProvider - + ChainSpecProvider - + ChangeSetReader - + Clone - + Unpin - + 'static, - Pool: TransactionPool + Clone + 'static, - Network: NetworkInfo + Peers + Clone + 'static, - Tasks: TaskSpawner + Clone + 'static, - Events: CanonStateSubscriptions + Clone + 'static, - EvmConfig: ConfigureEvm + 'static, - { - reth_rpc_builder::launch( - provider, - pool, - network, - self.transport_rpc_module_config(), - self.rpc_server_config(), - executor, - events, - evm_config, - ) - .await - } - - /// Create Engine API server. 
- #[allow(clippy::too_many_arguments)] - pub async fn start_auth_server( - &self, - provider: Provider, - pool: Pool, - network: Network, - executor: Tasks, - engine_api: EngineApi, - jwt_secret: JwtSecret, - evm_config: EvmConfig, - ) -> Result - where - Provider: BlockReaderIdExt - + ChainSpecProvider - + EvmEnvProvider - + HeaderProvider - + StateProviderFactory - + Clone - + Unpin - + 'static, - Pool: TransactionPool + Clone + 'static, - Network: NetworkInfo + Peers + Clone + 'static, - Tasks: TaskSpawner + Clone + 'static, - EngineT: EngineTypes + 'static, - EvmConfig: ConfigureEvm + 'static, - { - let socket_address = SocketAddr::new(self.auth_addr, self.auth_port); - - reth_rpc_builder::auth::launch( - provider, - pool, - network, - executor, - engine_api, - socket_address, - jwt_secret, - evm_config, - ) - .await - } } impl RethRpcConfig for RpcServerArgs { diff --git a/crates/node-core/src/args/utils.rs b/crates/node-core/src/args/utils.rs index 72b84914f5b..4f49bf1349e 100644 --- a/crates/node-core/src/args/utils.rs +++ b/crates/node-core/src/args/utils.rs @@ -141,7 +141,7 @@ pub enum SocketAddressParsingError { /// The following formats are checked: /// /// - If the value can be parsed as a `u16` or starts with `:` it is considered a port, and the -/// hostname is set to `localhost`. +/// hostname is set to `localhost`. /// - If the value contains `:` it is assumed to be the format `:` /// - Otherwise it is assumed to be a hostname /// diff --git a/crates/node-core/src/cli/config.rs b/crates/node-core/src/cli/config.rs index 45838320128..6e5d1f6a2a8 100644 --- a/crates/node-core/src/cli/config.rs +++ b/crates/node-core/src/cli/config.rs @@ -1,5 +1,6 @@ //! Config traits for various node components. 
+use alloy_rpc_types_engine::{JwtError, JwtSecret}; use reth_network::protocol::IntoRlpxSubProtocol; use reth_primitives::Bytes; use reth_rpc::eth::{cache::EthStateCacheConfig, gas_oracle::GasPriceOracleConfig}; @@ -7,7 +8,6 @@ use reth_rpc_builder::{ auth::AuthServerConfig, error::RpcError, EthConfig, Identity, IpcServerBuilder, RpcServerConfig, ServerBuilder, TransportRpcModuleConfig, }; -use reth_rpc_layer::{JwtError, JwtSecret}; use reth_transaction_pool::PoolConfig; use std::{borrow::Cow, path::PathBuf, time::Duration}; diff --git a/crates/node-core/src/dirs.rs b/crates/node-core/src/dirs.rs index 75919f6f0fc..b33df18f26f 100644 --- a/crates/node-core/src/dirs.rs +++ b/crates/node-core/src/dirs.rs @@ -257,6 +257,7 @@ impl From for MaybePlatformPath { /// * mainnet: `/mainnet` /// * goerli: `/goerli` /// * sepolia: `/sepolia` +/// /// Otherwise, the path will be dependent on the chain ID: /// * `/` #[derive(Clone, Debug, PartialEq, Eq)] diff --git a/crates/node-core/src/engine/engine_store.rs b/crates/node-core/src/engine/engine_store.rs index 2a1ffc3b0ed..d59651ce9ca 100644 --- a/crates/node-core/src/engine/engine_store.rs +++ b/crates/node-core/src/engine/engine_store.rs @@ -89,8 +89,7 @@ impl EngineMessageStore { )?; } // noop - BeaconEngineMessage::TransitionConfigurationExchanged | - BeaconEngineMessage::EventListener(_) => (), + BeaconEngineMessage::TransitionConfigurationExchanged => (), }; Ok(()) } diff --git a/crates/node-core/src/lib.rs b/crates/node-core/src/lib.rs index 024467ab16c..956b3ad3c3a 100644 --- a/crates/node-core/src/lib.rs +++ b/crates/node-core/src/lib.rs @@ -13,7 +13,6 @@ pub mod cli; pub mod dirs; pub mod engine; pub mod exit; -pub mod init; pub mod metrics; pub mod node_config; pub mod utils; diff --git a/crates/node-core/src/node_config.rs b/crates/node-core/src/node_config.rs index 52333c14714..5cb251f8afe 100644 --- a/crates/node-core/src/node_config.rs +++ b/crates/node-core/src/node_config.rs @@ -411,7 +411,7 @@ impl NodeConfig 
{ // try to look up the header in the database if let Some(header) = header { info!(target: "reth::cli", ?tip, "Successfully looked up tip block in the database"); - return Ok(header.number); + return Ok(header.number) } Ok(self.fetch_tip_from_network(client, tip.into()).await?.number) @@ -434,7 +434,7 @@ impl NodeConfig { match get_single_header(&client, tip).await { Ok(tip_header) => { info!(target: "reth::cli", ?tip, "Successfully fetched tip"); - return Ok(tip_header); + return Ok(tip_header) } Err(error) => { fetch_failures += 1; diff --git a/crates/node-core/src/utils.rs b/crates/node-core/src/utils.rs index f9b4ff599ca..963f863c5aa 100644 --- a/crates/node-core/src/utils.rs +++ b/crates/node-core/src/utils.rs @@ -2,7 +2,7 @@ //! blocks from the network. use eyre::Result; -use reth_consensus_common::validation::validate_block_standalone; +use reth_consensus_common::validation::validate_block_pre_execution; use reth_fs_util as fs; use reth_interfaces::p2p::{ bodies::client::BodiesClient, @@ -121,7 +121,7 @@ where withdrawals: block.withdrawals, }; - validate_block_standalone(&block, &chain_spec)?; + validate_block_pre_execution(&block, &chain_spec)?; Ok(block) } diff --git a/crates/node-core/src/version.rs b/crates/node-core/src/version.rs index 868fa933ea4..79190531b30 100644 --- a/crates/node-core/src/version.rs +++ b/crates/node-core/src/version.rs @@ -1,6 +1,12 @@ //! Version information for reth. - use reth_db::models::client_version::ClientVersion; +use reth_rpc_types::engine::ClientCode; + +/// The client code for Reth +pub const CLIENT_CODE: ClientCode = ClientCode::RH; + +/// The human readable name of the client +pub const NAME_CLIENT: &str = "Reth"; /// The latest version from Cargo.toml. 
pub const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); diff --git a/crates/node/builder/Cargo.toml b/crates/node/builder/Cargo.toml index e36ac2e2c39..55b0094a631 100644 --- a/crates/node/builder/Cargo.toml +++ b/crates/node/builder/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-auto-seal-consensus.workspace = true reth-beacon-consensus.workspace = true reth-blockchain-tree.workspace = true +reth-db-common.workspace = true reth-exex.workspace = true reth-evm.workspace = true reth-provider.workspace = true @@ -39,7 +40,7 @@ reth-config.workspace = true reth-downloaders.workspace = true reth-node-events.workspace = true reth-consensus.workspace = true - +reth-rpc-types.workspace = true ## async futures.workspace = true tokio = { workspace = true, features = [ diff --git a/crates/node/builder/src/launch/common.rs b/crates/node/builder/src/launch/common.rs index 8a5d8e51900..b6b0a03c83f 100644 --- a/crates/node/builder/src/launch/common.rs +++ b/crates/node/builder/src/launch/common.rs @@ -9,11 +9,11 @@ use tokio::sync::mpsc::Receiver; use reth_auto_seal_consensus::MiningMode; use reth_config::{config::EtlConfig, PruneConfig}; use reth_db::{database::Database, database_metrics::DatabaseMetrics}; +use reth_db_common::init::{init_genesis, InitDatabaseError}; use reth_interfaces::p2p::headers::client::HeadersClient; use reth_node_core::{ cli::config::RethRpcConfig, dirs::{ChainPath, DataDirPath}, - init::{init_genesis, InitDatabaseError}, node_config::NodeConfig, }; use reth_primitives::{BlockNumber, Chain, ChainSpec, Head, PruneModes, B256}; diff --git a/crates/node/builder/src/launch/mod.rs b/crates/node/builder/src/launch/mod.rs index ece149e31dc..fec043c7fa0 100644 --- a/crates/node/builder/src/launch/mod.rs +++ b/crates/node/builder/src/launch/mod.rs @@ -25,11 +25,13 @@ use reth_node_core::{ dirs::{ChainPath, DataDirPath}, engine::EngineMessageStreamExt, exit::NodeExitFuture, + version::{CARGO_PKG_VERSION, CLIENT_CODE, NAME_CLIENT, VERGEN_GIT_SHA}, }; use 
reth_node_events::{cl::ConsensusLayerHealthEvents, node}; use reth_primitives::format_ether; use reth_provider::{providers::BlockchainProvider, CanonStateSubscriptions}; use reth_rpc_engine_api::EngineApi; +use reth_rpc_types::engine::ClientVersionV1; use reth_tasks::TaskExecutor; use reth_tracing::tracing::{debug, info}; use reth_transaction_pool::TransactionPool; @@ -282,7 +284,7 @@ where // Configure the pipeline let pipeline_exex_handle = exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty); - let (mut pipeline, client) = if ctx.is_dev() { + let (pipeline, client) = if ctx.is_dev() { info!(target: "reth::cli", "Starting Reth in dev mode"); for (idx, (address, alloc)) in ctx.chain_spec().genesis.alloc.iter().enumerate() { @@ -305,7 +307,7 @@ where ) .build(); - let mut pipeline = crate::setup::build_networked_pipeline( + let pipeline = crate::setup::build_networked_pipeline( ctx.node_config(), &ctx.toml_config().stages, client.clone(), @@ -358,7 +360,7 @@ where pruner_builder.finished_exex_height(exex_manager_handle.finished_height()); } - let mut pruner = pruner_builder.build(ctx.provider_factory().clone()); + let pruner = pruner_builder.build(ctx.provider_factory().clone()); let pruner_events = pruner.events(); info!(target: "reth::cli", prune_config=?ctx.prune_config().unwrap_or_default(), "Pruner initialized"); @@ -395,7 +397,7 @@ where Either::Right(stream::empty()) }, pruner_events.map(Into::into), - static_file_producer_events.map(Into::into) + static_file_producer_events.map(Into::into), ); ctx.task_executor().spawn_critical( "events task", @@ -407,12 +409,19 @@ where ), ); + let client = ClientVersionV1 { + code: CLIENT_CODE, + name: NAME_CLIENT.to_string(), + version: CARGO_PKG_VERSION.to_string(), + commit: VERGEN_GIT_SHA.to_string(), + }; let engine_api = EngineApi::new( blockchain_db.clone(), ctx.chain_spec(), beacon_engine_handle, node_adapter.components.payload_builder().clone().into(), Box::new(ctx.task_executor().clone()), + 
client, ); info!(target: "reth::cli", "Engine API handler initialized"); diff --git a/crates/node/events/src/node.rs b/crates/node/events/src/node.rs index ba7ae8da460..8c01c0a737a 100644 --- a/crates/node/events/src/node.rs +++ b/crates/node/events/src/node.rs @@ -315,7 +315,7 @@ impl NodeState { warn!("Beacon client online, but never received consensus updates. Please ensure your beacon client is operational to follow the chain!") } ConsensusLayerHealthEvent::HaveNotReceivedUpdatesForAWhile(period) => { - warn!(?period, "Beacon client online, but no consensus updates received for a while. Please fix your beacon client to follow the chain!") + warn!(?period, "Beacon client online, but no consensus updates received for a while. This may be because of a reth error, or an error in the beacon client! Please investigate reth and beacon client logs!") } } } @@ -392,6 +392,9 @@ pub enum NodeEvent { Pruner(PrunerEvent), /// A static_file_producer event StaticFileProducer(StaticFileProducerEvent), + /// Used to encapsulate various conditions or situations that do not + /// naturally fit into the other more specific variants. 
+ Other(String), } impl From for NodeEvent { @@ -575,6 +578,9 @@ where NodeEvent::StaticFileProducer(event) => { this.state.handle_static_file_producer_event(event); } + NodeEvent::Other(event_description) => { + warn!("{event_description}"); + } } } diff --git a/crates/optimism/consensus/Cargo.toml b/crates/optimism/consensus/Cargo.toml index 4ebbaa8d8af..a2a5edb5d5c 100644 --- a/crates/optimism/consensus/Cargo.toml +++ b/crates/optimism/consensus/Cargo.toml @@ -20,4 +20,4 @@ reth-consensus.workspace = true [features] optimism = [ "reth-primitives/optimism", -] \ No newline at end of file +] diff --git a/crates/optimism/consensus/src/lib.rs b/crates/optimism/consensus/src/lib.rs index 4deea287962..09f9c1f38d1 100644 --- a/crates/optimism/consensus/src/lib.rs +++ b/crates/optimism/consensus/src/lib.rs @@ -10,10 +10,18 @@ #![cfg(feature = "optimism")] use reth_consensus::{Consensus, ConsensusError}; -use reth_consensus_common::{validation, validation::validate_header_extradata}; -use reth_primitives::{ChainSpec, Header, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, U256}; +use reth_consensus_common::validation::{ + validate_block_pre_execution, validate_header_extradata, validate_header_standalone, +}; +use reth_primitives::{ + BlockWithSenders, ChainSpec, Header, Receipt, SealedBlock, SealedHeader, EMPTY_OMMER_ROOT_HASH, + U256, +}; use std::{sync::Arc, time::SystemTime}; +mod validation; +pub use validation::validate_block_post_execution; + /// Optimism consensus implementation. /// /// Provides basic checks as outlined in the execution specs. 
@@ -37,7 +45,7 @@ impl OptimismBeaconConsensus { impl Consensus for OptimismBeaconConsensus { fn validate_header(&self, header: &SealedHeader) -> Result<(), ConsensusError> { - validation::validate_header_standalone(header, &self.chain_spec)?; + validate_header_standalone(header, &self.chain_spec)?; Ok(()) } @@ -96,7 +104,15 @@ impl Consensus for OptimismBeaconConsensus { Ok(()) } - fn validate_block(&self, block: &SealedBlock) -> Result<(), ConsensusError> { - validation::validate_block_standalone(block, &self.chain_spec) + fn validate_block_pre_execution(&self, block: &SealedBlock) -> Result<(), ConsensusError> { + validate_block_pre_execution(block, &self.chain_spec) + } + + fn validate_block_post_execution( + &self, + block: &BlockWithSenders, + receipts: &[Receipt], + ) -> Result<(), ConsensusError> { + validate_block_post_execution(block, &self.chain_spec, receipts) } } diff --git a/crates/optimism/consensus/src/validation.rs b/crates/optimism/consensus/src/validation.rs new file mode 100644 index 00000000000..cf9b849af47 --- /dev/null +++ b/crates/optimism/consensus/src/validation.rs @@ -0,0 +1,90 @@ +use reth_consensus::ConsensusError; +use reth_primitives::{ + gas_spent_by_transactions, proofs::calculate_receipt_root_optimism, BlockWithSenders, Bloom, + ChainSpec, GotExpected, Receipt, B256, +}; + +/// Validate a block with regard to execution results: +/// +/// - Compares the receipts root in the block header to the block body +/// - Compares the gas used in the block header to the actual gas usage after execution +pub fn validate_block_post_execution( + block: &BlockWithSenders, + chain_spec: &ChainSpec, + receipts: &[Receipt], +) -> Result<(), ConsensusError> { + // Before Byzantium, receipts contained state root that would mean that expensive + // operation as hashing that is required for state root got calculated in every + // transaction This was replaced with is_success flag. 
+ // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 + if chain_spec.is_byzantium_active_at_block(block.header.number) { + verify_receipts( + block.header.receipts_root, + block.header.logs_bloom, + receipts, + chain_spec, + block.timestamp, + )?; + } + + // Check if gas used matches the value set in header. + let cumulative_gas_used = + receipts.last().map(|receipt| receipt.cumulative_gas_used).unwrap_or(0); + if block.gas_used != cumulative_gas_used { + return Err(ConsensusError::BlockGasUsed { + gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, + gas_spent_by_tx: gas_spent_by_transactions(receipts), + }) + } + + Ok(()) +} + +/// Verify the calculated receipts root against the expected receipts root. +fn verify_receipts( + expected_receipts_root: B256, + expected_logs_bloom: Bloom, + receipts: &[Receipt], + chain_spec: &ChainSpec, + timestamp: u64, +) -> Result<(), ConsensusError> { + // Calculate receipts root. + let receipts_with_bloom = receipts.iter().cloned().map(Receipt::with_bloom).collect::>(); + let receipts_root = + calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); + + // Calculate header logs bloom. + let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); + + compare_receipts_root_and_logs_bloom( + receipts_root, + logs_bloom, + expected_receipts_root, + expected_logs_bloom, + )?; + + Ok(()) +} + +/// Compare the calculated receipts root with the expected receipts root, also compare +/// the calculated logs bloom with the expected logs bloom. 
+fn compare_receipts_root_and_logs_bloom( + calculated_receipts_root: B256, + calculated_logs_bloom: Bloom, + expected_receipts_root: B256, + expected_logs_bloom: Bloom, +) -> Result<(), ConsensusError> { + if calculated_receipts_root != expected_receipts_root { + return Err(ConsensusError::BodyReceiptRootDiff( + GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), + )) + } + + if calculated_logs_bloom != expected_logs_bloom { + return Err(ConsensusError::BodyBloomLogDiff( + GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), + )) + } + + Ok(()) +} diff --git a/crates/optimism/evm/Cargo.toml b/crates/optimism/evm/Cargo.toml index a1c3a168bda..0423f1bd7dd 100644 --- a/crates/optimism/evm/Cargo.toml +++ b/crates/optimism/evm/Cargo.toml @@ -15,10 +15,14 @@ workspace = true reth-evm.workspace = true reth-primitives.workspace = true reth-revm.workspace = true -reth-interfaces.workspace = true +reth-execution-errors.workspace = true reth-provider.workspace = true +reth-consensus-common.workspace = true # Optimism +reth-optimism-consensus.workspace = true + +# revm revm.workspace = true revm-primitives.workspace = true @@ -33,6 +37,6 @@ reth-revm = { workspace = true, features = ["test-utils"] } optimism = [ "reth-primitives/optimism", "reth-provider/optimism", - "reth-interfaces/optimism", "revm-primitives/optimism", + "reth-optimism-consensus/optimism", ] diff --git a/crates/optimism/evm/src/error.rs b/crates/optimism/evm/src/error.rs index de923d44ca4..1041f30c811 100644 --- a/crates/optimism/evm/src/error.rs +++ b/crates/optimism/evm/src/error.rs @@ -1,6 +1,6 @@ //! Error types for the Optimism EVM module. 
-use reth_interfaces::executor::BlockExecutionError; +use reth_evm::execute::BlockExecutionError; /// Optimism Block Executor Errors #[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)] diff --git a/crates/optimism/evm/src/execute.rs b/crates/optimism/evm/src/execute.rs index f729ceda1c7..7df033dc55f 100644 --- a/crates/optimism/evm/src/execute.rs +++ b/crates/optimism/evm/src/execute.rs @@ -1,23 +1,17 @@ //! Optimism block executor. -use crate::{ - l1::ensure_create2_deployer, verify::verify_receipts, OptimismBlockExecutionError, - OptimismEvmConfig, -}; +use crate::{l1::ensure_create2_deployer, OptimismBlockExecutionError, OptimismEvmConfig}; use reth_evm::{ execute::{ - BatchBlockExecutionOutput, BatchExecutor, BlockExecutionInput, BlockExecutionOutput, - BlockExecutorProvider, Executor, + BatchBlockExecutionOutput, BatchExecutor, BlockExecutionError, BlockExecutionInput, + BlockExecutionOutput, BlockExecutorProvider, BlockValidationError, Executor, ProviderError, }, ConfigureEvm, }; -use reth_interfaces::{ - executor::{BlockExecutionError, BlockValidationError}, - provider::ProviderError, -}; +use reth_optimism_consensus::validate_block_post_execution; use reth_primitives::{ - BlockNumber, BlockWithSenders, ChainSpec, GotExpected, Hardfork, Header, PruneModes, Receipt, - Receipts, TxType, Withdrawals, U256, + BlockNumber, BlockWithSenders, ChainSpec, Hardfork, Header, PruneModes, Receipt, Receipts, + TxType, Withdrawals, U256, }; use reth_revm::{ batch::{BlockBatchRecord, BlockExecutorStats}, @@ -30,7 +24,7 @@ use revm_primitives::{ BlockEnv, CfgEnvWithHandlerCfg, EnvWithHandlerCfg, ResultAndState, }; use std::sync::Arc; -use tracing::{debug, trace}; +use tracing::trace; /// Provides executors to execute regular ethereum blocks #[derive(Debug, Clone)] @@ -157,12 +151,12 @@ where transaction_gas_limit: transaction.gas_limit(), block_available_gas, } - .into()); + .into()) } // An optimism block should never contain blob transactions. 
if matches!(transaction.tx_type(), TxType::Eip4844) { - return Err(OptimismBlockExecutionError::BlobTransactionRejected.into()); + return Err(OptimismBlockExecutionError::BlobTransactionRejected.into()) } // Cache the depositor account prior to the state transition for the deposit nonce. @@ -221,16 +215,6 @@ where } drop(evm); - // Check if gas used matches the value set in header. - if block.gas_used != cumulative_gas_used { - let receipts = Receipts::from_block_receipt(receipts); - return Err(BlockValidationError::BlockGasUsed { - gas: GotExpected { got: cumulative_gas_used, expected: block.gas_used }, - gas_spent_by_tx: receipts.gas_spent_by_tx()?, - } - .into()); - } - Ok((receipts, cumulative_gas_used)) } } @@ -292,8 +276,8 @@ where /// /// Returns the receipts of the transactions in the block and the total gas used. /// - /// Returns an error if execution fails or receipt verification fails. - fn execute_and_verify( + /// Returns an error if execution fails. + fn execute_without_verification( &mut self, block: &BlockWithSenders, total_difficulty: U256, @@ -312,23 +296,6 @@ where // 3. apply post execution changes self.post_execution(block, total_difficulty)?; - // Before Byzantium, receipts contained state root that would mean that expensive - // operation as hashing that is required for state root got calculated in every - // transaction This was replaced with is_success flag. - // See more about EIP here: https://eips.ethereum.org/EIPS/eip-658 - if self.chain_spec().is_byzantium_active_at_block(block.header.number) { - if let Err(error) = verify_receipts( - block.header.receipts_root, - block.header.logs_bloom, - receipts.iter(), - self.chain_spec(), - block.timestamp, - ) { - debug!(target: "evm", %error, ?receipts, "receipts verification failed"); - return Err(error); - }; - } - Ok((receipts, gas_used)) } @@ -383,7 +350,7 @@ where /// State changes are committed to the database. 
fn execute(mut self, input: Self::Input<'_>) -> Result { let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, gas_used) = self.execute_and_verify(block, total_difficulty)?; + let (receipts, gas_used) = self.execute_without_verification(block, total_difficulty)?; // NOTE: we need to merge keep the reverts for the bundle retention self.state.merge_transitions(BundleRetention::Reverts); @@ -426,9 +393,12 @@ where type Output = BatchBlockExecutionOutput; type Error = BlockExecutionError; - fn execute_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { + fn execute_and_verify_one(&mut self, input: Self::Input<'_>) -> Result<(), Self::Error> { let BlockExecutionInput { block, total_difficulty } = input; - let (receipts, _gas_used) = self.executor.execute_and_verify(block, total_difficulty)?; + let (receipts, _gas_used) = + self.executor.execute_without_verification(block, total_difficulty)?; + + validate_block_post_execution(block, self.executor.chain_spec(), &receipts)?; // prepare the state according to the prune mode let retention = self.batch_record.bundle_retention(block.number); @@ -557,7 +527,7 @@ mod tests { // Attempt to execute a block with one deposit and one non-deposit transaction executor - .execute_one( + .execute_and_verify_one( ( &BlockWithSenders { block: Block { @@ -638,7 +608,7 @@ mod tests { // attempt to execute an empty block with parent beacon block root, this should not fail executor - .execute_one( + .execute_and_verify_one( ( &BlockWithSenders { block: Block { diff --git a/crates/optimism/evm/src/l1.rs b/crates/optimism/evm/src/l1.rs index 66093e857b4..82fbb06e921 100644 --- a/crates/optimism/evm/src/l1.rs +++ b/crates/optimism/evm/src/l1.rs @@ -1,7 +1,7 @@ //! 
Optimism-specific implementation and utilities for the executor use crate::OptimismBlockExecutionError; -use reth_interfaces::{executor::BlockExecutionError, RethError}; +use reth_execution_errors::BlockExecutionError; use reth_primitives::{address, b256, hex, Address, Block, Bytes, ChainSpec, Hardfork, B256, U256}; use revm::{ primitives::{Bytecode, HashMap, SpecId}, @@ -232,7 +232,7 @@ pub fn ensure_create2_deployer( chain_spec: Arc, timestamp: u64, db: &mut revm::State, -) -> Result<(), RethError> +) -> Result<(), DB::Error> where DB: revm::Database, { @@ -246,9 +246,7 @@ where trace!(target: "evm", "Forcing create2 deployer contract deployment on Canyon transition"); // Load the create2 deployer account from the cache. - let acc = db - .load_cache_account(CREATE_2_DEPLOYER_ADDR) - .map_err(|_| RethError::Custom("Failed to load account".to_string()))?; + let acc = db.load_cache_account(CREATE_2_DEPLOYER_ADDR)?; // Update the account info with the create2 deployer codehash and bytecode. let mut acc_info = acc.account_info().unwrap_or_default(); diff --git a/crates/optimism/evm/src/lib.rs b/crates/optimism/evm/src/lib.rs index 31d39fcb6ac..be3897ef389 100644 --- a/crates/optimism/evm/src/lib.rs +++ b/crates/optimism/evm/src/lib.rs @@ -23,7 +23,6 @@ pub mod l1; pub use l1::*; mod error; -pub mod verify; pub use error::OptimismBlockExecutionError; /// Optimism-related EVM configuration. diff --git a/crates/optimism/evm/src/verify.rs b/crates/optimism/evm/src/verify.rs deleted file mode 100644 index d96965d03b5..00000000000 --- a/crates/optimism/evm/src/verify.rs +++ /dev/null @@ -1,58 +0,0 @@ -//! Helpers for verifying the receipts. - -use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; -use reth_primitives::{ - proofs::calculate_receipt_root_optimism, Bloom, ChainSpec, GotExpected, Receipt, - ReceiptWithBloom, B256, -}; - -/// Verify the calculated receipts root against the expected receipts root. 
-pub fn verify_receipts<'a>( - expected_receipts_root: B256, - expected_logs_bloom: Bloom, - receipts: impl Iterator + Clone, - chain_spec: &ChainSpec, - timestamp: u64, -) -> Result<(), BlockExecutionError> { - // Calculate receipts root. - let receipts_with_bloom = receipts.map(|r| r.clone().into()).collect::>(); - let receipts_root = - calculate_receipt_root_optimism(&receipts_with_bloom, chain_spec, timestamp); - - // Create header log bloom. - let logs_bloom = receipts_with_bloom.iter().fold(Bloom::ZERO, |bloom, r| bloom | r.bloom); - - compare_receipts_root_and_logs_bloom( - receipts_root, - logs_bloom, - expected_receipts_root, - expected_logs_bloom, - )?; - - Ok(()) -} - -/// Compare the calculated receipts root with the expected receipts root, also compare -/// the calculated logs bloom with the expected logs bloom. -pub fn compare_receipts_root_and_logs_bloom( - calculated_receipts_root: B256, - calculated_logs_bloom: Bloom, - expected_receipts_root: B256, - expected_logs_bloom: Bloom, -) -> Result<(), BlockExecutionError> { - if calculated_receipts_root != expected_receipts_root { - return Err(BlockValidationError::ReceiptRootDiff( - GotExpected { got: calculated_receipts_root, expected: expected_receipts_root }.into(), - ) - .into()) - } - - if calculated_logs_bloom != expected_logs_bloom { - return Err(BlockValidationError::BloomLogDiff( - GotExpected { got: calculated_logs_bloom, expected: expected_logs_bloom }.into(), - ) - .into()) - } - - Ok(()) -} diff --git a/crates/optimism/payload/src/payload.rs b/crates/optimism/payload/src/payload.rs index 182dadfed9b..41a3eec9be1 100644 --- a/crates/optimism/payload/src/payload.rs +++ b/crates/optimism/payload/src/payload.rs @@ -256,7 +256,7 @@ impl From for OptimismExecutionPayloadEnvelopeV3 { B256::ZERO }; OptimismExecutionPayloadEnvelopeV3 { - execution_payload: block_to_payload_v3(block), + execution_payload: block_to_payload_v3(block).0, block_value: fees, // From the engine API spec: // diff --git 
a/crates/optimism/rpc/Cargo.toml b/crates/optimism/rpc/Cargo.toml new file mode 100644 index 00000000000..07f885b08d0 --- /dev/null +++ b/crates/optimism/rpc/Cargo.toml @@ -0,0 +1,31 @@ +[package] +name = "reth-optimism-rpc" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true +description = "An extension of ethereum RPC for optimism." + +[lints] +workspace = true + +[dependencies] +# reth +reth-evm.workspace = true +reth-evm-optimism = { workspace = true, features = ["optimism"] } +revm.workspace = true +reth-network-api.workspace = true +reth-rpc = { workspace = true, features = ["optimism"] } +reth-rpc-api.workspace = true +reth-rpc-types.workspace = true +reth-primitives = { workspace = true, features = ["optimism"] } +reth-provider.workspace = true +reth-transaction-pool.workspace = true + +# rpc +jsonrpsee.workspace = true + +# misc +thiserror.workspace = true \ No newline at end of file diff --git a/crates/rpc/rpc/src/eth/optimism.rs b/crates/optimism/rpc/src/error.rs similarity index 86% rename from crates/rpc/rpc/src/eth/optimism.rs rename to crates/optimism/rpc/src/error.rs index 24f6f36ff46..08ef68265ce 100644 --- a/crates/rpc/rpc/src/eth/optimism.rs +++ b/crates/optimism/rpc/src/error.rs @@ -1,12 +1,10 @@ -//! Optimism specific types. +//! RPC errors specific to OP. use jsonrpsee::types::ErrorObject; +use reth_rpc::{eth::error::EthApiError, result::internal_rpc_err}; use reth_rpc_types::ToRpcError; -use crate::{eth::error::EthApiError, result::internal_rpc_err}; - /// Eth Optimism Api Error -#[cfg(feature = "optimism")] #[derive(Debug, thiserror::Error)] pub enum OptimismEthApiError { /// Thrown when calculating L1 gas fee diff --git a/crates/optimism/rpc/src/lib.rs b/crates/optimism/rpc/src/lib.rs new file mode 100644 index 00000000000..4d5a01712ac --- /dev/null +++ b/crates/optimism/rpc/src/lib.rs @@ -0,0 +1,12 @@ +//! 
Standalone crate for Optimism-specific RPC types. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +pub mod error; +pub mod receipt; +pub mod transaction; diff --git a/crates/optimism/rpc/src/receipt.rs b/crates/optimism/rpc/src/receipt.rs new file mode 100644 index 00000000000..a39dbb9e72f --- /dev/null +++ b/crates/optimism/rpc/src/receipt.rs @@ -0,0 +1,163 @@ +//! Formats OP receipt RPC response. + +use reth_evm::ConfigureEvm; +use reth_evm_optimism::RethL1BlockInfo; +use reth_network_api::NetworkInfo; +use reth_primitives::{BlockId, Receipt, TransactionMeta, TransactionSigned}; +use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; +use reth_rpc::{ + eth::{ + api::transactions::ReceiptResponseBuilder, + error::{EthApiError, EthResult}, + }, + EthApi, +}; +use reth_rpc_types::{AnyTransactionReceipt, OptimismTransactionReceiptFields}; +use reth_transaction_pool::TransactionPool; + +use crate::{error::OptimismEthApiError, transaction::OptimismTxMeta}; + +/// Helper function for `eth_getBlockReceipts`. Returns all transaction receipts in the block. +/// +/// Returns `None` if the block wasn't found. +pub async fn block_receipts( + eth_api: &EthApi, + block_id: BlockId, +) -> EthResult>> +where + Provider: + BlockReaderIdExt + ChainSpecProvider + EvmEnvProvider + StateProviderFactory + 'static, + Pool: TransactionPool + 'static, + Network: NetworkInfo + 'static, + EvmConfig: ConfigureEvm + 'static, +{ + if let Some((block, receipts)) = eth_api.load_block_and_receipts(block_id).await? 
{ + let block_number = block.number; + let base_fee = block.base_fee_per_gas; + let block_hash = block.hash(); + let excess_blob_gas = block.excess_blob_gas; + let timestamp = block.timestamp; + let block = block.unseal(); + + let l1_block_info = reth_evm_optimism::extract_l1_info(&block).ok(); + + let receipts = block + .body + .into_iter() + .zip(receipts.iter()) + .enumerate() + .map(|(idx, (ref tx, receipt))| { + let meta = TransactionMeta { + tx_hash: tx.hash, + index: idx as u64, + block_hash, + block_number, + base_fee, + excess_blob_gas, + timestamp, + }; + + let optimism_tx_meta = + build_op_tx_meta(eth_api, tx, l1_block_info.clone(), timestamp)?; + + ReceiptResponseBuilder::new(tx, meta, receipt, &receipts) + .map(|builder| op_fields(builder, tx, receipt, optimism_tx_meta).build()) + }) + .collect::>>(); + return receipts.map(Some) + } + + Ok(None) +} + +/// Helper function for `eth_getTransactionReceipt` +/// +/// Returns the receipt +pub async fn build_transaction_receipt( + eth_api: &EthApi, + tx: TransactionSigned, + meta: TransactionMeta, + receipt: Receipt, +) -> EthResult +where + Provider: BlockReaderIdExt + ChainSpecProvider, +{ + let (block, receipts) = eth_api + .cache() + .get_block_and_receipts(meta.block_hash) + .await? + .ok_or(EthApiError::UnknownBlockNumber)?; + + let block = block.unseal(); + let l1_block_info = reth_evm_optimism::extract_l1_info(&block).ok(); + let optimism_tx_meta = build_op_tx_meta(eth_api, &tx, l1_block_info, block.timestamp)?; + + let resp_builder = ReceiptResponseBuilder::new(&tx, meta, &receipt, &receipts)?; + let resp_builder = op_fields(resp_builder, &tx, &receipt, optimism_tx_meta); + + Ok(resp_builder.build()) +} + +/// Builds op metadata object using the provided [TransactionSigned], L1 block info and +/// `block_timestamp`. The L1BlockInfo is used to calculate the l1 fee and l1 data gas for the +/// transaction. If the L1BlockInfo is not provided, the meta info will be empty. 
+pub fn build_op_tx_meta( + eth_api: &EthApi, + tx: &TransactionSigned, + l1_block_info: Option, + block_timestamp: u64, +) -> EthResult +where + Provider: BlockReaderIdExt + ChainSpecProvider, +{ + let Some(l1_block_info) = l1_block_info else { return Ok(OptimismTxMeta::default()) }; + + let (l1_fee, l1_data_gas) = if !tx.is_deposit() { + let envelope_buf = tx.envelope_encoded(); + + let inner_l1_fee = l1_block_info + .l1_tx_data_fee( + ð_api.provider().chain_spec(), + block_timestamp, + &envelope_buf, + tx.is_deposit(), + ) + .map_err(|_| OptimismEthApiError::L1BlockFeeError)?; + let inner_l1_data_gas = l1_block_info + .l1_data_gas(ð_api.provider().chain_spec(), block_timestamp, &envelope_buf) + .map_err(|_| OptimismEthApiError::L1BlockGasError)?; + ( + Some(inner_l1_fee.saturating_to::()), + Some(inner_l1_data_gas.saturating_to::()), + ) + } else { + (None, None) + }; + + Ok(OptimismTxMeta::new(Some(l1_block_info), l1_fee, l1_data_gas)) +} + +/// Applies OP specific fields to a receipts response. 
+pub fn op_fields( + resp_builder: ReceiptResponseBuilder, + tx: &TransactionSigned, + receipt: &Receipt, + optimism_tx_meta: OptimismTxMeta, +) -> ReceiptResponseBuilder { + let mut op_fields = OptimismTransactionReceiptFields::default(); + + if tx.is_deposit() { + op_fields.deposit_nonce = receipt.deposit_nonce.map(reth_primitives::U64::from); + op_fields.deposit_receipt_version = + receipt.deposit_receipt_version.map(reth_primitives::U64::from); + } else if let Some(l1_block_info) = optimism_tx_meta.l1_block_info { + op_fields.l1_fee = optimism_tx_meta.l1_fee; + op_fields.l1_gas_used = optimism_tx_meta.l1_data_gas.map(|dg| { + dg + l1_block_info.l1_fee_overhead.unwrap_or_default().saturating_to::() + }); + op_fields.l1_fee_scalar = Some(f64::from(l1_block_info.l1_base_fee_scalar) / 1_000_000.0); + op_fields.l1_gas_price = Some(l1_block_info.l1_base_fee.saturating_to()); + } + + resp_builder.add_other_fields(op_fields.into()) +} diff --git a/crates/rpc/rpc/src/eth/api/optimism.rs b/crates/optimism/rpc/src/transaction.rs similarity index 59% rename from crates/rpc/rpc/src/eth/api/optimism.rs rename to crates/optimism/rpc/src/transaction.rs index 91ec45202a5..64922e6116c 100644 --- a/crates/rpc/rpc/src/eth/api/optimism.rs +++ b/crates/optimism/rpc/src/transaction.rs @@ -1,4 +1,4 @@ -//! Optimism helpers. +//! Formats OP transaction RPC response. use revm::L1BlockInfo; @@ -6,22 +6,22 @@ use revm::L1BlockInfo; /// /// Includes the L1 fee and data gas for the tx along with the L1 /// block info. In order to pass the [OptimismTxMeta] into the -/// async colored `build_transaction_receipt_with_block_receipts` -/// function, a reference counter for the L1 block info is -/// used so the L1 block info can be shared between receipts. +/// async colored [ReceiptResponseBuilder], a reference counter +/// for the L1 block info is used so the L1 block info can be +/// shared between receipts. 
#[derive(Debug, Default, Clone)] -pub(crate) struct OptimismTxMeta { +pub struct OptimismTxMeta { /// The L1 block info. - pub(crate) l1_block_info: Option, + pub l1_block_info: Option, /// The L1 fee for the block. - pub(crate) l1_fee: Option, + pub l1_fee: Option, /// The L1 data gas for the block. - pub(crate) l1_data_gas: Option, + pub l1_data_gas: Option, } impl OptimismTxMeta { /// Creates a new [OptimismTxMeta]. - pub(crate) fn new( + pub fn new( l1_block_info: Option, l1_fee: Option, l1_data_gas: Option, diff --git a/crates/payload/validator/src/lib.rs b/crates/payload/validator/src/lib.rs index 6b95b042576..13c20541f03 100644 --- a/crates/payload/validator/src/lib.rs +++ b/crates/payload/validator/src/lib.rs @@ -84,8 +84,8 @@ impl ExecutionPayloadValidator { /// - invalid extra data /// - invalid transactions /// - incorrect hash - /// - the versioned hashes passed with the payload do not exactly match transaction - /// versioned hashes + /// - the versioned hashes passed with the payload do not exactly match transaction versioned + /// hashes /// - the block does not contain blob transactions if it is pre-cancun /// /// The checks are done in the order that conforms with the engine-API specification. 
@@ -155,7 +155,7 @@ impl ExecutionPayloadValidator { let shanghai_active = self.is_shanghai_active_at_timestamp(sealed_block.timestamp); if !shanghai_active && sealed_block.withdrawals.is_some() { // shanghai not active but withdrawals present - return Err(PayloadError::PreShanghaiBlockWithWitdrawals); + return Err(PayloadError::PreShanghaiBlockWithWitdrawals) } // EIP-4844 checks diff --git a/crates/primitives/Cargo.toml b/crates/primitives/Cargo.toml index 34100b24b70..f44db3cae1e 100644 --- a/crates/primitives/Cargo.toml +++ b/crates/primitives/Cargo.toml @@ -16,6 +16,7 @@ workspace = true reth-codecs.workspace = true reth-ethereum-forks.workspace = true reth-network-types.workspace = true +reth-static-file-types.workspace = true revm.workspace = true revm-primitives = { workspace = true, features = ["serde"] } @@ -41,7 +42,6 @@ c-kzg = { workspace = true, features = ["serde"], optional = true } # misc bytes.workspace = true byteorder = "1" -clap = { workspace = true, features = ["derive"], optional = true } derive_more.workspace = true itertools.workspace = true modular-bitfield.workspace = true @@ -62,7 +62,6 @@ plain_hasher = { version = "0.2", optional = true } arbitrary = { workspace = true, features = ["derive"], optional = true } proptest = { workspace = true, optional = true } proptest-derive = { workspace = true, optional = true } -strum = { workspace = true, features = ["derive"] } [dev-dependencies] # eth @@ -116,7 +115,7 @@ c-kzg = [ "alloy-eips/kzg", ] zstd-codec = ["dep:zstd"] -clap = ["dep:clap"] +clap = ["reth-static-file-types/clap"] optimism = [ "reth-codecs/optimism", "reth-ethereum-forks/optimism", diff --git a/crates/primitives/src/account.rs b/crates/primitives/src/account.rs index bbaf4201266..78e796147f1 100644 --- a/crates/primitives/src/account.rs +++ b/crates/primitives/src/account.rs @@ -106,6 +106,10 @@ impl Compact for Bytecode { len + bytecode.len() + 4 } + // # Panics + // + // A panic will be triggered if a bytecode variant of 1 
or greater than 2 is passed from the + // database. fn from_compact(mut buf: &[u8], _: usize) -> (Self, &[u8]) { let len = buf.read_u32::().expect("could not read bytecode length"); let bytes = Bytes::from(buf.copy_to_bytes(len as usize)); diff --git a/crates/primitives/src/alloy_compat.rs b/crates/primitives/src/alloy_compat.rs index be8144e9012..2cdaee72db4 100644 --- a/crates/primitives/src/alloy_compat.rs +++ b/crates/primitives/src/alloy_compat.rs @@ -115,7 +115,7 @@ impl TryFrom for Transaction { return Err(ConversionError::Eip2718Error( RlpError::Custom("EIP-1559 fields are present in a legacy transaction") .into(), - )); + )) } Ok(Transaction::Legacy(TxLegacy { chain_id: tx.chain_id, diff --git a/crates/primitives/src/compression/mod.rs b/crates/primitives/src/compression/mod.rs index 200b6bc4360..b0a3fd2fe50 100644 --- a/crates/primitives/src/compression/mod.rs +++ b/crates/primitives/src/compression/mod.rs @@ -69,7 +69,7 @@ impl ReusableDecompressor { reserved_upper_bound = true; if let Some(upper_bound) = Decompressor::upper_bound(src) { if let Some(additional) = upper_bound.checked_sub(self.buf.capacity()) { - break 'b additional; + break 'b additional } } } diff --git a/crates/primitives/src/lib.rs b/crates/primitives/src/lib.rs index 3c57158f1a3..b10582cf9e5 100644 --- a/crates/primitives/src/lib.rs +++ b/crates/primitives/src/lib.rs @@ -42,7 +42,7 @@ mod receipt; /// Helpers for working with revm pub mod revm; pub mod stage; -pub mod static_file; +pub use reth_static_file_types as static_file; mod storage; /// Helpers for working with transactions pub mod transaction; @@ -82,7 +82,9 @@ pub use prune::{ PrunePurpose, PruneSegment, PruneSegmentError, ReceiptsLogPruneConfig, MINIMUM_PRUNING_DISTANCE, }; -pub use receipt::{Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts}; +pub use receipt::{ + gas_spent_by_transactions, Receipt, ReceiptWithBloom, ReceiptWithBloomRef, Receipts, +}; pub use static_file::StaticFileSegment; pub use 
storage::StorageEntry; diff --git a/crates/primitives/src/log.rs b/crates/primitives/src/log.rs index 79227d4f9bd..628a20f831c 100644 --- a/crates/primitives/src/log.rs +++ b/crates/primitives/src/log.rs @@ -1,13 +1,9 @@ use crate::Bloom; -/// Re-export `Log` from `alloy_primitives`. pub use alloy_primitives::Log; /// Calculate receipt logs bloom. -pub fn logs_bloom<'a, It>(logs: It) -> Bloom -where - It: IntoIterator, -{ +pub fn logs_bloom<'a>(logs: impl IntoIterator) -> Bloom { let mut bloom = Bloom::ZERO; for log in logs { bloom.m3_2048(log.address.as_slice()); diff --git a/crates/primitives/src/proofs.rs b/crates/primitives/src/proofs.rs index d08fc10a63c..b16fa687938 100644 --- a/crates/primitives/src/proofs.rs +++ b/crates/primitives/src/proofs.rs @@ -8,7 +8,6 @@ use crate::{ B256, U256, }; use alloy_rlp::Encodable; -use bytes::BufMut; use itertools::Itertools; /// Adjust the index of an item for rlp encoding. @@ -30,9 +29,8 @@ pub fn ordered_trie_root(items: &[T]) -> B256 { /// Compute a trie root of the collection of items with a custom encoder. pub fn ordered_trie_root_with_encoder(items: &[T], mut encode: F) -> B256 where - F: FnMut(&T, &mut dyn BufMut), + F: FnMut(&T, &mut Vec), { - let mut index_buffer = Vec::new(); let mut value_buffer = Vec::new(); let mut hb = HashBuilder::default(); @@ -40,8 +38,7 @@ where for i in 0..items_len { let index = adjust_index_for_rlp(i, items_len); - index_buffer.clear(); - index.encode(&mut index_buffer); + let index_buffer = alloy_rlp::encode_fixed_size(&index); value_buffer.clear(); encode(&items[index], &mut value_buffer); @@ -104,10 +101,15 @@ pub fn calculate_receipt_root_optimism( ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) } +/// Calculates the receipt root for a header. 
+pub fn calculate_receipt_root_ref(receipts: &[ReceiptWithBloomRef<'_>]) -> B256 { + ordered_trie_root_with_encoder(receipts, |r, buf| r.encode_inner(buf, false)) +} + /// Calculates the receipt root for a header for the reference type of [Receipt]. /// -/// NOTE: Prefer [calculate_receipt_root] if you have log blooms memoized. -pub fn calculate_receipt_root_ref(receipts: &[&Receipt]) -> B256 { +/// NOTE: Prefer [`calculate_receipt_root`] if you have log blooms memoized. +pub fn calculate_receipt_root_no_memo(receipts: &[&Receipt]) -> B256 { ordered_trie_root_with_encoder(receipts, |r, buf| { ReceiptWithBloomRef::from(*r).encode_inner(buf, false) }) @@ -115,9 +117,9 @@ pub fn calculate_receipt_root_ref(receipts: &[&Receipt]) -> B256 { /// Calculates the receipt root for a header for the reference type of [Receipt]. /// -/// NOTE: Prefer [calculate_receipt_root] if you have log blooms memoized. +/// NOTE: Prefer [`calculate_receipt_root_optimism`] if you have log blooms memoized. #[cfg(feature = "optimism")] -pub fn calculate_receipt_root_ref_optimism( +pub fn calculate_receipt_root_no_memo_optimism( receipts: &[&Receipt], chain_spec: &crate::ChainSpec, timestamp: u64, diff --git a/crates/primitives/src/prune/target.rs b/crates/primitives/src/prune/target.rs index 1300b9b0b96..7f39c8d74ce 100644 --- a/crates/primitives/src/prune/target.rs +++ b/crates/primitives/src/prune/target.rs @@ -70,8 +70,8 @@ impl PruneModes { /// /// 1. For [PruneMode::Full], it fails if `MIN_BLOCKS > 0`. /// 2. For [PruneMode::Distance(distance)], it fails if `distance < MIN_BLOCKS + 1`. `+ 1` is needed -/// because `PruneMode::Distance(0)` means that we leave zero blocks from the latest, meaning we -/// have one block in the database. +/// because `PruneMode::Distance(0)` means that we leave zero blocks from the latest, meaning we +/// have one block in the database. 
fn deserialize_opt_prune_mode_with_min_blocks<'de, const MIN_BLOCKS: u64, D: Deserializer<'de>>( deserializer: D, ) -> Result, D::Error> { diff --git a/crates/primitives/src/receipt.rs b/crates/primitives/src/receipt.rs index 63955a1d13b..85470cb2e81 100644 --- a/crates/primitives/src/receipt.rs +++ b/crates/primitives/src/receipt.rs @@ -1,6 +1,6 @@ #[cfg(feature = "zstd-codec")] use crate::compression::{RECEIPT_COMPRESSOR, RECEIPT_DECOMPRESSOR}; -use crate::{logs_bloom, Bloom, Bytes, PruneSegmentError, TxType, B256}; +use crate::{logs_bloom, Bloom, Bytes, TxType, B256}; use alloy_primitives::Log; use alloy_rlp::{length_of_length, Decodable, Encodable, RlpDecodable, RlpEncodable}; use bytes::{Buf, BufMut}; @@ -56,6 +56,12 @@ impl Receipt { pub fn with_bloom(self) -> ReceiptWithBloom { self.into() } + + /// Calculates the bloom filter for the receipt and returns the [ReceiptWithBloomRef] container + /// type. + pub fn with_bloom_ref(&self) -> ReceiptWithBloomRef<'_> { + self.into() + } } /// A collection of receipts organized as a two-dimensional vector. @@ -98,7 +104,7 @@ impl Receipts { /// Retrieves the receipt root for all recorded receipts from index. pub fn root_slow(&self, index: usize) -> Option { - Some(crate::proofs::calculate_receipt_root_ref( + Some(crate::proofs::calculate_receipt_root_no_memo( &self.receipt_vec[index].iter().map(Option::as_ref).collect::>>()?, )) } @@ -111,28 +117,12 @@ impl Receipts { chain_spec: &crate::ChainSpec, timestamp: u64, ) -> Option { - Some(crate::proofs::calculate_receipt_root_ref_optimism( + Some(crate::proofs::calculate_receipt_root_no_memo_optimism( &self.receipt_vec[index].iter().map(Option::as_ref).collect::>>()?, chain_spec, timestamp, )) } - - /// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). 
- pub fn gas_spent_by_tx(&self) -> Result, PruneSegmentError> { - let Some(block_r) = self.last() else { - return Ok(vec![]); - }; - let mut out = Vec::with_capacity(block_r.len()); - for (id, tx_r) in block_r.iter().enumerate() { - if let Some(receipt) = tx_r.as_ref() { - out.push((id as u64, receipt.cumulative_gas_used)); - } else { - return Err(PruneSegmentError::ReceiptsPruned); - } - } - Ok(out) - } } impl Deref for Receipts { @@ -203,6 +193,17 @@ impl ReceiptWithBloom { } } +/// Retrieves gas spent by transactions as a vector of tuples (transaction index, gas used). +pub fn gas_spent_by_transactions>( + receipts: impl IntoIterator, +) -> Vec<(u64, u64)> { + receipts + .into_iter() + .enumerate() + .map(|(id, receipt)| (id as u64, receipt.deref().cumulative_gas_used)) + .collect() +} + #[cfg(any(test, feature = "arbitrary"))] impl proptest::arbitrary::Arbitrary for Receipt { type Parameters = (); @@ -312,7 +313,7 @@ impl ReceiptWithBloom { let b = &mut &**buf; let rlp_head = alloy_rlp::Header::decode(b)?; if !rlp_head.list { - return Err(alloy_rlp::Error::UnexpectedString); + return Err(alloy_rlp::Error::UnexpectedString) } let started_len = b.len(); @@ -357,7 +358,7 @@ impl ReceiptWithBloom { return Err(alloy_rlp::Error::ListLengthMismatch { expected: rlp_head.payload_length, got: consumed, - }); + }) } *buf = *b; Ok(this) @@ -510,7 +511,7 @@ impl<'a> ReceiptWithBloomEncoder<'a> { fn encode_inner(&self, out: &mut dyn BufMut, with_header: bool) { if matches!(self.receipt.tx_type, TxType::Legacy) { self.encode_fields(out); - return; + return } let mut payload = Vec::new(); diff --git a/crates/primitives/src/revm/env.rs b/crates/primitives/src/revm/env.rs index 0c16f5482f8..49b9f609c2b 100644 --- a/crates/primitives/src/revm/env.rs +++ b/crates/primitives/src/revm/env.rs @@ -138,8 +138,8 @@ pub fn tx_env_with_recovered(transaction: &TransactionSignedEcRecovered) -> TxEn /// and therefore: /// * the call must execute to completion /// * the call does not count 
against the block’s gas limit -/// * the call does not follow the EIP-1559 burn semantics - no value should be transferred as -/// part of the call +/// * the call does not follow the EIP-1559 burn semantics - no value should be transferred as part +/// of the call /// * if no code exists at `BEACON_ROOTS_ADDRESS`, the call must fail silently pub fn fill_tx_env_with_beacon_root_contract_call(env: &mut Env, parent_beacon_block_root: B256) { env.tx = TxEnv { diff --git a/crates/primitives/src/transaction/mod.rs b/crates/primitives/src/transaction/mod.rs index 2201b5f0d42..d481bed166f 100644 --- a/crates/primitives/src/transaction/mod.rs +++ b/crates/primitives/src/transaction/mod.rs @@ -644,6 +644,11 @@ impl Compact for Transaction { // For backwards compatibility purposes, only 2 bits of the type are encoded in the identifier // parameter. In the case of a 3, the full transaction type is read from the buffer as a // single byte. + // + // # Panics + // + // A panic will be triggered if an identifier larger than 3 is passed from the database. For + // optimism a identifier with value 126 is allowed. fn from_compact(mut buf: &[u8], identifier: usize) -> (Self, &[u8]) { match identifier { 0 => { @@ -1341,7 +1346,7 @@ impl TransactionSigned { }; if !input_data.is_empty() { - return Err(RlpError::UnexpectedLength); + return Err(RlpError::UnexpectedLength) } Ok(output_data) @@ -1419,7 +1424,7 @@ impl Decodable for TransactionSigned { /// header if the first byte is less than `0xf7`. 
fn decode(buf: &mut &[u8]) -> alloy_rlp::Result { if buf.is_empty() { - return Err(RlpError::InputTooShort); + return Err(RlpError::InputTooShort) } // decode header diff --git a/crates/primitives/src/trie/hash_builder/value.rs b/crates/primitives/src/trie/hash_builder/value.rs index a829f85175e..1397f5756aa 100644 --- a/crates/primitives/src/trie/hash_builder/value.rs +++ b/crates/primitives/src/trie/hash_builder/value.rs @@ -23,6 +23,10 @@ impl Compact for StoredHashBuilderValue { } } + // # Panics + // + // A panic will be triggered if a HashBuilderValue variant greater than 1 is passed from the + // database. fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { match buf.get_u8() { 0 => { diff --git a/crates/prune/Cargo.toml b/crates/prune/Cargo.toml index cc24e68b834..65b4ba19c6c 100644 --- a/crates/prune/Cargo.toml +++ b/crates/prune/Cargo.toml @@ -30,7 +30,6 @@ thiserror.workspace = true itertools.workspace = true rayon.workspace = true tokio.workspace = true -tokio-stream.workspace = true [dev-dependencies] # reth diff --git a/crates/prune/src/pruner.rs b/crates/prune/src/pruner.rs index 55a998709d8..f4111f131a5 100644 --- a/crates/prune/src/pruner.rs +++ b/crates/prune/src/pruner.rs @@ -13,13 +13,12 @@ use reth_primitives::{ use reth_provider::{ DatabaseProviderRW, ProviderFactory, PruneCheckpointReader, StaticFileProviderFactory, }; -use reth_tokio_util::EventListeners; +use reth_tokio_util::{EventSender, EventStream}; use std::{ collections::BTreeMap, time::{Duration, Instant}, }; use tokio::sync::watch; -use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::debug; /// Result of [Pruner::run] execution. 
@@ -53,7 +52,7 @@ pub struct Pruner { finished_exex_height: watch::Receiver, #[doc(hidden)] metrics: Metrics, - listeners: EventListeners, + event_sender: EventSender, } impl Pruner { @@ -77,13 +76,13 @@ impl Pruner { timeout, finished_exex_height, metrics: Metrics::default(), - listeners: Default::default(), + event_sender: Default::default(), } } /// Listen for events on the pruner. - pub fn events(&mut self) -> UnboundedReceiverStream { - self.listeners.new_listener() + pub fn events(&self) -> EventStream { + self.event_sender.new_listener() } /// Run the pruner @@ -100,7 +99,7 @@ impl Pruner { return Ok(PruneProgress::Finished) } - self.listeners.notify(PrunerEvent::Started { tip_block_number }); + self.event_sender.notify(PrunerEvent::Started { tip_block_number }); debug!(target: "pruner", %tip_block_number, "Pruner started"); let start = Instant::now(); @@ -154,7 +153,7 @@ impl Pruner { "{message}", ); - self.listeners.notify(PrunerEvent::Finished { tip_block_number, elapsed, stats }); + self.event_sender.notify(PrunerEvent::Finished { tip_block_number, elapsed, stats }); Ok(progress) } diff --git a/crates/revm/Cargo.toml b/crates/revm/Cargo.toml index 87d30ca6f29..fe93edd506f 100644 --- a/crates/revm/Cargo.toml +++ b/crates/revm/Cargo.toml @@ -14,8 +14,9 @@ workspace = true [dependencies] # reth reth-primitives.workspace = true -reth-interfaces.workspace = true reth-provider.workspace = true +reth-storage-errors.workspace = true +reth-execution-errors.workspace = true reth-consensus-common.workspace = true reth-trie = { workspace = true, optional = true } diff --git a/crates/revm/src/batch.rs b/crates/revm/src/batch.rs index 544a74a5c09..77f747cd9a8 100644 --- a/crates/revm/src/batch.rs +++ b/crates/revm/src/batch.rs @@ -1,7 +1,7 @@ //! Helper for handling execution of multiple blocks. 
use crate::{precompile::Address, primitives::alloy_primitives::BlockNumber}; -use reth_interfaces::executor::BlockExecutionError; +use reth_execution_errors::BlockExecutionError; use reth_primitives::{ PruneMode, PruneModes, PruneSegmentError, Receipt, Receipts, MINIMUM_PRUNING_DISTANCE, }; diff --git a/crates/revm/src/database.rs b/crates/revm/src/database.rs index 93a22a06834..b7cf362fee1 100644 --- a/crates/revm/src/database.rs +++ b/crates/revm/src/database.rs @@ -1,5 +1,6 @@ use reth_primitives::{Address, B256, KECCAK_EMPTY, U256}; -use reth_provider::{ProviderError, StateProvider}; +use reth_provider::StateProvider; +use reth_storage_errors::provider::ProviderError; use revm::{ db::DatabaseRef, primitives::{AccountInfo, Bytecode}, diff --git a/crates/revm/src/state_change.rs b/crates/revm/src/state_change.rs index 27997342547..00f135490e2 100644 --- a/crates/revm/src/state_change.rs +++ b/crates/revm/src/state_change.rs @@ -1,5 +1,5 @@ use reth_consensus_common::calc; -use reth_interfaces::executor::{BlockExecutionError, BlockValidationError}; +use reth_execution_errors::{BlockExecutionError, BlockValidationError}; use reth_primitives::{ revm::env::fill_tx_env_with_beacon_root_contract_call, Address, ChainSpec, Header, Withdrawal, B256, U256, diff --git a/crates/revm/src/test_utils.rs b/crates/revm/src/test_utils.rs index 8c4d1894c5d..d2045c45932 100644 --- a/crates/revm/src/test_utils.rs +++ b/crates/revm/src/test_utils.rs @@ -1,9 +1,9 @@ -use reth_interfaces::provider::ProviderResult; use reth_primitives::{ keccak256, trie::AccountProof, Account, Address, BlockNumber, Bytecode, Bytes, StorageKey, B256, U256, }; use reth_provider::{AccountReader, BlockHashReader, StateProvider, StateRootProvider}; +use reth_storage_errors::provider::ProviderResult; use reth_trie::updates::TrieUpdates; use revm::db::BundleState; use std::collections::HashMap; diff --git a/crates/rpc/ipc/src/server/ipc.rs b/crates/rpc/ipc/src/server/ipc.rs index c73d9bb9367..33ed8d2d553 100644 
--- a/crates/rpc/ipc/src/server/ipc.rs +++ b/crates/rpc/ipc/src/server/ipc.rs @@ -151,7 +151,7 @@ where return Some(batch_response_error( Id::Null, reject_too_big_request(max_request_body_size as u32), - )); + )) } // Single request or notification diff --git a/crates/rpc/ipc/src/server/mod.rs b/crates/rpc/ipc/src/server/mod.rs index 04608745484..2bec090cc44 100644 --- a/crates/rpc/ipc/src/server/mod.rs +++ b/crates/rpc/ipc/src/server/mod.rs @@ -871,7 +871,7 @@ mod tests { // and you might want to do something smarter if it's // critical that "the most recent item" must be sent when it is produced. if sink.send(notif).await.is_err() { - break Ok(()); + break Ok(()) } closed = c; diff --git a/crates/rpc/rpc-api/src/engine.rs b/crates/rpc/rpc-api/src/engine.rs index d320c74601d..be20a4fbe08 100644 --- a/crates/rpc/rpc-api/src/engine.rs +++ b/crates/rpc/rpc-api/src/engine.rs @@ -8,8 +8,9 @@ use reth_engine_primitives::EngineTypes; use reth_primitives::{Address, BlockHash, BlockId, BlockNumberOrTag, Bytes, B256, U256, U64}; use reth_rpc_types::{ engine::{ - ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, - ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, + ClientVersionV1, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, ExecutionPayloadV1, + ExecutionPayloadV3, ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, + TransitionConfiguration, }, state::StateOverride, BlockOverrides, Filter, Log, RichBlock, SyncStatus, TransactionRequest, @@ -154,6 +155,22 @@ pub trait EngineApi { transition_configuration: TransitionConfiguration, ) -> RpcResult; + /// This function will return the ClientVersionV1 object. + /// See also: + /// make fmt + /// + /// + /// - When connected to a single execution client, the consensus client **MUST** receive an + /// array with a single `ClientVersionV1` object. 
+ /// - When connected to multiple execution clients via a multiplexer, the multiplexer **MUST** + /// concatenate the responses from each execution client into a single, + /// flat array before returning the response to the consensus client. + #[method(name = "getClientVersionV1")] + async fn get_client_version_v1( + &self, + client_version: ClientVersionV1, + ) -> RpcResult>; + /// See also #[method(name = "exchangeCapabilities")] async fn exchange_capabilities(&self, capabilities: Vec) -> RpcResult>; diff --git a/crates/rpc/rpc-builder/Cargo.toml b/crates/rpc/rpc-builder/Cargo.toml index 9087ff7c7ff..e3b5f4766d9 100644 --- a/crates/rpc/rpc-builder/Cargo.toml +++ b/crates/rpc/rpc-builder/Cargo.toml @@ -56,6 +56,7 @@ reth-rpc-types.workspace = true reth-rpc-types-compat.workspace = true reth-tracing.workspace = true reth-transaction-pool = { workspace = true, features = ["test-utils"] } +reth-tokio-util.workspace = true tokio = { workspace = true, features = ["rt", "rt-multi-thread"] } serde_json.workspace = true diff --git a/crates/rpc/rpc-builder/tests/it/utils.rs b/crates/rpc/rpc-builder/tests/it/utils.rs index a3272ac026a..819a3a863ac 100644 --- a/crates/rpc/rpc-builder/tests/it/utils.rs +++ b/crates/rpc/rpc-builder/tests/it/utils.rs @@ -12,6 +12,7 @@ use reth_rpc_builder::{ }; use reth_rpc_engine_api::EngineApi; use reth_rpc_layer::JwtSecret; +use reth_rpc_types::engine::{ClientCode, ClientVersionV1}; use reth_tasks::TokioTaskExecutor; use reth_transaction_pool::test_utils::{TestPool, TestPoolBuilder}; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; @@ -26,13 +27,22 @@ pub fn test_address() -> SocketAddr { pub async fn launch_auth(secret: JwtSecret) -> AuthServerHandle { let config = AuthServerConfig::builder(secret).socket_addr(test_address()).build(); let (tx, _rx) = unbounded_channel(); - let beacon_engine_handle = BeaconConsensusEngineHandle::::new(tx); + let beacon_engine_handle = + BeaconConsensusEngineHandle::::new(tx, Default::default()); + let 
client = ClientVersionV1 { + code: ClientCode::RH, + name: "Reth".to_string(), + version: "v0.2.0-beta.5".to_string(), + commit: "defa64b2".to_string(), + }; + let engine_api = EngineApi::new( NoopProvider::default(), MAINNET.clone(), beacon_engine_handle, spawn_test_payload_service().into(), Box::::default(), + client, ); let module = AuthRpcModule::new(engine_api); module.start_server(config).await.unwrap() diff --git a/crates/rpc/rpc-engine-api/Cargo.toml b/crates/rpc/rpc-engine-api/Cargo.toml index 5fe782a6ef5..83a5f85fcfa 100644 --- a/crates/rpc/rpc-engine-api/Cargo.toml +++ b/crates/rpc/rpc-engine-api/Cargo.toml @@ -43,6 +43,7 @@ reth-ethereum-engine-primitives.workspace = true reth-interfaces = { workspace = true, features = ["test-utils"] } reth-provider = { workspace = true, features = ["test-utils"] } reth-payload-builder = { workspace = true, features = ["test-utils"] } +reth-tokio-util.workspace = true alloy-rlp.workspace = true diff --git a/crates/rpc/rpc-engine-api/src/engine_api.rs b/crates/rpc/rpc-engine-api/src/engine_api.rs index 0e4476bb71b..8d51884d598 100644 --- a/crates/rpc/rpc-engine-api/src/engine_api.rs +++ b/crates/rpc/rpc-engine-api/src/engine_api.rs @@ -11,9 +11,10 @@ use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, ChainSpec, Hard use reth_provider::{BlockReader, EvmEnvProvider, HeaderProvider, StateProviderFactory}; use reth_rpc_api::EngineApiServer; use reth_rpc_types::engine::{ - CancunPayloadFields, ExecutionPayload, ExecutionPayloadBodiesV1, ExecutionPayloadInputV2, - ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, ForkchoiceState, ForkchoiceUpdated, - PayloadId, PayloadStatus, TransitionConfiguration, CAPABILITIES, + CancunPayloadFields, ClientVersionV1, ExecutionPayload, ExecutionPayloadBodiesV1, + ExecutionPayloadInputV2, ExecutionPayloadV1, ExecutionPayloadV3, ExecutionPayloadV4, + ForkchoiceState, ForkchoiceUpdated, PayloadId, PayloadStatus, TransitionConfiguration, + CAPABILITIES, }; use 
reth_rpc_types_compat::engine::payload::{ convert_payload_input_v2_to_payload, convert_to_payload_body_v1, @@ -48,6 +49,8 @@ struct EngineApiInner { task_spawner: Box, /// The latency and response type metrics for engine api calls metrics: EngineApiMetrics, + /// Identification of the execution client used by the consensus client + client: ClientVersionV1, } impl EngineApi @@ -62,6 +65,7 @@ where beacon_consensus: BeaconConsensusEngineHandle, payload_store: PayloadStore, task_spawner: Box, + client: ClientVersionV1, ) -> Self { let inner = Arc::new(EngineApiInner { provider, @@ -70,10 +74,18 @@ where payload_store, task_spawner, metrics: EngineApiMetrics::default(), + client, }); Self { inner } } + /// Fetches the client version. + async fn get_client_version_v1( + &self, + _client: ClientVersionV1, + ) -> EngineApiResult> { + Ok(vec![self.inner.client.clone()]) + } /// Fetches the attributes for the payload with the given id. async fn get_payload_attributes( &self, @@ -749,6 +761,18 @@ where self.inner.metrics.latency.exchange_transition_configuration.record(start.elapsed()); Ok(res?) } + /// Handler for `engine_getClientVersionV1` + /// + /// See also + async fn get_client_version_v1( + &self, + client: ClientVersionV1, + ) -> RpcResult> { + trace!(target: "rpc::engine", "Serving engine_getClientVersionV1"); + let res = EngineApi::get_client_version_v1(self, client).await; + + Ok(res?) 
+ } /// Handler for `engine_exchangeCapabilitiesV1` /// See also @@ -770,34 +794,59 @@ where mod tests { use super::*; use assert_matches::assert_matches; - use reth_beacon_consensus::BeaconEngineMessage; + use reth_beacon_consensus::{BeaconConsensusEngineEvent, BeaconEngineMessage}; use reth_ethereum_engine_primitives::EthEngineTypes; use reth_interfaces::test_utils::generators::random_block; + use reth_payload_builder::test_utils::spawn_test_payload_service; use reth_primitives::{SealedBlock, B256, MAINNET}; use reth_provider::test_utils::MockEthProvider; + use reth_rpc_types::engine::{ClientCode, ClientVersionV1}; use reth_rpc_types_compat::engine::payload::execution_payload_from_sealed_block; use reth_tasks::TokioTaskExecutor; + use reth_tokio_util::EventSender; use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver}; fn setup_engine_api() -> (EngineApiTestHandle, EngineApi, EthEngineTypes>) { + let client = ClientVersionV1 { + code: ClientCode::RH, + name: "Reth".to_string(), + version: "v0.2.0-beta.5".to_string(), + commit: "defa64b2".to_string(), + }; + let chain_spec: Arc = MAINNET.clone(); let provider = Arc::new(MockEthProvider::default()); let payload_store = spawn_test_payload_service(); let (to_engine, engine_rx) = unbounded_channel(); + let event_sender: EventSender = Default::default(); let task_executor = Box::::default(); let api = EngineApi::new( provider.clone(), chain_spec.clone(), - BeaconConsensusEngineHandle::new(to_engine), + BeaconConsensusEngineHandle::new(to_engine, event_sender), payload_store.into(), task_executor, + client, ); let handle = EngineApiTestHandle { chain_spec, provider, from_api: engine_rx }; (handle, api) } + #[tokio::test] + async fn engine_client_version_v1() { + let client = ClientVersionV1 { + code: ClientCode::RH, + name: "Reth".to_string(), + version: "v0.2.0-beta.5".to_string(), + commit: "defa64b2".to_string(), + }; + let (_, api) = setup_engine_api(); + let res = 
api.get_client_version_v1(client.clone()).await; + assert_eq!(res.unwrap(), vec![client]); + } + struct EngineApiTestHandle { chain_spec: Arc, provider: Arc, diff --git a/crates/rpc/rpc-engine-api/tests/it/payload.rs b/crates/rpc/rpc-engine-api/tests/it/payload.rs index 22219584c7e..e2b39691afe 100644 --- a/crates/rpc/rpc-engine-api/tests/it/payload.rs +++ b/crates/rpc/rpc-engine-api/tests/it/payload.rs @@ -29,6 +29,7 @@ fn transform_block Block>(src: SealedBlock, f: F) -> Executi ommers: transformed.ommers, withdrawals: transformed.withdrawals, }) + .0 } #[test] diff --git a/crates/rpc/rpc-types-compat/src/engine/payload.rs b/crates/rpc/rpc-types-compat/src/engine/payload.rs index f3478d189ab..9f968a1a4eb 100644 --- a/crates/rpc/rpc-types-compat/src/engine/payload.rs +++ b/crates/rpc/rpc-types-compat/src/engine/payload.rs @@ -97,18 +97,20 @@ pub fn try_payload_v4_to_block(payload: ExecutionPayloadV4) -> Result ExecutionPayload { +/// Converts [SealedBlock] to [ExecutionPayload], returning additional data (the parent beacon block +/// root) if the block is a V3 payload +pub fn block_to_payload(value: SealedBlock) -> (ExecutionPayload, Option) { // todo(onbjerg): check for requests_root here and return payload v4 if value.header.parent_beacon_block_root.is_some() { // block with parent beacon block root: V3 - ExecutionPayload::V3(block_to_payload_v3(value)) + let (payload, beacon_block_root) = block_to_payload_v3(value); + (ExecutionPayload::V3(payload), beacon_block_root) } else if value.withdrawals.is_some() { // block with withdrawals: V2 - ExecutionPayload::V2(block_to_payload_v2(value)) + (ExecutionPayload::V2(block_to_payload_v2(value)), None) } else { // otherwise V1 - ExecutionPayload::V1(block_to_payload_v1(value)) + (ExecutionPayload::V1(block_to_payload_v1(value)), None) } } @@ -158,11 +160,12 @@ pub fn block_to_payload_v2(value: SealedBlock) -> ExecutionPayloadV2 { } } -/// Converts [SealedBlock] to [ExecutionPayloadV3] -pub fn block_to_payload_v3(value: 
SealedBlock) -> ExecutionPayloadV3 { +/// Converts [SealedBlock] to [ExecutionPayloadV3], and returns the parent beacon block root. +pub fn block_to_payload_v3(value: SealedBlock) -> (ExecutionPayloadV3, Option) { let transactions = value.raw_transactions(); - ExecutionPayloadV3 { + let parent_beacon_block_root = value.header.parent_beacon_block_root; + let payload = ExecutionPayloadV3 { blob_gas_used: value.blob_gas_used.unwrap_or_default(), excess_blob_gas: value.excess_blob_gas.unwrap_or_default(), payload_inner: ExecutionPayloadV2 { @@ -184,7 +187,9 @@ pub fn block_to_payload_v3(value: SealedBlock) -> ExecutionPayloadV3 { }, withdrawals: value.withdrawals.unwrap_or_default().into_inner(), }, - } + }; + + (payload, parent_beacon_block_root) } /// Converts [SealedBlock] to [ExecutionPayloadFieldV2] @@ -374,7 +379,7 @@ mod tests { let converted_payload = block_to_payload_v3(block.seal_slow()); // ensure the payloads are the same - assert_eq!(new_payload, converted_payload); + assert_eq!((new_payload, Some(parent_beacon_block_root.into())), converted_payload); } #[test] diff --git a/crates/rpc/rpc-types/src/mev.rs b/crates/rpc/rpc-types/src/mev.rs index 9126c09635d..5da5a5667da 100644 --- a/crates/rpc/rpc-types/src/mev.rs +++ b/crates/rpc/rpc-types/src/mev.rs @@ -755,7 +755,7 @@ mod u256_numeric_string { match val { serde_json::Value::String(s) => { if let Ok(val) = s.parse::() { - return Ok(U256::from(val)); + return Ok(U256::from(val)) } U256::from_str(&s).map_err(de::Error::custom) } diff --git a/crates/rpc/rpc/Cargo.toml b/crates/rpc/rpc/Cargo.toml index 224866be6e2..73fbf13b8a8 100644 --- a/crates/rpc/rpc/Cargo.toml +++ b/crates/rpc/rpc/Cargo.toml @@ -29,8 +29,6 @@ revm-inspectors = { workspace = true, features = ["js-tracer"] } reth-evm.workspace = true reth-network-types.workspace = true -reth-evm-optimism = { workspace = true, optional = true } - # eth alloy-rlp.workspace = true alloy-dyn-abi = { workspace = true, features = ["eip712"] } @@ -91,6 +89,4 @@ 
optimism = [ "reth-primitives/optimism", "reth-rpc-types-compat/optimism", "reth-provider/optimism", - "dep:reth-evm-optimism", - "reth-evm-optimism/optimism", ] diff --git a/crates/rpc/rpc/src/eth/api/block.rs b/crates/rpc/rpc/src/eth/api/block.rs index cfc3fe058cb..a51fa1be83b 100644 --- a/crates/rpc/rpc/src/eth/api/block.rs +++ b/crates/rpc/rpc/src/eth/api/block.rs @@ -2,14 +2,14 @@ use crate::{ eth::{ - api::transactions::build_transaction_receipt_with_block_receipts, + api::transactions::ReceiptResponseBuilder, error::{EthApiError, EthResult}, }, EthApi, }; use reth_evm::ConfigureEvm; use reth_network_api::NetworkInfo; -use reth_primitives::{BlockId, TransactionMeta}; +use reth_primitives::{BlockId, Receipt, SealedBlock, TransactionMeta}; use reth_provider::{BlockReaderIdExt, ChainSpecProvider, EvmEnvProvider, StateProviderFactory}; use reth_rpc_types::{AnyTransactionReceipt, Header, Index, RichBlock}; use reth_rpc_types_compat::block::{from_block, uncle_block_from_header}; @@ -56,25 +56,33 @@ where Ok(uncle) } - /// Returns all transaction receipts in the block. - /// - /// Returns `None` if the block wasn't found. - pub(crate) async fn block_receipts( + /// Loads a block and its corresponding receipts. + pub async fn load_block_and_receipts( &self, block_id: BlockId, - ) -> EthResult>> { - let mut block_and_receipts = None; - + ) -> EthResult>)>> { if block_id.is_pending() { - block_and_receipts = self + return Ok(self .provider() .pending_block_and_receipts()? - .map(|(sb, receipts)| (sb, Arc::new(receipts))); - } else if let Some(block_hash) = self.provider().block_hash_for_id(block_id)? { - block_and_receipts = self.cache().get_block_and_receipts(block_hash).await?; + .map(|(sb, receipts)| (sb, Arc::new(receipts)))) + } + + if let Some(block_hash) = self.provider().block_hash_for_id(block_id)? { + return Ok(self.cache().get_block_and_receipts(block_hash).await?)
} - if let Some((block, receipts)) = block_and_receipts { + Ok(None) + } + + /// Returns all transaction receipts in the block. + /// + /// Returns `None` if the block wasn't found. + pub(crate) async fn block_receipts( + &self, + block_id: BlockId, + ) -> EthResult>> { + if let Some((block, receipts)) = self.load_block_and_receipts(block_id).await? { let block_number = block.number; let base_fee = block.base_fee_per_gas; let block_hash = block.hash(); @@ -82,12 +90,6 @@ where let timestamp = block.timestamp; let block = block.unseal(); - #[cfg(feature = "optimism")] - let (block_timestamp, l1_block_info) = { - let body = reth_evm_optimism::extract_l1_info(&block); - (block.timestamp, body.ok()) - }; - let receipts = block .body .into_iter() @@ -104,18 +106,8 @@ where timestamp, }; - #[cfg(feature = "optimism")] - let op_tx_meta = - self.build_op_tx_meta(&tx, l1_block_info.clone(), block_timestamp)?; - - build_transaction_receipt_with_block_receipts( - tx, - meta, - receipt.clone(), - &receipts, - #[cfg(feature = "optimism")] - op_tx_meta, - ) + ReceiptResponseBuilder::new(&tx, meta, receipt, &receipts) + .map(|builder| builder.build()) }) .collect::>>(); return receipts.map(Some) diff --git a/crates/rpc/rpc/src/eth/api/mod.rs b/crates/rpc/rpc/src/eth/api/mod.rs index 6c936808e99..d3efbc86724 100644 --- a/crates/rpc/rpc/src/eth/api/mod.rs +++ b/crates/rpc/rpc/src/eth/api/mod.rs @@ -36,18 +36,16 @@ use std::{ }; use tokio::sync::{oneshot, Mutex}; -mod block; +pub mod block; mod call; pub(crate) mod fee_history; mod fees; -#[cfg(feature = "optimism")] -mod optimism; mod pending_block; mod server; mod sign; mod state; -mod transactions; +pub mod transactions; use crate::eth::traits::RawTransactionForwarder; pub use transactions::{EthTransactions, TransactionSource}; @@ -188,7 +186,7 @@ where } /// Returns the state cache frontend - pub(crate) fn cache(&self) -> &EthStateCache { + pub fn cache(&self) -> &EthStateCache { &self.inner.eth_cache } @@ -463,7 +461,8 @@ impl 
From for u64 { } /// Container type `EthApi` -struct EthApiInner { +#[allow(missing_debug_implementations)] +pub struct EthApiInner { /// The transaction pool. pool: Pool, /// The provider that can interact with the chain. diff --git a/crates/rpc/rpc/src/eth/api/transactions.rs b/crates/rpc/rpc/src/eth/api/transactions.rs index dc01dc12c38..6d275fe3f24 100644 --- a/crates/rpc/rpc/src/eth/api/transactions.rs +++ b/crates/rpc/rpc/src/eth/api/transactions.rs @@ -26,6 +26,7 @@ use reth_provider::{ }; use reth_revm::database::StateProviderDatabase; use reth_rpc_types::{ + other::OtherFields, transaction::{ EIP1559TransactionRequest, EIP2930TransactionRequest, EIP4844TransactionRequest, LegacyTransactionRequest, @@ -48,8 +49,6 @@ use revm_inspectors::tracing::{TracingInspector, TracingInspectorConfig}; use std::future::Future; use crate::eth::revm_utils::FillableTransaction; -#[cfg(feature = "optimism")] -use reth_rpc_types::OptimismTransactionReceiptFields; use revm_primitives::db::{Database, DatabaseRef}; /// Helper alias type for the state's [CacheDB] @@ -1460,7 +1459,6 @@ where /// Helper function for `eth_getTransactionReceipt` /// /// Returns the receipt - #[cfg(not(feature = "optimism"))] pub(crate) async fn build_transaction_receipt( &self, tx: TransactionSigned, @@ -1472,76 +1470,8 @@ where Some(recpts) => recpts, None => return Err(EthApiError::UnknownBlockNumber), }; - build_transaction_receipt_with_block_receipts(tx, meta, receipt, &all_receipts) - } - /// Helper function for `eth_getTransactionReceipt` (optimism) - /// - /// Returns the receipt - #[cfg(feature = "optimism")] - pub(crate) async fn build_transaction_receipt( - &self, - tx: TransactionSigned, - meta: TransactionMeta, - receipt: Receipt, - ) -> EthResult { - let (block, receipts) = self - .cache() - .get_block_and_receipts(meta.block_hash) - .await? 
- .ok_or(EthApiError::UnknownBlockNumber)?; - - let block = block.unseal(); - let l1_block_info = reth_evm_optimism::extract_l1_info(&block).ok(); - let optimism_tx_meta = self.build_op_tx_meta(&tx, l1_block_info, block.timestamp)?; - - build_transaction_receipt_with_block_receipts( - tx, - meta, - receipt, - &receipts, - optimism_tx_meta, - ) - } - - /// Builds op metadata object using the provided [TransactionSigned], L1 block info and - /// `block_timestamp`. The L1BlockInfo is used to calculate the l1 fee and l1 data gas for the - /// transaction. If the L1BlockInfo is not provided, the meta info will be empty. - #[cfg(feature = "optimism")] - pub(crate) fn build_op_tx_meta( - &self, - tx: &TransactionSigned, - l1_block_info: Option, - block_timestamp: u64, - ) -> EthResult { - use crate::eth::{api::optimism::OptimismTxMeta, optimism::OptimismEthApiError}; - use reth_evm_optimism::RethL1BlockInfo; - - let Some(l1_block_info) = l1_block_info else { return Ok(OptimismTxMeta::default()) }; - - let (l1_fee, l1_data_gas) = if !tx.is_deposit() { - let envelope_buf = tx.envelope_encoded(); - - let inner_l1_fee = l1_block_info - .l1_tx_data_fee( - &self.inner.provider.chain_spec(), - block_timestamp, - &envelope_buf, - tx.is_deposit(), - ) - .map_err(|_| OptimismEthApiError::L1BlockFeeError)?; - let inner_l1_data_gas = l1_block_info - .l1_data_gas(&self.inner.provider.chain_spec(), block_timestamp, &envelope_buf) - .map_err(|_| OptimismEthApiError::L1BlockGasError)?; - ( - Some(inner_l1_fee.saturating_to::()), - Some(inner_l1_data_gas.saturating_to::()), - ) - } else { - (None, None) - }; - - Ok(OptimismTxMeta::new(Some(l1_block_info), l1_fee, l1_data_gas)) + Ok(ReceiptResponseBuilder::new(&tx, meta, &receipt, &all_receipts)?.build()) } } @@ -1698,119 +1628,120 @@ impl From for Transaction { } } -/// Helper function to construct a transaction receipt -/// -/// Note: This requires _all_ block receipts because we need to calculate the gas used by the -/// transaction. 
-pub(crate) fn build_transaction_receipt_with_block_receipts( - transaction: TransactionSigned, - meta: TransactionMeta, - receipt: Receipt, - all_receipts: &[Receipt], - #[cfg(feature = "optimism")] optimism_tx_meta: crate::eth::api::optimism::OptimismTxMeta, -) -> EthResult { - // Note: we assume this transaction is valid, because it's mined (or part of pending block) and - // we don't need to check for pre EIP-2 - let from = - transaction.recover_signer_unchecked().ok_or(EthApiError::InvalidTransactionSignature)?; - - // get the previous transaction cumulative gas used - let gas_used = if meta.index == 0 { - receipt.cumulative_gas_used - } else { - let prev_tx_idx = (meta.index - 1) as usize; - all_receipts - .get(prev_tx_idx) - .map(|prev_receipt| receipt.cumulative_gas_used - prev_receipt.cumulative_gas_used) - .unwrap_or_default() - }; - - let blob_gas_used = transaction.transaction.blob_gas_used(); - // Blob gas price should only be present if the transaction is a blob transaction - let blob_gas_price = blob_gas_used.and_then(|_| meta.excess_blob_gas.map(calc_blob_gasprice)); - let logs_bloom = receipt.bloom_slow(); - - // get number of logs in the block - let mut num_logs = 0; - for prev_receipt in all_receipts.iter().take(meta.index as usize) { - num_logs += prev_receipt.logs.len(); - } +/// Receipt response builder. +#[derive(Debug)] +pub struct ReceiptResponseBuilder { + /// The base response body, contains L1 fields. + base: TransactionReceipt>, + /// Additional L2 fields. 
+ other: OtherFields, +} - let mut logs = Vec::with_capacity(receipt.logs.len()); - for (tx_log_idx, log) in receipt.logs.into_iter().enumerate() { - let rpclog = Log { - inner: log, - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - block_timestamp: Some(meta.timestamp), - transaction_hash: Some(meta.tx_hash), - transaction_index: Some(meta.index), - log_index: Some((num_logs + tx_log_idx) as u64), - removed: false, +impl ReceiptResponseBuilder { + /// Returns a new builder with the base response body (L1 fields) set. + /// + /// Note: This requires _all_ block receipts because we need to calculate the gas used by the + /// transaction. + pub fn new( + transaction: &TransactionSigned, + meta: TransactionMeta, + receipt: &Receipt, + all_receipts: &[Receipt], + ) -> EthResult { + // Note: we assume this transaction is valid, because it's mined (or part of pending block) + // and we don't need to check for pre EIP-2 + let from = transaction + .recover_signer_unchecked() + .ok_or(EthApiError::InvalidTransactionSignature)?; + + // get the previous transaction cumulative gas used + let gas_used = if meta.index == 0 { + receipt.cumulative_gas_used + } else { + let prev_tx_idx = (meta.index - 1) as usize; + all_receipts + .get(prev_tx_idx) + .map(|prev_receipt| receipt.cumulative_gas_used - prev_receipt.cumulative_gas_used) + .unwrap_or_default() }; - logs.push(rpclog); - } - let rpc_receipt = reth_rpc_types::Receipt { - status: receipt.success, - cumulative_gas_used: receipt.cumulative_gas_used as u128, - logs, - }; + let blob_gas_used = transaction.transaction.blob_gas_used(); + // Blob gas price should only be present if the transaction is a blob transaction + let blob_gas_price = + blob_gas_used.and_then(|_| meta.excess_blob_gas.map(calc_blob_gasprice)); + let logs_bloom = receipt.bloom_slow(); - #[allow(clippy::needless_update)] - let res_receipt = TransactionReceipt { - inner: AnyReceiptEnvelope { - inner: ReceiptWithBloom { receipt: 
rpc_receipt, logs_bloom }, - r#type: transaction.transaction.tx_type().into(), - }, - transaction_hash: meta.tx_hash, - transaction_index: Some(meta.index), - block_hash: Some(meta.block_hash), - block_number: Some(meta.block_number), - from, - to: None, - gas_used: gas_used as u128, - contract_address: None, - effective_gas_price: transaction.effective_gas_price(meta.base_fee), - // TODO pre-byzantium receipts have a post-transaction state root - state_root: None, - // EIP-4844 fields - blob_gas_price, - blob_gas_used: blob_gas_used.map(u128::from), - }; - let mut res_receipt = WithOtherFields::new(res_receipt); + // get number of logs in the block + let mut num_logs = 0; + for prev_receipt in all_receipts.iter().take(meta.index as usize) { + num_logs += prev_receipt.logs.len(); + } - #[cfg(feature = "optimism")] - { - let mut op_fields = OptimismTransactionReceiptFields::default(); - - if transaction.is_deposit() { - op_fields.deposit_nonce = receipt.deposit_nonce.map(reth_primitives::U64::from); - op_fields.deposit_receipt_version = - receipt.deposit_receipt_version.map(reth_primitives::U64::from); - } else if let Some(l1_block_info) = optimism_tx_meta.l1_block_info { - op_fields.l1_fee = optimism_tx_meta.l1_fee; - op_fields.l1_gas_used = optimism_tx_meta.l1_data_gas.map(|dg| { - dg + l1_block_info.l1_fee_overhead.unwrap_or_default().saturating_to::() - }); - op_fields.l1_fee_scalar = - Some(f64::from(l1_block_info.l1_base_fee_scalar) / 1_000_000.0); - op_fields.l1_gas_price = Some(l1_block_info.l1_base_fee.saturating_to()); + let mut logs = Vec::with_capacity(receipt.logs.len()); + for (tx_log_idx, log) in receipt.logs.iter().enumerate() { + let rpclog = Log { + inner: log.clone(), + block_hash: Some(meta.block_hash), + block_number: Some(meta.block_number), + block_timestamp: Some(meta.timestamp), + transaction_hash: Some(meta.tx_hash), + transaction_index: Some(meta.index), + log_index: Some((num_logs + tx_log_idx) as u64), + removed: false, + }; + 
logs.push(rpclog); } - res_receipt.other = op_fields.into(); + let rpc_receipt = reth_rpc_types::Receipt { + status: receipt.success, + cumulative_gas_used: receipt.cumulative_gas_used as u128, + logs, + }; + + let (contract_address, to) = match transaction.transaction.kind() { + Create => (Some(from.create(transaction.transaction.nonce())), None), + Call(addr) => (None, Some(Address(*addr))), + }; + + #[allow(clippy::needless_update)] + let base = TransactionReceipt { + inner: AnyReceiptEnvelope { + inner: ReceiptWithBloom { receipt: rpc_receipt, logs_bloom }, + r#type: transaction.transaction.tx_type().into(), + }, + transaction_hash: meta.tx_hash, + transaction_index: Some(meta.index), + block_hash: Some(meta.block_hash), + block_number: Some(meta.block_number), + from, + to, + gas_used: gas_used as u128, + contract_address, + effective_gas_price: transaction.effective_gas_price(meta.base_fee), + // TODO pre-byzantium receipts have a post-transaction state root + state_root: None, + // EIP-4844 fields + blob_gas_price, + blob_gas_used: blob_gas_used.map(u128::from), + }; + + Ok(Self { base, other: Default::default() }) } - match transaction.transaction.kind() { - Create => { - res_receipt.contract_address = Some(from.create(transaction.transaction.nonce())); - } - Call(addr) => { - res_receipt.to = Some(Address(*addr)); - } + /// Adds fields to response body. + pub fn add_other_fields(mut self, mut fields: OtherFields) -> Self { + self.other.append(&mut fields); + self } - Ok(res_receipt) + /// Builds a receipt response from the base response body, and any set additional fields. 
+ pub fn build(self) -> AnyTransactionReceipt { + let Self { base, other } = self; + let mut res = WithOtherFields::new(base); + res.other = other; + + res + } } #[cfg(test)] diff --git a/crates/rpc/rpc/src/eth/error.rs b/crates/rpc/rpc/src/eth/error.rs index 77bffee400b..90ed87facc0 100644 --- a/crates/rpc/rpc/src/eth/error.rs +++ b/crates/rpc/rpc/src/eth/error.rs @@ -40,11 +40,11 @@ pub enum EthApiError { /// Thrown when querying for `finalized` or `safe` block before the merge transition is /// finalized, /// - /// op-node uses case sensitive string comparison to parse this error: - /// + /// op-node now checks for either `Unknown block` OR `unknown block`: + /// /// /// TODO(#8045): Temporary, until a version of is pushed through that doesn't require this to figure out the EL sync status. - #[error("Unknown block")] + #[error("unknown block")] UnknownSafeOrFinalizedBlock, /// Thrown when an unknown block or transaction index is encountered #[error("unknown block or tx index")] diff --git a/crates/rpc/rpc/src/eth/mod.rs b/crates/rpc/rpc/src/eth/mod.rs index 8d8e982c2c7..b98bac20581 100644 --- a/crates/rpc/rpc/src/eth/mod.rs +++ b/crates/rpc/rpc/src/eth/mod.rs @@ -1,6 +1,6 @@ //! `eth` namespace handler implementation. 
-mod api; +pub mod api; pub mod bundle; pub mod cache; pub mod error; @@ -14,9 +14,6 @@ mod signer; pub mod traits; pub(crate) mod utils; -#[cfg(feature = "optimism")] -pub mod optimism; - pub use api::{ fee_history::{fee_history_cache_new_blocks_task, FeeHistoryCache, FeeHistoryCacheConfig}, EthApi, EthApiSpec, EthTransactions, TransactionSource, RPC_DEFAULT_GAS_CAP, diff --git a/crates/rpc/rpc/src/otterscan.rs b/crates/rpc/rpc/src/otterscan.rs index 2f62e66a31d..1682f6f88d7 100644 --- a/crates/rpc/rpc/src/otterscan.rs +++ b/crates/rpc/rpc/src/otterscan.rs @@ -129,7 +129,7 @@ where if tx_len != receipts.len() { return Err(internal_rpc_err( "the number of transactions does not match the number of receipts", - )); + )) } // make sure the block is full diff --git a/crates/rpc/rpc/src/result.rs b/crates/rpc/rpc/src/result.rs index 677b537adea..c204c1574b5 100644 --- a/crates/rpc/rpc/src/result.rs +++ b/crates/rpc/rpc/src/result.rs @@ -111,9 +111,7 @@ pub(crate) fn invalid_params_rpc_err( } /// Constructs an internal JSON-RPC error. 
-pub(crate) fn internal_rpc_err( - msg: impl Into, -) -> jsonrpsee::types::error::ErrorObject<'static> { +pub fn internal_rpc_err(msg: impl Into) -> jsonrpsee::types::error::ErrorObject<'static> { rpc_err(jsonrpsee::types::error::INTERNAL_ERROR_CODE, msg, None) } diff --git a/crates/rpc/rpc/src/txpool.rs b/crates/rpc/rpc/src/txpool.rs index e6f7d66b51e..202d69b575a 100644 --- a/crates/rpc/rpc/src/txpool.rs +++ b/crates/rpc/rpc/src/txpool.rs @@ -87,10 +87,9 @@ where inspect: &mut BTreeMap>, ) { let entry = inspect.entry(tx.sender()).or_default(); - let key = tx.nonce().to_string(); let tx = tx.to_recovered_transaction(); entry.insert( - key, + tx.nonce().to_string(), TxpoolInspectSummary { to: tx.to(), value: tx.value(), @@ -100,17 +99,18 @@ where ); } - let mut inspect = TxpoolInspect::default(); let AllPoolTransactions { pending, queued } = self.pool.all_transactions(); - for pending in pending { - insert(&pending.transaction, &mut inspect.pending); - } - for queued in queued { - insert(&queued.transaction, &mut inspect.queued); - } - - Ok(inspect) + Ok(TxpoolInspect { + pending: pending.iter().fold(Default::default(), |mut acc, tx| { + insert(&tx.transaction, &mut acc); + acc + }), + queued: queued.iter().fold(Default::default(), |mut acc, tx| { + insert(&tx.transaction, &mut acc); + acc + }), + }) } /// Retrieves the transactions contained within the txpool, returning pending as well as queued diff --git a/crates/stages-api/Cargo.toml b/crates/stages-api/Cargo.toml index 2101961fd2d..32c4258538a 100644 --- a/crates/stages-api/Cargo.toml +++ b/crates/stages-api/Cargo.toml @@ -27,7 +27,6 @@ metrics.workspace = true # async tokio = { workspace = true, features = ["sync"] } -tokio-stream.workspace = true futures-util.workspace = true # misc @@ -40,6 +39,7 @@ auto_impl.workspace = true assert_matches.workspace = true reth-provider = { workspace = true, features = ["test-utils"] } reth-interfaces = { workspace = true, features = ["test-utils"] } 
+tokio-stream.workspace = true [features] test-utils = [] diff --git a/crates/stages-api/src/error.rs b/crates/stages-api/src/error.rs index 37fe2b3fdbc..f6e528ca754 100644 --- a/crates/stages-api/src/error.rs +++ b/crates/stages-api/src/error.rs @@ -1,3 +1,4 @@ +use crate::PipelineEvent; use reth_consensus::ConsensusError; use reth_interfaces::{ db::DatabaseError as DbError, executor, p2p::error::DownloadError, RethError, @@ -5,9 +6,7 @@ use reth_interfaces::{ use reth_primitives::{BlockNumber, SealedHeader, StaticFileSegment, TxNumber}; use reth_provider::ProviderError; use thiserror::Error; - -use crate::PipelineEvent; -use tokio::sync::mpsc::error::SendError; +use tokio::sync::broadcast::error::SendError; /// Represents the specific error type within a block error. #[derive(Error, Debug)] diff --git a/crates/stages-api/src/pipeline/builder.rs b/crates/stages-api/src/pipeline/builder.rs index e76f76c604c..c059067259f 100644 --- a/crates/stages-api/src/pipeline/builder.rs +++ b/crates/stages-api/src/pipeline/builder.rs @@ -80,7 +80,7 @@ where max_block, static_file_producer, tip_tx, - listeners: Default::default(), + event_sender: Default::default(), progress: Default::default(), metrics_tx, } diff --git a/crates/stages-api/src/pipeline/mod.rs b/crates/stages-api/src/pipeline/mod.rs index 5aceb515b79..66a87a0f8a4 100644 --- a/crates/stages-api/src/pipeline/mod.rs +++ b/crates/stages-api/src/pipeline/mod.rs @@ -17,10 +17,9 @@ use reth_provider::{ }; use reth_prune::PrunerBuilder; use reth_static_file::StaticFileProducer; -use reth_tokio_util::EventListeners; +use reth_tokio_util::{EventSender, EventStream}; use std::pin::Pin; use tokio::sync::watch; -use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::*; mod builder; @@ -75,8 +74,8 @@ pub struct Pipeline { /// The maximum block number to sync to. max_block: Option, static_file_producer: StaticFileProducer, - /// All listeners for events the pipeline emits. 
- listeners: EventListeners, + /// Sender for events the pipeline emits. + event_sender: EventSender, /// Keeps track of the progress of the pipeline. progress: PipelineProgress, /// A receiver for the current chain tip to sync to. @@ -108,8 +107,8 @@ where } /// Listen for events on the pipeline. - pub fn events(&mut self) -> UnboundedReceiverStream { - self.listeners.new_listener() + pub fn events(&self) -> EventStream { + self.event_sender.new_listener() } /// Registers progress metrics for each registered stage @@ -251,7 +250,7 @@ where /// CAUTION: This method locks the static file producer Mutex, hence can block the thread if the /// lock is occupied. pub fn move_to_static_files(&self) -> RethResult<()> { - let mut static_file_producer = self.static_file_producer.lock(); + let static_file_producer = self.static_file_producer.lock(); // Copies data from database to static files let lowest_static_file_height = { @@ -312,7 +311,8 @@ where %to, "Unwind point too far for stage" ); - self.listeners.notify(PipelineEvent::Skipped { stage_id }); + self.event_sender.notify(PipelineEvent::Skipped { stage_id }); + continue } @@ -325,7 +325,7 @@ where ); while checkpoint.block_number > to { let input = UnwindInput { checkpoint, unwind_to: to, bad_block }; - self.listeners.notify(PipelineEvent::Unwind { stage_id, input }); + self.event_sender.notify(PipelineEvent::Unwind { stage_id, input }); let output = stage.unwind(&provider_rw, input); match output { @@ -350,7 +350,7 @@ where } provider_rw.save_stage_checkpoint(stage_id, checkpoint)?; - self.listeners + self.event_sender .notify(PipelineEvent::Unwound { stage_id, result: unwind_output }); self.provider_factory.static_file_provider().commit()?; @@ -359,7 +359,8 @@ where provider_rw = self.provider_factory.provider_rw()?; } Err(err) => { - self.listeners.notify(PipelineEvent::Error { stage_id }); + self.event_sender.notify(PipelineEvent::Error { stage_id }); + return 
Err(PipelineError::Stage(StageError::Fatal(Box::new(err)))) } } @@ -395,7 +396,7 @@ where prev_block = prev_checkpoint.map(|progress| progress.block_number), "Stage reached target block, skipping." ); - self.listeners.notify(PipelineEvent::Skipped { stage_id }); + self.event_sender.notify(PipelineEvent::Skipped { stage_id }); // We reached the maximum block, so we skip the stage return Ok(ControlFlow::NoProgress { @@ -405,7 +406,7 @@ where let exec_input = ExecInput { target, checkpoint: prev_checkpoint }; - self.listeners.notify(PipelineEvent::Prepare { + self.event_sender.notify(PipelineEvent::Prepare { pipeline_stages_progress: PipelineStagesProgress { current: stage_index + 1, total: total_stages, @@ -416,14 +417,15 @@ where }); if let Err(err) = stage.execute_ready(exec_input).await { - self.listeners.notify(PipelineEvent::Error { stage_id }); + self.event_sender.notify(PipelineEvent::Error { stage_id }); + match on_stage_error(&self.provider_factory, stage_id, prev_checkpoint, err)? { Some(ctrl) => return Ok(ctrl), None => continue, }; } - self.listeners.notify(PipelineEvent::Run { + self.event_sender.notify(PipelineEvent::Run { pipeline_stages_progress: PipelineStagesProgress { current: stage_index + 1, total: total_stages, @@ -448,7 +450,7 @@ where } provider_rw.save_stage_checkpoint(stage_id, checkpoint)?; - self.listeners.notify(PipelineEvent::Ran { + self.event_sender.notify(PipelineEvent::Ran { pipeline_stages_progress: PipelineStagesProgress { current: stage_index + 1, total: total_stages, @@ -471,7 +473,8 @@ where } Err(err) => { drop(provider_rw); - self.listeners.notify(PipelineEvent::Error { stage_id }); + self.event_sender.notify(PipelineEvent::Error { stage_id }); + if let Some(ctrl) = on_stage_error(&self.provider_factory, stage_id, prev_checkpoint, err)? 
{ @@ -575,7 +578,7 @@ impl std::fmt::Debug for Pipeline { f.debug_struct("Pipeline") .field("stages", &self.stages.iter().map(|stage| stage.id()).collect::>()) .field("max_block", &self.max_block) - .field("listeners", &self.listeners) + .field("event_sender", &self.event_sender) .finish() } } diff --git a/crates/stages/src/stages/execution.rs b/crates/stages/src/stages/execution.rs index 0f933cea782..d3bcfba1711 100644 --- a/crates/stages/src/stages/execution.rs +++ b/crates/stages/src/stages/execution.rs @@ -59,8 +59,7 @@ use tracing::*; /// - [tables::BlockBodyIndices] get tx index to know what needs to be unwinded /// - [tables::AccountsHistory] to remove change set and apply old values to /// - [tables::PlainAccountState] [tables::StoragesHistory] to remove change set and apply old -/// values -/// to [tables::PlainStorageState] +/// values to [tables::PlainStorageState] // false positive, we cannot derive it if !DB: Debug. #[allow(missing_debug_implementations)] pub struct ExecutionStage { @@ -240,9 +239,11 @@ where // Execute the block let execute_start = Instant::now(); - executor.execute_one((&block, td).into()).map_err(|error| StageError::Block { - block: Box::new(block.header.clone().seal_slow()), - error: BlockErrorKind::Execution(error), + executor.execute_and_verify_one((&block, td).into()).map_err(|error| { + StageError::Block { + block: Box::new(block.header.clone().seal_slow()), + error: BlockErrorKind::Execution(error), + } })?; execution_duration += execute_start.elapsed(); diff --git a/crates/stages/src/stages/hashing_account.rs b/crates/stages/src/stages/hashing_account.rs index 1a63f6d893c..6ae7fc5221b 100644 --- a/crates/stages/src/stages/hashing_account.rs +++ b/crates/stages/src/stages/hashing_account.rs @@ -53,38 +53,6 @@ impl AccountHashingStage { } } -impl Default for AccountHashingStage { - fn default() -> Self { - Self { - clean_threshold: 500_000, - commit_threshold: 100_000, - etl_config: EtlConfig::default(), - } - } -} - -// TODO: 
Rewrite this -/// `SeedOpts` provides configuration parameters for calling `AccountHashingStage::seed` -/// in unit tests or benchmarks to generate an initial database state for running the -/// stage. -/// -/// In order to check the "full hashing" mode of the stage you want to generate more -/// transitions than `AccountHashingStage.clean_threshold`. This requires: -/// 1. Creating enough blocks so there's enough transactions to generate -/// the required transition keys in the `BlockTransitionIndex` (which depends on the -/// `TxTransitionIndex` internally) -/// 2. Setting `blocks.len() > clean_threshold` so that there's enough diffs to actually -/// take the 2nd codepath -#[derive(Clone, Debug)] -pub struct SeedOpts { - /// The range of blocks to be generated - pub blocks: RangeInclusive, - /// The number of accounts to be generated - pub accounts: usize, - /// The range of transactions to be generated per block. - pub txs: Range, -} - #[cfg(any(test, feature = "test-utils"))] impl AccountHashingStage { /// Initializes the `PlainAccountState` table with `num_accounts` having some random state @@ -145,6 +113,16 @@ impl AccountHashingStage { } } +impl Default for AccountHashingStage { + fn default() -> Self { + Self { + clean_threshold: 500_000, + commit_threshold: 100_000, + etl_config: EtlConfig::default(), + } + } +} + impl Stage for AccountHashingStage { /// Return the id of the stage fn id(&self) -> StageId { @@ -281,6 +259,27 @@ fn collect( Ok(()) } +// TODO: Rewrite this +/// `SeedOpts` provides configuration parameters for calling `AccountHashingStage::seed` +/// in unit tests or benchmarks to generate an initial database state for running the +/// stage. +/// +/// In order to check the "full hashing" mode of the stage you want to generate more +/// transitions than `AccountHashingStage.clean_threshold`. This requires: +/// 1. 
Creating enough blocks so there's enough transactions to generate the required transition +/// keys in the `BlockTransitionIndex` (which depends on the `TxTransitionIndex` internally) +/// 2. Setting `blocks.len() > clean_threshold` so that there's enough diffs to actually take the +/// 2nd codepath +#[derive(Clone, Debug)] +pub struct SeedOpts { + /// The range of blocks to be generated + pub blocks: RangeInclusive, + /// The number of accounts to be generated + pub accounts: usize, + /// The range of transactions to be generated per block. + pub txs: Range, +} + fn stage_checkpoint_progress( provider: &DatabaseProviderRW, ) -> ProviderResult { diff --git a/crates/stages/src/stages/sender_recovery.rs b/crates/stages/src/stages/sender_recovery.rs index 0bb05e0c40e..2695fb074c2 100644 --- a/crates/stages/src/stages/sender_recovery.rs +++ b/crates/stages/src/stages/sender_recovery.rs @@ -507,10 +507,9 @@ mod tests { /// # Panics /// /// 1. If there are any entries in the [tables::TransactionSenders] table above a given - /// block number. - /// + /// block number. /// 2. If the is no requested block entry in the bodies table, but - /// [tables::TransactionSenders] is not empty. + /// [tables::TransactionSenders] is not empty. fn ensure_no_senders_by_block(&self, block: BlockNumber) -> Result<(), TestRunnerError> { let body_result = self .db diff --git a/crates/stages/src/stages/tx_lookup.rs b/crates/stages/src/stages/tx_lookup.rs index fae08e854dc..332bcf8e700 100644 --- a/crates/stages/src/stages/tx_lookup.rs +++ b/crates/stages/src/stages/tx_lookup.rs @@ -428,10 +428,9 @@ mod tests { /// # Panics /// /// 1. If there are any entries in the [tables::TransactionHashNumbers] table above a given - /// block number. - /// + /// block number. /// 2. If the is no requested block entry in the bodies table, but - /// [tables::TransactionHashNumbers] is not empty. + /// [tables::TransactionHashNumbers] is not empty. 
fn ensure_no_hash_by_block(&self, number: BlockNumber) -> Result<(), TestRunnerError> { let body_result = self .db diff --git a/crates/static-file-types/Cargo.toml b/crates/static-file-types/Cargo.toml new file mode 100644 index 00000000000..63ba40c8f52 --- /dev/null +++ b/crates/static-file-types/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "reth-static-file-types" +version.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true +description = "Commonly used types for static file usage in reth." + +[lints] +workspace = true + +[dependencies] +alloy-primitives.workspace = true + +clap = { workspace = true, features = ["derive"], optional = true } +derive_more.workspace = true +serde = { workspace = true, features = ["derive"] } +strum = { workspace = true, features = ["derive"] } + +[features] +clap = ["dep:clap"] \ No newline at end of file diff --git a/crates/primitives/src/static_file/compression.rs b/crates/static-file-types/src/compression.rs similarity index 100% rename from crates/primitives/src/static_file/compression.rs rename to crates/static-file-types/src/compression.rs diff --git a/crates/primitives/src/static_file/filters.rs b/crates/static-file-types/src/filters.rs similarity index 100% rename from crates/primitives/src/static_file/filters.rs rename to crates/static-file-types/src/filters.rs diff --git a/crates/primitives/src/static_file/mod.rs b/crates/static-file-types/src/lib.rs similarity index 84% rename from crates/primitives/src/static_file/mod.rs rename to crates/static-file-types/src/lib.rs index e7e9e47fd25..26d2496948b 100644 --- a/crates/primitives/src/static_file/mod.rs +++ b/crates/static-file-types/src/lib.rs @@ -1,4 +1,12 @@ -//! StaticFile primitives. +//! Commonly used types for static file usage. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] mod compression; mod filters; diff --git a/crates/primitives/src/static_file/segment.rs b/crates/static-file-types/src/segment.rs similarity index 97% rename from crates/primitives/src/static_file/segment.rs rename to crates/static-file-types/src/segment.rs index a9ad2a075f9..82b937f2944 100644 --- a/crates/primitives/src/static_file/segment.rs +++ b/crates/static-file-types/src/segment.rs @@ -1,7 +1,5 @@ -use crate::{ - static_file::{Compression, Filters, InclusionFilter}, - BlockNumber, TxNumber, -}; +use crate::{BlockNumber, Compression, Filters, InclusionFilter}; +use alloy_primitives::TxNumber; use derive_more::Display; use serde::{Deserialize, Serialize}; use std::{ops::RangeInclusive, str::FromStr}; @@ -385,7 +383,7 @@ mod tests { Compression::Lz4, Filters::WithFilters( InclusionFilter::Cuckoo, - crate::static_file::PerfectHashingFunction::Fmph, + crate::PerfectHashingFunction::Fmph, ), )), ), @@ -397,7 +395,7 @@ mod tests { Compression::Zstd, Filters::WithFilters( InclusionFilter::Cuckoo, - crate::static_file::PerfectHashingFunction::Fmph, + crate::PerfectHashingFunction::Fmph, ), )), ), @@ -409,7 +407,7 @@ mod tests { Compression::ZstdWithDictionary, Filters::WithFilters( InclusionFilter::Cuckoo, - crate::static_file::PerfectHashingFunction::Fmph, + crate::PerfectHashingFunction::Fmph, ), )), ), diff --git a/crates/static-file/Cargo.toml b/crates/static-file/Cargo.toml index 1345b2f232f..0f6608c8084 100644 --- a/crates/static-file/Cargo.toml +++ b/crates/static-file/Cargo.toml @@ -21,6 +21,7 @@ reth-nippy-jar.workspace = true reth-tokio-util.workspace = true # async +tokio.workspace = 
true tokio-stream.workspace = true # misc diff --git a/crates/static-file/src/static_file_producer.rs b/crates/static-file/src/static_file_producer.rs index c7a365c9afa..4eb08256114 100644 --- a/crates/static-file/src/static_file_producer.rs +++ b/crates/static-file/src/static_file_producer.rs @@ -10,13 +10,12 @@ use reth_provider::{ providers::{StaticFileProvider, StaticFileWriter}, ProviderFactory, }; -use reth_tokio_util::EventListeners; +use reth_tokio_util::{EventSender, EventStream}; use std::{ ops::{Deref, RangeInclusive}, sync::Arc, time::Instant, }; -use tokio_stream::wrappers::UnboundedReceiverStream; use tracing::{debug, trace}; /// Result of [StaticFileProducerInner::run] execution. @@ -64,7 +63,7 @@ pub struct StaticFileProducerInner { /// needed in [StaticFileProducerInner] to prevent attempting to move prunable data to static /// files. See [StaticFileProducerInner::get_static_file_targets]. prune_modes: PruneModes, - listeners: EventListeners, + event_sender: EventSender, } /// Static File targets, per data part, measured in [`BlockNumber`]. @@ -107,12 +106,17 @@ impl StaticFileProducerInner { static_file_provider: StaticFileProvider, prune_modes: PruneModes, ) -> Self { - Self { provider_factory, static_file_provider, prune_modes, listeners: Default::default() } + Self { + provider_factory, + static_file_provider, + prune_modes, + event_sender: Default::default(), + } } /// Listen for events on the static_file_producer. - pub fn events(&mut self) -> UnboundedReceiverStream { - self.listeners.new_listener() + pub fn events(&self) -> EventStream { + self.event_sender.new_listener() } /// Run the static_file_producer. @@ -123,7 +127,7 @@ impl StaticFileProducerInner { /// /// NOTE: it doesn't delete the data from database, and the actual deleting (aka pruning) logic /// lives in the `prune` crate. 
- pub fn run(&mut self, targets: StaticFileTargets) -> StaticFileProducerResult { + pub fn run(&self, targets: StaticFileTargets) -> StaticFileProducerResult { // If there are no targets, do not produce any static files and return early if !targets.any() { return Ok(targets) @@ -133,7 +137,7 @@ impl StaticFileProducerInner { self.static_file_provider.get_highest_static_files() )); - self.listeners.notify(StaticFileProducerEvent::Started { targets: targets.clone() }); + self.event_sender.notify(StaticFileProducerEvent::Started { targets: targets.clone() }); debug!(target: "static_file", ?targets, "StaticFileProducer started"); let start = Instant::now(); @@ -173,7 +177,7 @@ impl StaticFileProducerInner { let elapsed = start.elapsed(); // TODO(alexey): track in metrics debug!(target: "static_file", ?targets, ?elapsed, "StaticFileProducer finished"); - self.listeners + self.event_sender .notify(StaticFileProducerEvent::Finished { targets: targets.clone(), elapsed }); Ok(targets) @@ -304,7 +308,7 @@ mod tests { fn run() { let (provider_factory, static_file_provider, _temp_static_files_dir) = setup(); - let mut static_file_producer = StaticFileProducerInner::new( + let static_file_producer = StaticFileProducerInner::new( provider_factory, static_file_provider.clone(), PruneModes::default(), @@ -392,7 +396,7 @@ mod tests { let tx = tx.clone(); std::thread::spawn(move || { - let mut locked_producer = producer.lock(); + let locked_producer = producer.lock(); if i == 0 { // Let other threads spawn as well. std::thread::sleep(Duration::from_millis(100)); diff --git a/crates/storage/codecs/src/lib.rs b/crates/storage/codecs/src/lib.rs index 9dcef12730d..79f57991906 100644 --- a/crates/storage/codecs/src/lib.rs +++ b/crates/storage/codecs/src/lib.rs @@ -1,5 +1,9 @@ //! Compact codec. //! +//! *Warning*: The `Compact` encoding format and its implementations are +//! designed for storing and retrieving data internally. They are not hardened +//! 
to safely read potentially malicious data. +//! //! ## Feature Flags //! //! - `alloy`: [Compact] implementation for various alloy types. diff --git a/crates/storage/db-common/Cargo.toml b/crates/storage/db-common/Cargo.toml new file mode 100644 index 00000000000..675dde4ba59 --- /dev/null +++ b/crates/storage/db-common/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "reth-db-common" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +# reth +reth-primitives.workspace = true +reth-db = { workspace = true, features = ["mdbx"] } +reth-interfaces = { workspace = true, features = ["clap"] } +reth-provider.workspace = true +reth-config.workspace = true +reth-trie.workspace = true +reth-etl.workspace = true +reth-codecs.workspace = true + +# misc +eyre.workspace = true +thiserror.workspace = true + +# io +serde.workspace = true +serde_json.workspace = true + +# tracing +tracing.workspace = true + +[lints] +workspace = true \ No newline at end of file diff --git a/crates/node-core/src/init.rs b/crates/storage/db-common/src/init.rs similarity index 99% rename from crates/node-core/src/init.rs rename to crates/storage/db-common/src/init.rs index 6d924b6b1a4..05435ce37e9 100644 --- a/crates/node-core/src/init.rs +++ b/crates/storage/db-common/src/init.rs @@ -373,7 +373,7 @@ fn parse_accounts( while let Ok(n) = reader.read_line(&mut line) { if n == 0 { - break; + break } let GenesisAccountWithAddress { genesis_account, address } = serde_json::from_str(&line)?; diff --git a/crates/storage/db-common/src/lib.rs b/crates/storage/db-common/src/lib.rs new file mode 100644 index 00000000000..abcbc62762a --- /dev/null +++ b/crates/storage/db-common/src/lib.rs @@ -0,0 +1,11 @@ +//! 
Common db operations + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +pub mod init; diff --git a/crates/storage/db/Cargo.toml b/crates/storage/db/Cargo.toml index a764f270d61..34de306f695 100644 --- a/crates/storage/db/Cargo.toml +++ b/crates/storage/db/Cargo.toml @@ -15,7 +15,7 @@ workspace = true # reth reth-primitives.workspace = true reth-fs-util.workspace = true -reth-interfaces.workspace = true +reth-storage-errors.workspace = true reth-codecs.workspace = true reth-libmdbx = { workspace = true, optional = true, features = [ "return-borrowed", diff --git a/crates/storage/db/src/implementation/mdbx/cursor.rs b/crates/storage/db/src/implementation/mdbx/cursor.rs index 43adc249272..a34525f20e0 100644 --- a/crates/storage/db/src/implementation/mdbx/cursor.rs +++ b/crates/storage/db/src/implementation/mdbx/cursor.rs @@ -11,8 +11,8 @@ use crate::{ tables::utils::*, DatabaseError, }; -use reth_interfaces::db::{DatabaseErrorInfo, DatabaseWriteError, DatabaseWriteOperation}; use reth_libmdbx::{Error as MDBXError, TransactionKind, WriteFlags, RO, RW}; +use reth_storage_errors::db::{DatabaseErrorInfo, DatabaseWriteError, DatabaseWriteOperation}; use std::{borrow::Cow, collections::Bound, marker::PhantomData, ops::RangeBounds, sync::Arc}; /// Read only Cursor. @@ -191,8 +191,7 @@ impl DbDupCursorRO for Cursor { /// - Some(key), Some(subkey): a `key` item whose data is >= than `subkey` /// - Some(key), None: first item of a specified `key` /// - None, Some(subkey): like first case, but in the first key - /// - None, None: first item in the table - /// of a DUPSORT table. + /// - None, None: first item in the table of a DUPSORT table. 
fn walk_dup( &mut self, key: Option, diff --git a/crates/storage/db/src/implementation/mdbx/mod.rs b/crates/storage/db/src/implementation/mdbx/mod.rs index 1db86bc54f4..58977811f23 100644 --- a/crates/storage/db/src/implementation/mdbx/mod.rs +++ b/crates/storage/db/src/implementation/mdbx/mod.rs @@ -13,11 +13,11 @@ use crate::{ }; use eyre::Context; use metrics::{gauge, Label}; -use reth_interfaces::db::LogLevel; use reth_libmdbx::{ DatabaseFlags, Environment, EnvironmentFlags, Geometry, MaxReadTransactionDuration, Mode, PageSize, SyncMode, RO, RW, }; +use reth_storage_errors::db::LogLevel; use reth_tracing::tracing::error; use std::{ ops::Deref, @@ -455,9 +455,9 @@ mod tests { test_utils::*, AccountChangeSets, }; - use reth_interfaces::db::{DatabaseWriteError, DatabaseWriteOperation}; use reth_libmdbx::Error; use reth_primitives::{Account, Address, Header, IntegerList, StorageEntry, B256, U256}; + use reth_storage_errors::db::{DatabaseWriteError, DatabaseWriteOperation}; use std::str::FromStr; use tempfile::TempDir; diff --git a/crates/storage/db/src/implementation/mdbx/tx.rs b/crates/storage/db/src/implementation/mdbx/tx.rs index 62380861980..184ca4d1cc3 100644 --- a/crates/storage/db/src/implementation/mdbx/tx.rs +++ b/crates/storage/db/src/implementation/mdbx/tx.rs @@ -9,8 +9,8 @@ use crate::{ DatabaseError, }; use once_cell::sync::OnceCell; -use reth_interfaces::db::{DatabaseWriteError, DatabaseWriteOperation}; use reth_libmdbx::{ffi::DBI, CommitLatency, Transaction, TransactionKind, WriteFlags, RW}; +use reth_storage_errors::db::{DatabaseWriteError, DatabaseWriteOperation}; use reth_tracing::tracing::{debug, trace, warn}; use std::{ backtrace::Backtrace, @@ -395,8 +395,8 @@ mod tests { database::Database, mdbx::DatabaseArguments, models::client_version::ClientVersion, tables, transaction::DbTx, DatabaseEnv, DatabaseEnvKind, }; - use reth_interfaces::db::DatabaseError; use reth_libmdbx::MaxReadTransactionDuration; + use reth_storage_errors::db::DatabaseError; 
use std::{sync::atomic::Ordering, thread::sleep, time::Duration}; use tempfile::tempdir; diff --git a/crates/storage/db/src/lib.rs b/crates/storage/db/src/lib.rs index 6b6a22319f8..102374c3bc3 100644 --- a/crates/storage/db/src/lib.rs +++ b/crates/storage/db/src/lib.rs @@ -81,7 +81,7 @@ pub mod mdbx { } pub use abstraction::*; -pub use reth_interfaces::db::{DatabaseError, DatabaseWriteOperation}; +pub use reth_storage_errors::db::{DatabaseError, DatabaseWriteOperation}; pub use tables::*; pub use utils::is_database_empty; diff --git a/crates/storage/db/src/static_file/cursor.rs b/crates/storage/db/src/static_file/cursor.rs index 89337b56e12..ac08430d454 100644 --- a/crates/storage/db/src/static_file/cursor.rs +++ b/crates/storage/db/src/static_file/cursor.rs @@ -1,9 +1,9 @@ use super::mask::{ColumnSelectorOne, ColumnSelectorThree, ColumnSelectorTwo}; use crate::table::Decompress; use derive_more::{Deref, DerefMut}; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_nippy_jar::{DataReader, NippyJar, NippyJarCursor}; use reth_primitives::{static_file::SegmentHeader, B256}; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::sync::Arc; /// Cursor of a static file segment. 
diff --git a/crates/storage/db/src/static_file/generation.rs b/crates/storage/db/src/static_file/generation.rs index 50db32adb2a..b663f146268 100644 --- a/crates/storage/db/src/static_file/generation.rs +++ b/crates/storage/db/src/static_file/generation.rs @@ -5,8 +5,8 @@ use crate::{ RawKey, RawTable, }; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_nippy_jar::{ColumnResult, NippyJar, NippyJarHeader, PHFKey}; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_tracing::tracing::*; use std::{error::Error as StdError, ops::RangeInclusive}; diff --git a/crates/storage/db/src/utils.rs b/crates/storage/db/src/utils.rs index d3e760f3d99..cf6a0341ef7 100644 --- a/crates/storage/db/src/utils.rs +++ b/crates/storage/db/src/utils.rs @@ -23,9 +23,25 @@ pub fn is_database_empty>(path: P) -> bool { if !path.exists() { true + } else if path.is_file() { + false } else if let Ok(dir) = path.read_dir() { dir.count() == 0 } else { true } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn is_database_empty_false_if_db_path_is_a_file() { + let db_file = tempfile::NamedTempFile::new().unwrap(); + + let result = is_database_empty(&db_file); + + assert!(!result); + } +} diff --git a/crates/storage/errors/Cargo.toml b/crates/storage/errors/Cargo.toml new file mode 100644 index 00000000000..c1ce595ea92 --- /dev/null +++ b/crates/storage/errors/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "reth-storage-errors" +version.workspace = true +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +reth-primitives.workspace = true +reth-fs-util.workspace = true + +thiserror.workspace = true +clap = { workspace = true, features = ["derive"], optional = true } + +[features] +clap = ["dep:clap"] \ No newline at end of file diff --git a/crates/interfaces/src/db.rs b/crates/storage/errors/src/db.rs 
similarity index 100% rename from crates/interfaces/src/db.rs rename to crates/storage/errors/src/db.rs diff --git a/crates/storage/errors/src/lib.rs b/crates/storage/errors/src/lib.rs new file mode 100644 index 00000000000..6bab8f051af --- /dev/null +++ b/crates/storage/errors/src/lib.rs @@ -0,0 +1,15 @@ +//! Commonly used error types used when interacting with storage. + +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] +#![cfg_attr(not(test), warn(unused_crate_dependencies))] +#![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] + +/// Database error +pub mod db; + +/// Provider error +pub mod provider; diff --git a/crates/interfaces/src/provider.rs b/crates/storage/errors/src/provider.rs similarity index 100% rename from crates/interfaces/src/provider.rs rename to crates/storage/errors/src/provider.rs diff --git a/crates/storage/nippy-jar/src/error.rs b/crates/storage/nippy-jar/src/error.rs index 2f7bcf804cf..d59500842c7 100644 --- a/crates/storage/nippy-jar/src/error.rs +++ b/crates/storage/nippy-jar/src/error.rs @@ -40,7 +40,7 @@ pub enum NippyJarError { #[error("the size of an offset must be at most 8 bytes, got {offset_size}")] OffsetSizeTooBig { /// The read offset size in number of bytes. - offset_size: u64, + offset_size: u8, }, #[error("attempted to read an out of bounds offset: {index}")] OffsetOutOfBounds { diff --git a/crates/storage/nippy-jar/src/lib.rs b/crates/storage/nippy-jar/src/lib.rs index 435e91e877d..2eafe68c409 100644 --- a/crates/storage/nippy-jar/src/lib.rs +++ b/crates/storage/nippy-jar/src/lib.rs @@ -1,4 +1,8 @@ //! Immutable data store format. +//! +//! *Warning*: The `NippyJar` encoding format and its implementations are +//! designed for storing and retrieving data internally. They are not hardened +//! 
to safely read potentially malicious data. #![doc( html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", @@ -472,7 +476,7 @@ pub struct DataReader { /// Mmap handle for offsets. offset_mmap: Mmap, /// Number of bytes that represent one offset. - offset_size: u64, + offset_size: u8, } impl DataReader { @@ -487,7 +491,7 @@ impl DataReader { let offset_mmap = unsafe { Mmap::map(&offset_file)? }; // First byte is the size of one offset in bytes - let offset_size = offset_mmap[0] as u64; + let offset_size = offset_mmap[0]; // Ensure that the size of an offset is at most 8 bytes. if offset_size > 8 { @@ -521,7 +525,8 @@ impl DataReader { /// Returns total number of offsets in the file. /// The size of one offset is determined by the file itself. pub fn offsets_count(&self) -> Result { - Ok((self.offset_file.metadata()?.len().saturating_sub(1) / self.offset_size) as usize) + Ok((self.offset_file.metadata()?.len().saturating_sub(1) / self.offset_size as u64) + as usize) } /// Reads one offset-sized (determined by the offset file) u64 at the provided index. @@ -530,7 +535,7 @@ impl DataReader { let offset_end = index + self.offset_size as usize; if offset_end > self.offset_mmap.len() { - return Err(NippyJarError::OffsetOutOfBounds { index }); + return Err(NippyJarError::OffsetOutOfBounds { index }) } buffer[..self.offset_size as usize].copy_from_slice(&self.offset_mmap[index..offset_end]); @@ -538,7 +543,7 @@ impl DataReader { } /// Returns number of bytes that represent one offset. - pub fn offset_size(&self) -> u64 { + pub fn offset_size(&self) -> u8 { self.offset_size } diff --git a/crates/storage/nippy-jar/src/writer.rs b/crates/storage/nippy-jar/src/writer.rs index 6417e60076c..bd56b4a6b38 100644 --- a/crates/storage/nippy-jar/src/writer.rs +++ b/crates/storage/nippy-jar/src/writer.rs @@ -7,7 +7,7 @@ use std::{ }; /// Size of one offset in bytes. 
-const OFFSET_SIZE_BYTES: u64 = 8; +const OFFSET_SIZE_BYTES: u8 = 8; /// Writer of [`NippyJar`]. Handles table data and offsets only. /// @@ -112,7 +112,7 @@ impl NippyJarWriter { let mut offsets_file = OpenOptions::new().read(true).write(true).open(offsets)?; // First byte of the offset file is the size of one offset in bytes - offsets_file.write_all(&[OFFSET_SIZE_BYTES as u8])?; + offsets_file.write_all(&[OFFSET_SIZE_BYTES])?; offsets_file.seek(SeekFrom::End(0))?; Ok((data_file, offsets_file, is_created)) @@ -133,9 +133,9 @@ impl NippyJarWriter { return Err(NippyJarError::FrozenJar) } - let expected_offsets_file_size = 1 + // first byte is the size of one offset - OFFSET_SIZE_BYTES * self.jar.rows as u64 * self.jar.columns as u64 + // `offset size * num rows * num columns` - OFFSET_SIZE_BYTES; // expected size of the data file + let expected_offsets_file_size: u64 = (1 + // first byte is the size of one offset + OFFSET_SIZE_BYTES as usize* self.jar.rows * self.jar.columns + // `offset size * num rows * num columns` + OFFSET_SIZE_BYTES as usize) as u64; // expected size of the data file let actual_offsets_file_size = self.offsets_file.get_ref().metadata()?.len(); // Offsets configuration wasn't properly committed @@ -151,9 +151,9 @@ impl NippyJarWriter { // `num rows = (file size - 1 - size of one offset) / num columns` self.jar.rows = ((actual_offsets_file_size. saturating_sub(1). // first byte is the size of one offset - saturating_sub(OFFSET_SIZE_BYTES) / // expected size of the data file + saturating_sub(OFFSET_SIZE_BYTES as u64) / // expected size of the data file (self.jar.columns as u64)) / - OFFSET_SIZE_BYTES) as usize; + OFFSET_SIZE_BYTES as u64) as usize; // Freeze row count changed self.jar.freeze_config()?; @@ -183,7 +183,7 @@ impl NippyJarWriter { .get_ref() .metadata()? 
.len() - .saturating_sub(OFFSET_SIZE_BYTES * (index as u64 + 1)); + .saturating_sub(OFFSET_SIZE_BYTES as u64 * (index as u64 + 1)); self.offsets_file.get_mut().set_len(new_len)?; drop(reader); @@ -318,7 +318,7 @@ impl NippyJarWriter { // Handle non-empty offset file if length > 1 { // first byte is reserved for `bytes_per_offset`, which is 8 initially. - let num_offsets = (length - 1) / OFFSET_SIZE_BYTES; + let num_offsets = (length - 1) / OFFSET_SIZE_BYTES as u64; if remaining_to_prune as u64 > num_offsets { return Err(NippyJarError::InvalidPruning( @@ -336,10 +336,10 @@ impl NippyJarWriter { self.data_file.get_mut().set_len(0)?; } else { // Calculate the new length for the on-disk offset list - let new_len = 1 + new_num_offsets * OFFSET_SIZE_BYTES; + let new_len = 1 + new_num_offsets * OFFSET_SIZE_BYTES as u64; // Seek to the position of the last offset self.offsets_file - .seek(SeekFrom::Start(new_len.saturating_sub(OFFSET_SIZE_BYTES)))?; + .seek(SeekFrom::Start(new_len.saturating_sub(OFFSET_SIZE_BYTES as u64)))?; // Read the last offset value let mut last_offset = [0u8; OFFSET_SIZE_BYTES as usize]; self.offsets_file.get_ref().read_exact(&mut last_offset)?; diff --git a/crates/storage/provider/Cargo.toml b/crates/storage/provider/Cargo.toml index 4fe4ffbb959..d9a55516198 100644 --- a/crates/storage/provider/Cargo.toml +++ b/crates/storage/provider/Cargo.toml @@ -13,8 +13,10 @@ workspace = true [dependencies] # reth +reth-execution-errors.workspace = true reth-primitives.workspace = true reth-fs-util.workspace = true +reth-storage-errors.workspace = true reth-interfaces.workspace = true reth-db.workspace = true reth-trie = { workspace = true, features = ["metrics"] } @@ -65,4 +67,4 @@ rand.workspace = true [features] test-utils = ["alloy-rlp", "reth-db/test-utils", "reth-nippy-jar/test-utils"] -optimism = ["reth-primitives/optimism", "reth-interfaces/optimism"] +optimism = ["reth-primitives/optimism"] diff --git 
a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs index 947c6609b96..52c9366fde8 100644 --- a/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs +++ b/crates/storage/provider/src/bundle_state/bundle_state_with_receipts.rs @@ -8,13 +8,13 @@ use reth_db::{ transaction::{DbTx, DbTxMut}, }; use reth_evm::execute::BatchBlockExecutionOutput; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ logs_bloom, revm::compat::{into_reth_acc, into_revm_acc}, Account, Address, BlockHash, BlockNumber, Bloom, Bytecode, Log, Receipt, Receipts, StaticFileSegment, StorageEntry, B256, U256, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::HashedPostState; pub use revm::db::states::OriginalValuesKnown; use revm::{ @@ -182,12 +182,11 @@ impl BundleStateWithReceipts { /// Returns the receipt root for all recorded receipts. /// Note: this function calculated Bloom filters for every receipt and created merkle trees /// of receipt. This is a expensive operation. - #[allow(unused_variables)] - pub fn receipts_root_slow(&self, block_number: BlockNumber) -> Option { + pub fn receipts_root_slow(&self, _block_number: BlockNumber) -> Option { #[cfg(feature = "optimism")] panic!("This should not be called in optimism mode. Use `optimism_receipts_root_slow` instead."); #[cfg(not(feature = "optimism"))] - self.receipts.root_slow(self.block_number_to_index(block_number)?) + self.receipts.root_slow(self.block_number_to_index(_block_number)?) } /// Returns the receipt root for all recorded receipts. 
diff --git a/crates/storage/provider/src/bundle_state/state_changes.rs b/crates/storage/provider/src/bundle_state/state_changes.rs index 7f7bde79e3f..71551fe6923 100644 --- a/crates/storage/provider/src/bundle_state/state_changes.rs +++ b/crates/storage/provider/src/bundle_state/state_changes.rs @@ -4,8 +4,8 @@ use reth_db::{ tables, transaction::{DbTx, DbTxMut}, }; -use reth_interfaces::db::DatabaseError; use reth_primitives::{revm::compat::into_reth_acc, Bytecode, StorageEntry, U256}; +use reth_storage_errors::db::DatabaseError; use revm::db::states::{PlainStorageChangeset, StateChangeset}; /// A change to the state of the world. diff --git a/crates/storage/provider/src/bundle_state/state_reverts.rs b/crates/storage/provider/src/bundle_state/state_reverts.rs index cc16a50ccab..1fe7a348198 100644 --- a/crates/storage/provider/src/bundle_state/state_reverts.rs +++ b/crates/storage/provider/src/bundle_state/state_reverts.rs @@ -5,8 +5,8 @@ use reth_db::{ tables, transaction::{DbTx, DbTxMut}, }; -use reth_interfaces::db::DatabaseError; use reth_primitives::{revm::compat::into_reth_acc, BlockNumber, StorageEntry, B256, U256}; +use reth_storage_errors::db::DatabaseError; use revm::db::states::{PlainStateReverts, PlainStorageRevert, RevertToSlot}; use std::iter::Peekable; diff --git a/crates/storage/provider/src/chain.rs b/crates/storage/provider/src/chain.rs index 2ff70bc4add..bc419aa3f0d 100644 --- a/crates/storage/provider/src/chain.rs +++ b/crates/storage/provider/src/chain.rs @@ -1,7 +1,7 @@ //! Contains [Chain], a chain of blocks and their final state. 
use crate::bundle_state::BundleStateWithReceipts; -use reth_interfaces::{executor::BlockExecutionError, RethResult}; +use reth_execution_errors::BlockExecutionError; use reth_primitives::{ Address, BlockHash, BlockNumHash, BlockNumber, ForkBlock, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, TransactionSigned, TransactionSignedEcRecovered, TxHash, @@ -235,15 +235,14 @@ impl Chain { /// Merge two chains by appending the given chain into the current one. /// /// The state of accounts for this chain is set to the state of the newest chain. - pub fn append_chain(&mut self, other: Chain) -> RethResult<()> { + pub fn append_chain(&mut self, other: Chain) -> Result<(), BlockExecutionError> { let chain_tip = self.tip(); let other_fork_block = other.fork_block(); if chain_tip.hash() != other_fork_block.hash { return Err(BlockExecutionError::AppendChainDoesntConnect { chain_tip: Box::new(chain_tip.num_hash()), other_chain_fork: Box::new(other_fork_block), - } - .into()) + }) } // Insert blocks from other chain diff --git a/crates/storage/provider/src/lib.rs b/crates/storage/provider/src/lib.rs index 2b146245efb..864a962414e 100644 --- a/crates/storage/provider/src/lib.rs +++ b/crates/storage/provider/src/lib.rs @@ -29,7 +29,7 @@ pub use providers::{ pub mod test_utils; /// Re-export provider error. 
-pub use reth_interfaces::provider::ProviderError; +pub use reth_storage_errors::provider::ProviderError; pub mod chain; pub use chain::{Chain, DisplayBlocksChain}; diff --git a/crates/storage/provider/src/providers/bundle_state_provider.rs b/crates/storage/provider/src/providers/bundle_state_provider.rs index e3364cadb28..bc042b32764 100644 --- a/crates/storage/provider/src/providers/bundle_state_provider.rs +++ b/crates/storage/provider/src/providers/bundle_state_provider.rs @@ -1,8 +1,8 @@ use crate::{ AccountReader, BlockHashReader, BundleStateDataProvider, StateProvider, StateRootProvider, }; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{trie::AccountProof, Account, Address, BlockNumber, Bytecode, B256}; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::updates::TrieUpdates; use revm::db::BundleState; diff --git a/crates/storage/provider/src/providers/consistent_view.rs b/crates/storage/provider/src/providers/consistent_view.rs index 07d3614efe7..b956768130d 100644 --- a/crates/storage/provider/src/providers/consistent_view.rs +++ b/crates/storage/provider/src/providers/consistent_view.rs @@ -1,10 +1,10 @@ use crate::{BlockNumReader, DatabaseProviderFactory, DatabaseProviderRO, HeaderProvider}; use reth_db::database::Database; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{GotExpected, B256}; +use reth_storage_errors::provider::ProviderResult; use std::marker::PhantomData; -pub use reth_interfaces::provider::ConsistentViewError; +pub use reth_storage_errors::provider::ConsistentViewError; /// A consistent view over state in the database. 
/// diff --git a/crates/storage/provider/src/providers/database/mod.rs b/crates/storage/provider/src/providers/database/mod.rs index c84e9d8cec2..60dc635eb3a 100644 --- a/crates/storage/provider/src/providers/database/mod.rs +++ b/crates/storage/provider/src/providers/database/mod.rs @@ -9,7 +9,7 @@ use crate::{ }; use reth_db::{database::Database, init_db, models::StoredBlockBodyIndices, DatabaseEnv}; use reth_evm::ConfigureEvmEnv; -use reth_interfaces::{provider::ProviderResult, RethError, RethResult}; +use reth_interfaces::{RethError, RethResult}; use reth_primitives::{ stage::{StageCheckpoint, StageId}, Address, Block, BlockHash, BlockHashOrNumber, BlockNumber, BlockWithSenders, ChainInfo, @@ -30,6 +30,7 @@ mod provider; pub use provider::{DatabaseProvider, DatabaseProviderRO, DatabaseProviderRW}; use reth_db::mdbx::DatabaseArguments; +use reth_storage_errors::provider::ProviderResult; /// A common provider that fetches data from a database or static file. /// @@ -583,7 +584,6 @@ mod tests { test_utils::{create_test_static_files_dir, ERROR_TEMPDIR}, }; use reth_interfaces::{ - provider::ProviderError, test_utils::{ generators, generators::{random_block, random_header}, @@ -594,6 +594,7 @@ mod tests { hex_literal::hex, ChainSpecBuilder, PruneMode, PruneModes, SealedBlock, StaticFileSegment, TxNumber, B256, U256, }; + use reth_storage_errors::provider::ProviderError; use std::{ops::RangeInclusive, sync::Arc}; use tokio::sync::watch; diff --git a/crates/storage/provider/src/providers/database/provider.rs b/crates/storage/provider/src/providers/database/provider.rs index 6e07b7c46a1..643bc23e65e 100644 --- a/crates/storage/provider/src/providers/database/provider.rs +++ b/crates/storage/provider/src/providers/database/provider.rs @@ -375,20 +375,20 @@ impl DatabaseProvider { /// /// If UNWIND is false we will just read the state/blocks and return them. /// - /// 1. 
Iterate over the [BlockBodyIndices][tables::BlockBodyIndices] table to get all - /// the transaction ids. - /// 2. Iterate over the [StorageChangeSets][tables::StorageChangeSets] table - /// and the [AccountChangeSets][tables::AccountChangeSets] tables in reverse order to - /// reconstruct the changesets. - /// - In order to have both the old and new values in the changesets, we also access the - /// plain state tables. + /// 1. Iterate over the [BlockBodyIndices][tables::BlockBodyIndices] table to get all the + /// transaction ids. + /// 2. Iterate over the [StorageChangeSets][tables::StorageChangeSets] table and the + /// [AccountChangeSets][tables::AccountChangeSets] tables in reverse order to reconstruct + /// the changesets. + /// - In order to have both the old and new values in the changesets, we also access the + /// plain state tables. /// 3. While iterating over the changeset tables, if we encounter a new account or storage slot, - /// we: + /// we: /// 1. Take the old value from the changeset /// 2. Take the new value from the plain state /// 3. Save the old value to the local state /// 4. While iterating over the changeset tables, if we encounter an account/storage slot we - /// have seen before we: + /// have seen before we: /// 1. Take the old value from the changeset /// 2. Take the new value from the local state /// 3. 
Set the local state to the value in the changeset diff --git a/crates/storage/provider/src/providers/state/historical.rs b/crates/storage/provider/src/providers/state/historical.rs index ed64314aa2b..f815c282d56 100644 --- a/crates/storage/provider/src/providers/state/historical.rs +++ b/crates/storage/provider/src/providers/state/historical.rs @@ -10,11 +10,11 @@ use reth_db::{ transaction::DbTx, BlockNumberList, }; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{ constants::EPOCH_SLOTS, trie::AccountProof, Account, Address, BlockNumber, Bytecode, StaticFileSegment, StorageKey, StorageValue, B256, }; +use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostState}; use revm::db::BundleState; use std::fmt::Debug; @@ -413,8 +413,8 @@ mod tests { transaction::{DbTx, DbTxMut}, BlockNumberList, }; - use reth_interfaces::provider::ProviderError; use reth_primitives::{address, b256, Account, Address, StorageEntry, B256, U256}; + use reth_storage_errors::provider::ProviderError; const ADDRESS: Address = address!("0000000000000000000000000000000000000001"); const HIGHER_ADDRESS: Address = address!("0000000000000000000000000000000000000005"); diff --git a/crates/storage/provider/src/providers/state/latest.rs b/crates/storage/provider/src/providers/state/latest.rs index d3c8af6b7fc..5079a15d759 100644 --- a/crates/storage/provider/src/providers/state/latest.rs +++ b/crates/storage/provider/src/providers/state/latest.rs @@ -7,11 +7,11 @@ use reth_db::{ tables, transaction::DbTx, }; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ trie::AccountProof, Account, Address, BlockNumber, Bytecode, StaticFileSegment, StorageKey, StorageValue, B256, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::{proof::Proof, updates::TrieUpdates, HashedPostState}; use revm::db::BundleState; diff --git a/crates/storage/provider/src/providers/state/macros.rs 
b/crates/storage/provider/src/providers/state/macros.rs index 0e2f088bfeb..0efd8d9c7d9 100644 --- a/crates/storage/provider/src/providers/state/macros.rs +++ b/crates/storage/provider/src/providers/state/macros.rs @@ -31,20 +31,20 @@ macro_rules! delegate_provider_impls { $crate::providers::state::macros::delegate_impls_to_as_ref!( for $target => StateRootProvider $(where [$($generics)*])? { - fn state_root(&self, state: &revm::db::BundleState) -> reth_interfaces::provider::ProviderResult; - fn state_root_with_updates(&self, state: &revm::db::BundleState) -> reth_interfaces::provider::ProviderResult<(reth_primitives::B256, reth_trie::updates::TrieUpdates)>; + fn state_root(&self, state: &revm::db::BundleState) -> reth_storage_errors::provider::ProviderResult; + fn state_root_with_updates(&self, state: &revm::db::BundleState) -> reth_storage_errors::provider::ProviderResult<(reth_primitives::B256, reth_trie::updates::TrieUpdates)>; } AccountReader $(where [$($generics)*])? { - fn basic_account(&self, address: reth_primitives::Address) -> reth_interfaces::provider::ProviderResult>; + fn basic_account(&self, address: reth_primitives::Address) -> reth_storage_errors::provider::ProviderResult>; } BlockHashReader $(where [$($generics)*])? 
{ - fn block_hash(&self, number: u64) -> reth_interfaces::provider::ProviderResult>; - fn canonical_hashes_range(&self, start: reth_primitives::BlockNumber, end: reth_primitives::BlockNumber) -> reth_interfaces::provider::ProviderResult>; + fn block_hash(&self, number: u64) -> reth_storage_errors::provider::ProviderResult>; + fn canonical_hashes_range(&self, start: reth_primitives::BlockNumber, end: reth_primitives::BlockNumber) -> reth_storage_errors::provider::ProviderResult>; } StateProvider $(where [$($generics)*])?{ - fn storage(&self, account: reth_primitives::Address, storage_key: reth_primitives::StorageKey) -> reth_interfaces::provider::ProviderResult>; - fn proof(&self, address: reth_primitives::Address, keys: &[reth_primitives::B256]) -> reth_interfaces::provider::ProviderResult; - fn bytecode_by_hash(&self, code_hash: reth_primitives::B256) -> reth_interfaces::provider::ProviderResult>; + fn storage(&self, account: reth_primitives::Address, storage_key: reth_primitives::StorageKey) -> reth_storage_errors::provider::ProviderResult>; + fn proof(&self, address: reth_primitives::Address, keys: &[reth_primitives::B256]) -> reth_storage_errors::provider::ProviderResult; + fn bytecode_by_hash(&self, code_hash: reth_primitives::B256) -> reth_storage_errors::provider::ProviderResult>; } ); } diff --git a/crates/storage/provider/src/providers/static_file/jar.rs b/crates/storage/provider/src/providers/static_file/jar.rs index 6dc75e3074d..7f4b14fee37 100644 --- a/crates/storage/provider/src/providers/static_file/jar.rs +++ b/crates/storage/provider/src/providers/static_file/jar.rs @@ -10,11 +10,11 @@ use reth_db::{ codecs::CompactU256, static_file::{HeaderMask, ReceiptMask, StaticFileCursor, TransactionMask}, }; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ Address, BlockHash, BlockHashOrNumber, BlockNumber, ChainInfo, Header, Receipt, SealedHeader, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, 
TxNumber, B256, U256, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ ops::{Deref, RangeBounds}, sync::Arc, diff --git a/crates/storage/provider/src/providers/static_file/manager.rs b/crates/storage/provider/src/providers/static_file/manager.rs index 610021d70cf..275c8935e1a 100644 --- a/crates/storage/provider/src/providers/static_file/manager.rs +++ b/crates/storage/provider/src/providers/static_file/manager.rs @@ -16,7 +16,6 @@ use reth_db::{ table::Table, tables, }; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_nippy_jar::NippyJar; use reth_primitives::{ keccak256, @@ -26,6 +25,7 @@ use reth_primitives::{ TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap}, ops::{Deref, Range, RangeBounds, RangeInclusive}, diff --git a/crates/storage/provider/src/providers/static_file/mod.rs b/crates/storage/provider/src/providers/static_file/mod.rs index 46a1b7453ec..cb9f879dde6 100644 --- a/crates/storage/provider/src/providers/static_file/mod.rs +++ b/crates/storage/provider/src/providers/static_file/mod.rs @@ -9,9 +9,9 @@ pub use writer::{StaticFileProviderRW, StaticFileProviderRWRefMut}; mod metrics; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_nippy_jar::NippyJar; use reth_primitives::{static_file::SegmentHeader, StaticFileSegment}; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ops::Deref, sync::Arc}; const BLOCKS_PER_STATIC_FILE: u64 = 500_000; diff --git a/crates/storage/provider/src/providers/static_file/writer.rs b/crates/storage/provider/src/providers/static_file/writer.rs index 3a0f2d03174..2a2fcf12ad8 100644 --- a/crates/storage/provider/src/providers/static_file/writer.rs +++ b/crates/storage/provider/src/providers/static_file/writer.rs @@ -6,13 +6,13 @@ use 
super::{ use dashmap::mapref::one::RefMut; use reth_codecs::Compact; use reth_db::codecs::CompactU256; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_nippy_jar::{NippyJar, NippyJarError, NippyJarWriter}; use reth_primitives::{ static_file::{find_fixed_range, SegmentHeader, SegmentRangeInclusive}, BlockHash, BlockNumber, Header, Receipt, StaticFileSegment, TransactionSignedNoHash, TxNumber, U256, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::{ path::{Path, PathBuf}, sync::{Arc, Weak}, @@ -522,7 +522,7 @@ impl StaticFileProviderRW { if self.prune_on_commit.is_some() { return Err(ProviderError::NippyJar( "Pruning should be comitted before appending or pruning more data".to_string(), - )); + )) } Ok(()) } diff --git a/crates/storage/provider/src/test_utils/mock.rs b/crates/storage/provider/src/test_utils/mock.rs index 893bd052d0b..7dd7c5b4dc5 100644 --- a/crates/storage/provider/src/test_utils/mock.rs +++ b/crates/storage/provider/src/test_utils/mock.rs @@ -8,7 +8,6 @@ use crate::{ use parking_lot::Mutex; use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ keccak256, trie::AccountProof, Account, Address, Block, BlockHash, BlockHashOrNumber, BlockId, BlockNumber, BlockWithSenders, Bytecode, Bytes, ChainInfo, ChainSpec, Header, Receipt, @@ -16,6 +15,7 @@ use reth_primitives::{ TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, U256, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use reth_trie::updates::TrieUpdates; use revm::{ db::BundleState, diff --git a/crates/storage/provider/src/test_utils/noop.rs b/crates/storage/provider/src/test_utils/noop.rs index 02890eaf193..6593b74ccfa 100644 --- a/crates/storage/provider/src/test_utils/noop.rs +++ b/crates/storage/provider/src/test_utils/noop.rs @@ -8,7 +8,6 @@ use 
crate::{ }; use reth_db::models::{AccountBeforeTx, StoredBlockBodyIndices}; use reth_evm::ConfigureEvmEnv; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{ stage::{StageCheckpoint, StageId}, trie::AccountProof, @@ -18,6 +17,7 @@ use reth_primitives::{ TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, Withdrawal, Withdrawals, B256, MAINNET, U256, }; +use reth_storage_errors::provider::ProviderResult; use reth_trie::updates::TrieUpdates; use revm::{ db::BundleState, diff --git a/crates/storage/provider/src/traits/account.rs b/crates/storage/provider/src/traits/account.rs index 16042bce122..09161d31bb6 100644 --- a/crates/storage/provider/src/traits/account.rs +++ b/crates/storage/provider/src/traits/account.rs @@ -1,7 +1,7 @@ use auto_impl::auto_impl; use reth_db::models::AccountBeforeTx; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{Account, Address, BlockNumber}; +use reth_storage_errors::provider::ProviderResult; use std::{ collections::{BTreeMap, BTreeSet}, ops::{RangeBounds, RangeInclusive}, diff --git a/crates/storage/provider/src/traits/block.rs b/crates/storage/provider/src/traits/block.rs index 1b767f350a3..99984d346b8 100644 --- a/crates/storage/provider/src/traits/block.rs +++ b/crates/storage/provider/src/traits/block.rs @@ -4,11 +4,11 @@ use crate::{ }; use auto_impl::auto_impl; use reth_db::models::StoredBlockBodyIndices; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{ Block, BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, BlockWithSenders, Header, PruneModes, Receipt, SealedBlock, SealedBlockWithSenders, SealedHeader, B256, }; +use reth_storage_errors::provider::ProviderResult; use reth_trie::{updates::TrieUpdates, HashedPostState}; use std::ops::RangeInclusive; diff --git a/crates/storage/provider/src/traits/block_hash.rs b/crates/storage/provider/src/traits/block_hash.rs index 8bb334c8b84..7413bb09c21 100644 --- 
a/crates/storage/provider/src/traits/block_hash.rs +++ b/crates/storage/provider/src/traits/block_hash.rs @@ -1,6 +1,6 @@ use auto_impl::auto_impl; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHashOrNumber, BlockNumber, B256}; +use reth_storage_errors::provider::ProviderResult; /// Client trait for fetching block hashes by number. #[auto_impl(&, Arc, Box)] diff --git a/crates/storage/provider/src/traits/block_id.rs b/crates/storage/provider/src/traits/block_id.rs index fd52f6c326b..8ca2c98f8b0 100644 --- a/crates/storage/provider/src/traits/block_id.rs +++ b/crates/storage/provider/src/traits/block_id.rs @@ -1,6 +1,6 @@ use super::BlockHashReader; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumber, BlockNumberOrTag, ChainInfo, B256}; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; /// Client trait for getting important block numbers (such as the latest block number), converting /// block hashes to numbers, and fetching a block hash from its block number. diff --git a/crates/storage/provider/src/traits/database_provider.rs b/crates/storage/provider/src/traits/database_provider.rs index 4335917cc1a..152c4935fd0 100644 --- a/crates/storage/provider/src/traits/database_provider.rs +++ b/crates/storage/provider/src/traits/database_provider.rs @@ -1,6 +1,6 @@ use crate::DatabaseProviderRO; use reth_db::database::Database; -use reth_interfaces::provider::ProviderResult; +use reth_storage_errors::provider::ProviderResult; /// Database provider factory. 
pub trait DatabaseProviderFactory { diff --git a/crates/storage/provider/src/traits/evm_env.rs b/crates/storage/provider/src/traits/evm_env.rs index 8c821984601..cecedad0c91 100644 --- a/crates/storage/provider/src/traits/evm_env.rs +++ b/crates/storage/provider/src/traits/evm_env.rs @@ -1,6 +1,6 @@ use reth_evm::ConfigureEvmEnv; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHashOrNumber, Header}; +use reth_storage_errors::provider::ProviderResult; use revm::primitives::{BlockEnv, CfgEnv, CfgEnvWithHandlerCfg, SpecId}; /// A provider type that knows chain specific information required to configure an diff --git a/crates/storage/provider/src/traits/hashing.rs b/crates/storage/provider/src/traits/hashing.rs index 7978a4b1940..4e0375c7c4d 100644 --- a/crates/storage/provider/src/traits/hashing.rs +++ b/crates/storage/provider/src/traits/hashing.rs @@ -1,7 +1,7 @@ use auto_impl::auto_impl; use reth_db::models::BlockNumberAddress; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{Account, Address, BlockNumber, StorageEntry, B256}; +use reth_storage_errors::provider::ProviderResult; use std::{ collections::{BTreeMap, BTreeSet, HashMap}, ops::{Range, RangeInclusive}, diff --git a/crates/storage/provider/src/traits/header.rs b/crates/storage/provider/src/traits/header.rs index ad04f52ac95..4719470a776 100644 --- a/crates/storage/provider/src/traits/header.rs +++ b/crates/storage/provider/src/traits/header.rs @@ -1,6 +1,6 @@ use auto_impl::auto_impl; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHash, BlockHashOrNumber, BlockNumber, Header, SealedHeader, U256}; +use reth_storage_errors::provider::ProviderResult; use std::ops::RangeBounds; /// Client trait for fetching `Header` related data. 
diff --git a/crates/storage/provider/src/traits/history.rs b/crates/storage/provider/src/traits/history.rs index daef02b9f36..ec9625bdcdd 100644 --- a/crates/storage/provider/src/traits/history.rs +++ b/crates/storage/provider/src/traits/history.rs @@ -1,7 +1,7 @@ use auto_impl::auto_impl; use reth_db::models::BlockNumberAddress; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{Address, BlockNumber, B256}; +use reth_storage_errors::provider::ProviderResult; use std::{ collections::BTreeMap, ops::{Range, RangeInclusive}, diff --git a/crates/storage/provider/src/traits/prune_checkpoint.rs b/crates/storage/provider/src/traits/prune_checkpoint.rs index 60470bfecde..a872e27b33a 100644 --- a/crates/storage/provider/src/traits/prune_checkpoint.rs +++ b/crates/storage/provider/src/traits/prune_checkpoint.rs @@ -1,5 +1,5 @@ -use reth_interfaces::provider::ProviderResult; use reth_primitives::{PruneCheckpoint, PruneSegment}; +use reth_storage_errors::provider::ProviderResult; /// The trait for fetching prune checkpoint related data. 
#[auto_impl::auto_impl(&, Arc)] diff --git a/crates/storage/provider/src/traits/receipts.rs b/crates/storage/provider/src/traits/receipts.rs index 8ac2917d77d..138adcfa779 100644 --- a/crates/storage/provider/src/traits/receipts.rs +++ b/crates/storage/provider/src/traits/receipts.rs @@ -1,7 +1,7 @@ use std::ops::RangeBounds; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHashOrNumber, BlockId, BlockNumberOrTag, Receipt, TxHash, TxNumber}; +use reth_storage_errors::provider::ProviderResult; use crate::BlockIdReader; diff --git a/crates/storage/provider/src/traits/stage_checkpoint.rs b/crates/storage/provider/src/traits/stage_checkpoint.rs index ff58fa3eafb..1eca807638d 100644 --- a/crates/storage/provider/src/traits/stage_checkpoint.rs +++ b/crates/storage/provider/src/traits/stage_checkpoint.rs @@ -1,8 +1,8 @@ -use reth_interfaces::provider::ProviderResult; use reth_primitives::{ stage::{StageCheckpoint, StageId}, BlockNumber, }; +use reth_storage_errors::provider::ProviderResult; /// The trait for fetching stage checkpoint related data. #[auto_impl::auto_impl(&, Arc)] diff --git a/crates/storage/provider/src/traits/state.rs b/crates/storage/provider/src/traits/state.rs index ac72d52f9f4..f31469a3def 100644 --- a/crates/storage/provider/src/traits/state.rs +++ b/crates/storage/provider/src/traits/state.rs @@ -5,11 +5,11 @@ use crate::{ }; use auto_impl::auto_impl; use reth_db::transaction::{DbTx, DbTxMut}; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ trie::AccountProof, Address, BlockHash, BlockId, BlockNumHash, BlockNumber, BlockNumberOrTag, Bytecode, StorageKey, StorageValue, B256, KECCAK_EMPTY, U256, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use revm::db::OriginalValuesKnown; /// Type alias of boxed [StateProvider]. 
diff --git a/crates/storage/provider/src/traits/stats.rs b/crates/storage/provider/src/traits/stats.rs index dece75e287b..97052cf594e 100644 --- a/crates/storage/provider/src/traits/stats.rs +++ b/crates/storage/provider/src/traits/stats.rs @@ -1,5 +1,5 @@ use reth_db::table::Table; -use reth_interfaces::provider::ProviderResult; +use reth_storage_errors::provider::ProviderResult; /// The trait for fetching provider statistics. #[auto_impl::auto_impl(&, Arc)] diff --git a/crates/storage/provider/src/traits/storage.rs b/crates/storage/provider/src/traits/storage.rs index 302acad8b18..04cb3a0d2dd 100644 --- a/crates/storage/provider/src/traits/storage.rs +++ b/crates/storage/provider/src/traits/storage.rs @@ -4,8 +4,8 @@ use std::{ }; use auto_impl::auto_impl; -use reth_interfaces::provider::ProviderResult; use reth_primitives::{Address, BlockNumber, StorageEntry, B256}; +use reth_storage_errors::provider::ProviderResult; /// Storage reader #[auto_impl(&, Arc, Box)] diff --git a/crates/storage/provider/src/traits/transactions.rs b/crates/storage/provider/src/traits/transactions.rs index 3e798bb419c..d693c52f80e 100644 --- a/crates/storage/provider/src/traits/transactions.rs +++ b/crates/storage/provider/src/traits/transactions.rs @@ -1,9 +1,9 @@ use crate::{BlockNumReader, BlockReader}; -use reth_interfaces::provider::{ProviderError, ProviderResult}; use reth_primitives::{ Address, BlockHashOrNumber, BlockNumber, TransactionMeta, TransactionSigned, TransactionSignedNoHash, TxHash, TxNumber, }; +use reth_storage_errors::provider::{ProviderError, ProviderResult}; use std::ops::{Range, RangeBounds, RangeInclusive}; /// Client trait for fetching [TransactionSigned] related data. 
diff --git a/crates/storage/provider/src/traits/trie.rs b/crates/storage/provider/src/traits/trie.rs index 1fa5d780ba2..52f3317a3f3 100644 --- a/crates/storage/provider/src/traits/trie.rs +++ b/crates/storage/provider/src/traits/trie.rs @@ -1,6 +1,6 @@ use auto_impl::auto_impl; -use reth_interfaces::provider::ProviderResult; use reth_primitives::B256; +use reth_storage_errors::provider::ProviderResult; use reth_trie::updates::TrieUpdates; use revm::db::BundleState; diff --git a/crates/storage/provider/src/traits/withdrawals.rs b/crates/storage/provider/src/traits/withdrawals.rs index a54dc7db816..b79cd253976 100644 --- a/crates/storage/provider/src/traits/withdrawals.rs +++ b/crates/storage/provider/src/traits/withdrawals.rs @@ -1,5 +1,5 @@ -use reth_interfaces::provider::ProviderResult; use reth_primitives::{BlockHashOrNumber, Withdrawal, Withdrawals}; +use reth_storage_errors::provider::ProviderResult; /// Client trait for fetching [Withdrawal] related data. #[auto_impl::auto_impl(&, Arc)] diff --git a/crates/tokio-util/Cargo.toml b/crates/tokio-util/Cargo.toml index e8c21e0fa05..ccace030c0f 100644 --- a/crates/tokio-util/Cargo.toml +++ b/crates/tokio-util/Cargo.toml @@ -12,7 +12,11 @@ description = "Additional utilities for working with Tokio in reth." workspace = true [dependencies] +tracing.workspace = true # async tokio = { workspace = true, features = ["sync"] } tokio-stream = { workspace = true, features = ["sync"] } + +[dev-dependencies] +tokio = { workspace = true, features = ["full", "macros"] } \ No newline at end of file diff --git a/crates/tokio-util/src/event_listeners.rs b/crates/tokio-util/src/event_listeners.rs deleted file mode 100644 index 3c940e28022..00000000000 --- a/crates/tokio-util/src/event_listeners.rs +++ /dev/null @@ -1,46 +0,0 @@ -use tokio::sync::mpsc; -use tokio_stream::wrappers::UnboundedReceiverStream; - -/// A collection of event listeners for a task. 
-#[derive(Clone, Debug)] -pub struct EventListeners { - /// All listeners for events - listeners: Vec>, -} - -impl Default for EventListeners { - fn default() -> Self { - Self { listeners: Vec::new() } - } -} - -impl EventListeners { - /// Send an event to all listeners. - /// - /// Channels that were closed are removed. - pub fn notify(&mut self, event: T) { - self.listeners.retain(|listener| listener.send(event.clone()).is_ok()) - } - - /// Add a new event listener. - pub fn new_listener(&mut self) -> UnboundedReceiverStream { - let (sender, receiver) = mpsc::unbounded_channel(); - self.listeners.push(sender); - UnboundedReceiverStream::new(receiver) - } - - /// Push new event listener. - pub fn push_listener(&mut self, listener: mpsc::UnboundedSender) { - self.listeners.push(listener); - } - - /// Returns the number of registered listeners. - pub fn len(&self) -> usize { - self.listeners.len() - } - - /// Returns true if there are no registered listeners. - pub fn is_empty(&self) -> bool { - self.listeners.is_empty() - } -} diff --git a/crates/tokio-util/src/event_sender.rs b/crates/tokio-util/src/event_sender.rs new file mode 100644 index 00000000000..3ed6e85910d --- /dev/null +++ b/crates/tokio-util/src/event_sender.rs @@ -0,0 +1,42 @@ +use crate::EventStream; +use tokio::sync::broadcast::{self, Sender}; +use tracing::error; + +const DEFAULT_SIZE_BROADCAST_CHANNEL: usize = 2000; + +/// A bounded broadcast channel for a task. +#[derive(Debug, Clone)] +pub struct EventSender { + /// The sender part of the broadcast channel + sender: Sender, +} + +impl Default for EventSender +where + T: Clone + Send + Sync + 'static, +{ + fn default() -> Self { + Self::new(DEFAULT_SIZE_BROADCAST_CHANNEL) + } +} + +impl EventSender { + /// Creates a new `EventSender`. + pub fn new(events_channel_size: usize) -> Self { + let (sender, _) = broadcast::channel(events_channel_size); + Self { sender } + } + + /// Broadcasts an event to all listeners. 
+ pub fn notify(&self, event: T) { + if self.sender.send(event).is_err() { + error!("channel closed"); + } + } + + /// Creates a new event stream with a subscriber to the sender as the + /// receiver. + pub fn new_listener(&self) -> EventStream { + EventStream::new(self.sender.subscribe()) + } +} diff --git a/crates/tokio-util/src/event_stream.rs b/crates/tokio-util/src/event_stream.rs new file mode 100644 index 00000000000..67bc72a97d0 --- /dev/null +++ b/crates/tokio-util/src/event_stream.rs @@ -0,0 +1,92 @@ +//! Event streams related functionality. + +use std::{ + pin::Pin, + task::{Context, Poll}, +}; +use tokio_stream::Stream; +use tracing::warn; + +/// Thin wrapper around tokio's BroadcastStream to allow skipping broadcast errors. +#[derive(Debug)] +pub struct EventStream { + inner: tokio_stream::wrappers::BroadcastStream, +} + +impl EventStream +where + T: Clone + Send + 'static, +{ + /// Creates a new `EventStream`. + pub fn new(receiver: tokio::sync::broadcast::Receiver) -> Self { + let inner = tokio_stream::wrappers::BroadcastStream::new(receiver); + EventStream { inner } + } +} + +impl Stream for EventStream +where + T: Clone + Send + 'static, +{ + type Item = T; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + loop { + match Pin::new(&mut self.inner).poll_next(cx) { + Poll::Ready(Some(Ok(item))) => return Poll::Ready(Some(item)), + Poll::Ready(Some(Err(e))) => { + warn!("BroadcastStream lagged: {e:?}"); + continue + } + Poll::Ready(None) => return Poll::Ready(None), + Poll::Pending => return Poll::Pending, + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tokio::sync::broadcast; + use tokio_stream::StreamExt; + + #[tokio::test] + async fn test_event_stream_yields_items() { + let (tx, _) = broadcast::channel(16); + let my_stream = EventStream::new(tx.subscribe()); + + tx.send(1).unwrap(); + tx.send(2).unwrap(); + tx.send(3).unwrap(); + + // drop the sender to terminate the stream and allow collect to work. 
+ drop(tx); + + let items: Vec = my_stream.collect().await; + + assert_eq!(items, vec![1, 2, 3]); + } + + #[tokio::test] + async fn test_event_stream_skips_lag_errors() { + let (tx, _) = broadcast::channel(2); + let my_stream = EventStream::new(tx.subscribe()); + + let mut _rx2 = tx.subscribe(); + let mut _rx3 = tx.subscribe(); + + tx.send(1).unwrap(); + tx.send(2).unwrap(); + tx.send(3).unwrap(); + tx.send(4).unwrap(); // This will cause lag for the first subscriber + + // drop the sender to terminate the stream and allow collect to work. + drop(tx); + + // Ensure lag errors are skipped and only valid items are collected + let items: Vec = my_stream.collect().await; + + assert_eq!(items, vec![3, 4]); + } +} diff --git a/crates/tokio-util/src/lib.rs b/crates/tokio-util/src/lib.rs index 7db8dcfba16..2053bf60bc5 100644 --- a/crates/tokio-util/src/lib.rs +++ b/crates/tokio-util/src/lib.rs @@ -8,5 +8,7 @@ #![cfg_attr(not(test), warn(unused_crate_dependencies))] #![cfg_attr(docsrs, feature(doc_cfg, doc_auto_cfg))] -mod event_listeners; -pub use event_listeners::EventListeners; +mod event_sender; +mod event_stream; +pub use event_sender::EventSender; +pub use event_stream::EventStream; diff --git a/crates/transaction-pool/src/pool/mod.rs b/crates/transaction-pool/src/pool/mod.rs index a27b9d02167..163f30ea6b4 100644 --- a/crates/transaction-pool/src/pool/mod.rs +++ b/crates/transaction-pool/src/pool/mod.rs @@ -34,16 +34,16 @@ //! //! In essence the transaction pool is made of three separate sub-pools: //! -//! - Pending Pool: Contains all transactions that are valid on the current state and satisfy -//! (3. a)(1): _No_ nonce gaps. A _pending_ transaction is considered _ready_ when it has the lowest -//! nonce of all transactions from the same sender. Once a _ready_ transaction with nonce `n` has -//! been executed, the next highest transaction from the same sender `n + 1` becomes ready. +//! 
- Pending Pool: Contains all transactions that are valid on the current state and satisfy (3. +//! a)(1): _No_ nonce gaps. A _pending_ transaction is considered _ready_ when it has the lowest +//! nonce of all transactions from the same sender. Once a _ready_ transaction with nonce `n` has +//! been executed, the next highest transaction from the same sender `n + 1` becomes ready. //! -//! - Queued Pool: Contains all transactions that are currently blocked by missing -//! transactions: (3. a)(2): _With_ nonce gaps or due to lack of funds. +//! - Queued Pool: Contains all transactions that are currently blocked by missing transactions: +//! (3. a)(2): _With_ nonce gaps or due to lack of funds. //! -//! - Basefee Pool: To account for the dynamic base fee requirement (3. b) which could render -//! an EIP-1559 and all subsequent transactions of the sender currently invalid. +//! - Basefee Pool: To account for the dynamic base fee requirement (3. b) which could render an +//! EIP-1559 and all subsequent transactions of the sender currently invalid. //! //! The classification of transactions is always dependent on the current state that is changed as //! soon as a new block is mined. Once a new block is mined, the account changeset must be applied diff --git a/crates/transaction-pool/src/pool/pending.rs b/crates/transaction-pool/src/pool/pending.rs index 7e733a6593c..d78af79085b 100644 --- a/crates/transaction-pool/src/pool/pending.rs +++ b/crates/transaction-pool/src/pool/pending.rs @@ -90,9 +90,9 @@ impl PendingPool { /// Returns an iterator over all transactions that are _currently_ ready. /// /// 1. The iterator _always_ returns transaction in order: It never returns a transaction with - /// an unsatisfied dependency and only returns them if dependency transaction were yielded - /// previously. In other words: The nonces of transactions with the same sender will _always_ - /// increase by exactly 1. 
+ /// an unsatisfied dependency and only returns them if dependency transactions were yielded + /// previously. In other words: The nonces of transactions with the same sender will _always_ + /// increase by exactly 1. /// /// The order of transactions which satisfy (1.) is determent by their computed priority: A /// transaction with a higher priority is returned before a transaction with a lower priority. diff --git a/crates/transaction-pool/src/pool/txpool.rs b/crates/transaction-pool/src/pool/txpool.rs index bcad71edbd4..4e35733d4bc 100644 --- a/crates/transaction-pool/src/pool/txpool.rs +++ b/crates/transaction-pool/src/pool/txpool.rs @@ -1002,6 +1002,7 @@ impl AllTransactions { /// For all transactions: /// - decreased basefee: promotes from `basefee` to `pending` sub-pool. /// - increased basefee: demotes from `pending` to `basefee` sub-pool. + /// /// Individually: /// - decreased sender allowance: demote from (`basefee`|`pending`) to `queued`. /// - increased sender allowance: promote from `queued` to diff --git a/crates/trie/src/hashed_cursor/default.rs b/crates/trie/src/hashed_cursor/default.rs index 298c5ce2e75..1e5068870d7 100644 --- a/crates/trie/src/hashed_cursor/default.rs +++ b/crates/trie/src/hashed_cursor/default.rs @@ -8,14 +8,21 @@ use reth_primitives::{Account, StorageEntry, B256}; impl<'a, TX: DbTx> HashedCursorFactory for &'a TX { type AccountCursor = ::Cursor; - type StorageCursor = ::DupCursor; + type StorageCursor = + DatabaseHashedStorageCursor<::DupCursor>; fn hashed_account_cursor(&self) -> Result { self.cursor_read::() } - fn hashed_storage_cursor(&self) -> Result { - self.cursor_dup_read::() + fn hashed_storage_cursor( + &self, + hashed_address: B256, + ) -> Result { + Ok(DatabaseHashedStorageCursor::new( + self.cursor_dup_read::()?, + hashed_address, + )) + } } @@ -32,23 +39,35 @@ where } } -impl HashedStorageCursor for C +/// The structure wrapping a database cursor for hashed storage and +/// a target hashed address.
Implements [HashedStorageCursor] for iterating +/// hashed state +#[derive(Debug)] +pub struct DatabaseHashedStorageCursor { + cursor: C, + hashed_address: B256, +} + +impl DatabaseHashedStorageCursor { + /// Create new [DatabaseHashedStorageCursor]. + pub fn new(cursor: C, hashed_address: B256) -> Self { + Self { cursor, hashed_address } + } +} + +impl HashedStorageCursor for DatabaseHashedStorageCursor where C: DbCursorRO + DbDupCursorRO, { - fn is_storage_empty(&mut self, key: B256) -> Result { - Ok(self.seek_exact(key)?.is_none()) + fn is_storage_empty(&mut self) -> Result { + Ok(self.cursor.seek_exact(self.hashed_address)?.is_none()) } - fn seek( - &mut self, - key: B256, - subkey: B256, - ) -> Result, reth_db::DatabaseError> { - self.seek_by_key_subkey(key, subkey) + fn seek(&mut self, subkey: B256) -> Result, reth_db::DatabaseError> { + self.cursor.seek_by_key_subkey(self.hashed_address, subkey) } fn next(&mut self) -> Result, reth_db::DatabaseError> { - self.next_dup_val() + self.cursor.next_dup_val() } } diff --git a/crates/trie/src/hashed_cursor/mod.rs b/crates/trie/src/hashed_cursor/mod.rs index 72caee26aaa..916dd6f4241 100644 --- a/crates/trie/src/hashed_cursor/mod.rs +++ b/crates/trie/src/hashed_cursor/mod.rs @@ -2,6 +2,7 @@ use reth_primitives::{Account, StorageEntry, B256}; /// Default implementation of the hashed state cursor traits. mod default; +pub use default::DatabaseHashedStorageCursor; /// Implementation of hashed state cursor traits for the post state. mod post_state; @@ -18,7 +19,10 @@ pub trait HashedCursorFactory { fn hashed_account_cursor(&self) -> Result; /// Returns a cursor for iterating over all hashed storage entries in the state. - fn hashed_storage_cursor(&self) -> Result; + fn hashed_storage_cursor( + &self, + hashed_address: B256, + ) -> Result; } /// The cursor for iterating over hashed accounts. @@ -33,14 +37,10 @@ pub trait HashedAccountCursor { /// The cursor for iterating over hashed storage entries. 
pub trait HashedStorageCursor { /// Returns `true` if there are no entries for a given key. - fn is_storage_empty(&mut self, key: B256) -> Result; + fn is_storage_empty(&mut self) -> Result; /// Seek an entry greater or equal to the given key/subkey and position the cursor there. - fn seek( - &mut self, - key: B256, - subkey: B256, - ) -> Result, reth_db::DatabaseError>; + fn seek(&mut self, subkey: B256) -> Result, reth_db::DatabaseError>; /// Move the cursor to the next entry and return it. fn next(&mut self) -> Result, reth_db::DatabaseError>; diff --git a/crates/trie/src/hashed_cursor/post_state.rs b/crates/trie/src/hashed_cursor/post_state.rs index be623da741d..379b08c2cb6 100644 --- a/crates/trie/src/hashed_cursor/post_state.rs +++ b/crates/trie/src/hashed_cursor/post_state.rs @@ -25,9 +25,12 @@ impl<'a, CF: HashedCursorFactory> HashedCursorFactory for HashedPostStateCursorF Ok(HashedPostStateAccountCursor::new(cursor, self.post_state)) } - fn hashed_storage_cursor(&self) -> Result { - let cursor = self.cursor_factory.hashed_storage_cursor()?; - Ok(HashedPostStateStorageCursor::new(cursor, self.post_state)) + fn hashed_storage_cursor( + &self, + hashed_address: B256, + ) -> Result { + let cursor = self.cursor_factory.hashed_storage_cursor(hashed_address)?; + Ok(HashedPostStateStorageCursor::new(cursor, self.post_state, hashed_address)) } } @@ -179,10 +182,10 @@ pub struct HashedPostStateStorageCursor<'b, C> { cursor: C, /// The reference to the post state. post_state: &'b HashedPostStateSorted, + /// The current hashed account key. + hashed_address: B256, /// The post state index where the cursor is currently at. post_state_storage_index: usize, - /// The current hashed account key. - account: Option, /// The last slot that has been returned by the cursor. /// De facto, this is the cursor's position for the given account key. 
last_slot: Option, @@ -190,14 +193,14 @@ pub struct HashedPostStateStorageCursor<'b, C> { impl<'b, C> HashedPostStateStorageCursor<'b, C> { /// Create new instance of [HashedPostStateStorageCursor]. - pub fn new(cursor: C, post_state: &'b HashedPostStateSorted) -> Self { - Self { cursor, post_state, account: None, last_slot: None, post_state_storage_index: 0 } + pub fn new(cursor: C, post_state: &'b HashedPostStateSorted, hashed_address: B256) -> Self { + Self { cursor, post_state, hashed_address, last_slot: None, post_state_storage_index: 0 } } /// Returns `true` if the storage for the given /// The database is not checked since it already has no wiped storage entries. - fn is_db_storage_wiped(&self, account: &B256) -> bool { - match self.post_state.storages.get(account) { + fn is_db_storage_wiped(&self) -> bool { + match self.post_state.storages.get(&self.hashed_address) { Some(storage) => storage.wiped, None => false, } @@ -205,10 +208,10 @@ impl<'b, C> HashedPostStateStorageCursor<'b, C> { /// Check if the slot was zeroed out in the post state. /// The database is not checked since it already has no zero-valued slots. - fn is_slot_zero_valued(&self, account: &B256, slot: &B256) -> bool { + fn is_slot_zero_valued(&self, slot: &B256) -> bool { self.post_state .storages - .get(account) + .get(&self.hashed_address) .map(|storage| storage.zero_valued_slots.contains(slot)) .unwrap_or_default() } @@ -247,34 +250,24 @@ where /// /// This function should be called before attempting to call [HashedStorageCursor::seek] or /// [HashedStorageCursor::next]. 
- fn is_storage_empty(&mut self, key: B256) -> Result { - let is_empty = match self.post_state.storages.get(&key) { + fn is_storage_empty(&mut self) -> Result { + let is_empty = match self.post_state.storages.get(&self.hashed_address) { Some(storage) => { // If the storage has been wiped at any point storage.wiped && // and the current storage does not contain any non-zero values storage.non_zero_valued_slots.is_empty() } - None => self.cursor.is_storage_empty(key)?, + None => self.cursor.is_storage_empty()?, }; Ok(is_empty) } /// Seek the next account storage entry for a given hashed key pair. - fn seek( - &mut self, - account: B256, - subkey: B256, - ) -> Result, reth_db::DatabaseError> { - if self.account.map_or(true, |acc| acc != account) { - self.account = Some(account); - self.last_slot = None; - self.post_state_storage_index = 0; - } - + fn seek(&mut self, subkey: B256) -> Result, reth_db::DatabaseError> { // Attempt to find the account's storage in post state. let mut post_state_entry = None; - if let Some(storage) = self.post_state.storages.get(&account) { + if let Some(storage) = self.post_state.storages.get(&self.hashed_address) { post_state_entry = storage.non_zero_valued_slots.get(self.post_state_storage_index); while post_state_entry.map(|(slot, _)| slot < &subkey).unwrap_or_default() { @@ -293,14 +286,14 @@ where } // It's not an exact match, reposition to the first greater or equal account. - let db_entry = if self.is_db_storage_wiped(&account) { + let db_entry = if self.is_db_storage_wiped() { None } else { - let mut db_entry = self.cursor.seek(account, subkey)?; + let mut db_entry = self.cursor.seek(subkey)?; while db_entry .as_ref() - .map(|entry| self.is_slot_zero_valued(&account, &entry.key)) + .map(|entry| self.is_slot_zero_valued(&entry.key)) .unwrap_or_default() { db_entry = self.cursor.next()?; @@ -322,25 +315,21 @@ where /// If the account key is not set. [HashedStorageCursor::seek] must be called first in order to /// position the cursor. 
fn next(&mut self) -> Result, reth_db::DatabaseError> { - let account = self.account.expect("`seek` must be called first"); - let last_slot = match self.last_slot.as_ref() { Some(slot) => slot, None => return Ok(None), // no previous entry was found }; - let db_entry = if self.is_db_storage_wiped(&account) { + let db_entry = if self.is_db_storage_wiped() { None } else { // If post state was given precedence, move the cursor forward. - let mut db_entry = self.cursor.seek(account, *last_slot)?; + let mut db_entry = self.cursor.seek(*last_slot)?; // If the entry was already returned or is zero-values, move to the next. while db_entry .as_ref() - .map(|entry| { - &entry.key == last_slot || self.is_slot_zero_valued(&account, &entry.key) - }) + .map(|entry| &entry.key == last_slot || self.is_slot_zero_valued(&entry.key)) .unwrap_or_default() { db_entry = self.cursor.next()?; @@ -351,7 +340,7 @@ where // Attempt to find the account's storage in post state. let mut post_state_entry = None; - if let Some(storage) = self.post_state.storages.get(&account) { + if let Some(storage) = self.post_state.storages.get(&self.hashed_address) { post_state_entry = storage.non_zero_valued_slots.get(self.post_state_storage_index); while post_state_entry.map(|(slot, _)| slot <= last_slot).unwrap_or_default() { self.post_state_storage_index += 1; @@ -397,12 +386,11 @@ mod tests { factory: &impl HashedCursorFactory, expected: impl Iterator)>, ) { - let mut cursor = factory.hashed_storage_cursor().unwrap(); - for (account, storage) in expected { + let mut cursor = factory.hashed_storage_cursor(account).unwrap(); let mut expected_storage = storage.into_iter(); - let first_storage = cursor.seek(account, B256::default()).unwrap(); + let first_storage = cursor.seek(B256::default()).unwrap(); assert_eq!(first_storage.map(|e| (e.key, e.value)), expected_storage.next()); for expected_entry in expected_storage { @@ -577,8 +565,8 @@ mod tests { let sorted = HashedPostState::default().into_sorted(); let 
tx = db.tx().unwrap(); let factory = HashedPostStateCursorFactory::new(&tx, &sorted); - let mut cursor = factory.hashed_storage_cursor().unwrap(); - assert!(cursor.is_storage_empty(address).unwrap()); + let mut cursor = factory.hashed_storage_cursor(address).unwrap(); + assert!(cursor.is_storage_empty().unwrap()); } let db_storage = @@ -600,8 +588,8 @@ mod tests { let sorted = HashedPostState::default().into_sorted(); let tx = db.tx().unwrap(); let factory = HashedPostStateCursorFactory::new(&tx, &sorted); - let mut cursor = factory.hashed_storage_cursor().unwrap(); - assert!(!cursor.is_storage_empty(address).unwrap()); + let mut cursor = factory.hashed_storage_cursor(address).unwrap(); + assert!(!cursor.is_storage_empty().unwrap()); } // wiped storage, must be empty @@ -615,8 +603,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); let factory = HashedPostStateCursorFactory::new(&tx, &sorted); - let mut cursor = factory.hashed_storage_cursor().unwrap(); - assert!(cursor.is_storage_empty(address).unwrap()); + let mut cursor = factory.hashed_storage_cursor(address).unwrap(); + assert!(cursor.is_storage_empty().unwrap()); } // wiped storage, but post state has zero-value entries @@ -631,8 +619,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); let factory = HashedPostStateCursorFactory::new(&tx, &sorted); - let mut cursor = factory.hashed_storage_cursor().unwrap(); - assert!(cursor.is_storage_empty(address).unwrap()); + let mut cursor = factory.hashed_storage_cursor(address).unwrap(); + assert!(cursor.is_storage_empty().unwrap()); } // wiped storage, but post state has non-zero entries @@ -647,8 +635,8 @@ mod tests { let sorted = hashed_post_state.into_sorted(); let tx = db.tx().unwrap(); let factory = HashedPostStateCursorFactory::new(&tx, &sorted); - let mut cursor = factory.hashed_storage_cursor().unwrap(); - assert!(!cursor.is_storage_empty(address).unwrap()); + let mut cursor = 
factory.hashed_storage_cursor(address).unwrap(); + assert!(!cursor.is_storage_empty().unwrap()); } } diff --git a/crates/trie/src/node_iter.rs b/crates/trie/src/node_iter.rs index 742896140bc..3a621a38b1f 100644 --- a/crates/trie/src/node_iter.rs +++ b/crates/trie/src/node_iter.rs @@ -166,8 +166,6 @@ pub struct StorageNodeIter { pub walker: TrieWalker, /// The cursor for the hashed storage entries. pub hashed_storage_cursor: H, - /// The hashed address this storage trie belongs to. - hashed_address: B256, /// Current hashed storage entry. current_hashed_entry: Option, @@ -177,11 +175,10 @@ pub struct StorageNodeIter { impl StorageNodeIter { /// Creates a new instance of StorageNodeIter. - pub fn new(walker: TrieWalker, hashed_storage_cursor: H, hashed_address: B256) -> Self { + pub fn new(walker: TrieWalker, hashed_storage_cursor: H) -> Self { Self { walker, hashed_storage_cursor, - hashed_address, current_walker_key_checked: false, current_hashed_entry: None, } @@ -238,8 +235,7 @@ where // Attempt to get the next unprocessed key from the walker. if let Some(seek_key) = self.walker.next_unprocessed_key() { // Seek and update the current hashed entry based on the new seek key. - self.current_hashed_entry = - self.hashed_storage_cursor.seek(self.hashed_address, seek_key)?; + self.current_hashed_entry = self.hashed_storage_cursor.seek(seek_key)?; self.walker.advance()?; } else { // No more keys to process, break the loop. diff --git a/crates/trie/src/prefix_set/mod.rs b/crates/trie/src/prefix_set/mod.rs index b556dd37907..32fdc68c812 100644 --- a/crates/trie/src/prefix_set/mod.rs +++ b/crates/trie/src/prefix_set/mod.rs @@ -161,6 +161,11 @@ impl PrefixSet { false } + /// Returns an iterator over references to _all_ nibbles regardless of cursor position. + pub fn iter(&self) -> core::slice::Iter<'_, Nibbles> { + self.keys.iter() + } + /// Returns the number of elements in the set.
pub fn len(&self) -> usize { self.keys.len() diff --git a/crates/trie/src/proof.rs b/crates/trie/src/proof.rs index 80f0c552e38..094372a851f 100644 --- a/crates/trie/src/proof.rs +++ b/crates/trie/src/proof.rs @@ -109,12 +109,13 @@ where hashed_address: B256, slots: &[B256], ) -> Result<(B256, Vec), StorageRootError> { - let mut hashed_storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor()?; + let mut hashed_storage_cursor = + self.hashed_cursor_factory.hashed_storage_cursor(hashed_address)?; let mut proofs = slots.iter().copied().map(StorageProof::new).collect::>(); // short circuit on empty storage - if hashed_storage_cursor.is_storage_empty(hashed_address)? { + if hashed_storage_cursor.is_storage_empty()? { return Ok((EMPTY_ROOT_HASH, proofs)) } @@ -128,8 +129,7 @@ where let retainer = ProofRetainer::from_iter(target_nibbles); let mut hash_builder = HashBuilder::default().with_proof_retainer(retainer); - let mut storage_node_iter = - StorageNodeIter::new(walker, hashed_storage_cursor, hashed_address); + let mut storage_node_iter = StorageNodeIter::new(walker, hashed_storage_cursor); while let Some(node) = storage_node_iter.try_next()? { match node { StorageNode::Branch(node) => { diff --git a/crates/trie/src/trie.rs b/crates/trie/src/trie.rs index faf693f3398..55ee1bebdbe 100644 --- a/crates/trie/src/trie.rs +++ b/crates/trie/src/trie.rs @@ -483,10 +483,11 @@ where ) -> Result<(B256, usize, TrieUpdates), StorageRootError> { trace!(target: "trie::storage_root", hashed_address = ?self.hashed_address, "calculating storage root"); - let mut hashed_storage_cursor = self.hashed_cursor_factory.hashed_storage_cursor()?; + let mut hashed_storage_cursor = + self.hashed_cursor_factory.hashed_storage_cursor(self.hashed_address)?; // short circuit on empty storage - if hashed_storage_cursor.is_storage_empty(self.hashed_address)? { + if hashed_storage_cursor.is_storage_empty()? 
{ return Ok(( EMPTY_ROOT_HASH, 0, @@ -500,8 +501,7 @@ where let mut hash_builder = HashBuilder::default().with_updates(retain_updates); - let mut storage_node_iter = - StorageNodeIter::new(walker, hashed_storage_cursor, self.hashed_address); + let mut storage_node_iter = StorageNodeIter::new(walker, hashed_storage_cursor); while let Some(node) = storage_node_iter.try_next()? { match node { StorageNode::Branch(node) => { diff --git a/deny.toml b/deny.toml index 38994d19745..99b2c8d4f97 100644 --- a/deny.toml +++ b/deny.toml @@ -38,6 +38,7 @@ allow = [ "Apache-2.0 WITH LLVM-exception", "BSD-2-Clause", "BSD-3-Clause", + "0BSD", "ISC", "Unicode-DFS-2016", "Unlicense", diff --git a/docs/repo/ci.md b/docs/repo/ci.md index a1102b4a776..18356ddb732 100644 --- a/docs/repo/ci.md +++ b/docs/repo/ci.md @@ -7,7 +7,6 @@ The CI runs a couple of workflows: - **[ci]**: A catch-all for small jobs. Currently only runs lints (rustfmt, clippy etc.) - **[unit]**: Runs unit tests (tests in `src/`) and doc tests - **[integration]**: Runs integration tests (tests in `tests/` and sync tests) -- **[fuzz]**: Runs fuzz tests - **[bench]**: Runs benchmarks ### Docs @@ -23,10 +22,8 @@ The CI runs a couple of workflows: [ci]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/ci.yml [unit]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/unit.yml [integration]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/integration.yml -[fuzz]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/fuzz.yml [bench]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/bench.yml [book]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/book.yml [deny]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/deny.yml [sanity]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/sanity.yml [release]: https://github.com/paradigmxyz/reth/blob/main/.github/workflows/release.yml -[gh-projects]: 
https://docs.github.com/en/issues/planning-and-tracking-with-projects/automating-your-project/automating-projects-using-actions \ No newline at end of file diff --git a/examples/node-event-hooks/src/main.rs b/examples/node-event-hooks/src/main.rs index b9cd53298b4..e8a751840e0 100644 --- a/examples/node-event-hooks/src/main.rs +++ b/examples/node-event-hooks/src/main.rs @@ -8,12 +8,8 @@ //! ``` //! //! This launch the regular reth node and also print: -//! -//! > "All components initialized" -//! once all components have been initialized and -//! -//! > "Node started" -//! once the node has been started. +//! > "All components initialized" – once all components have been initialized +//! > "Node started" – once the node has been started. use reth::cli::Cli; use reth_node_ethereum::EthereumNode;