diff --git a/Cargo.lock b/Cargo.lock index 233b3901ebf..cfb02855966 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -18,7 +18,7 @@ version = "0.3.5" dependencies = [ "account_utils", "bls", - "clap", + "clap 2.34.0", "clap_utils", "directory", "environment", @@ -117,6 +117,21 @@ dependencies = [ "subtle", ] +[[package]] +name = "afl" +version = "0.13.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75fcbb256454d6df540c49e0cb355f86767fdccf4f82021fbd84c260b1ae7920" +dependencies = [ + "clap 4.3.21", + "fs_extra", + "home", + "libc", + "rustc_version 0.4.0", + "tempfile", + "xdg", +] + [[package]] name = "ahash" version = "0.7.6" @@ -183,6 +198,55 @@ dependencies = [ "winapi", ] +[[package]] +name = "anstream" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ca84f3628370c59db74ee214b3263d58f9aadd9b4fe7e711fd87dc452b7f163" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is-terminal", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a30da5c5f2d5e72842e00bcb57657162cdabef0931f40e2deb9b4140440cecd" + +[[package]] +name = "anstyle-parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "938874ff5980b03a87c5524b3ae5b59cf99b1d6bc836848df7bc5ada9643c333" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "anstyle-wincon" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c677ab05e09154296dd37acecd46420c17b9713e8366facafa8fc0885167cf4c" +dependencies = [ + "anstyle", + "windows-sys", +] + [[package]] 
name = "anvil-rpc" version = "0.1.0" @@ -269,6 +333,17 @@ dependencies = [ "event-listener", ] +[[package]] +name = "async-recursion" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.28", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -498,6 +573,7 @@ dependencies = [ name = "beacon_chain" version = "0.2.0" dependencies = [ + "arbitrary", "bitvec 0.20.4", "bls", "derivative", @@ -553,12 +629,37 @@ dependencies = [ "unused_port", ] +[[package]] +name = "beacon_chain_fuzz" +version = "0.0.0" +dependencies = [ + "afl", + "arbitrary", + "async-recursion", + "beacon_chain", + "bls", + "fork_choice", + "lazy_static", + "logging", + "parking_lot 0.12.1", + "rand 0.8.5", + "slog", + "sloggers", + "state_processing", + "store", + "strum", + "tempfile", + "tokio", + "tree_hash", + "types", +] + [[package]] name = "beacon_node" version = "4.3.0" dependencies = [ "beacon_chain", - "clap", + "clap 2.34.0", "clap_utils", "client", "directory", @@ -759,7 +860,7 @@ name = "boot_node" version = "4.3.0" dependencies = [ "beacon_node", - "clap", + "clap 2.34.0", "clap_utils", "eth2_network_config", "ethereum_ssz", @@ -1015,11 +1116,39 @@ dependencies = [ "vec_map", ] +[[package]] +name = "clap" +version = "4.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c27cdf28c0f604ba3f512b0c9a409f8de8513e4816705deb0498b627e7c3a3fd" +dependencies = [ + "clap_builder", +] + +[[package]] +name = "clap_builder" +version = "4.3.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08a9f1ab5e9f01a9b81f202e8562eb9a10de70abf9eaeac1be465c28b75aa4aa" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "once_cell", + "strsim 0.10.0", +] + +[[package]] +name = "clap_lex" +version = "0.5.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "2da6da31387c7e4ef160ffab6d5e7f00c42626fe39aea70a7b0f1773f7dd6c1b" + [[package]] name = "clap_utils" version = "0.1.0" dependencies = [ - "clap", + "clap 2.34.0", "dirs", "eth2_network_config", "ethereum-types 0.14.1", @@ -1083,6 +1212,12 @@ dependencies = [ "cc", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "compare_fields" version = "0.2.0" @@ -1170,7 +1305,7 @@ checksum = "b01d6de93b2b6c65e17c634a26653a29d107b3c98c607c765bf38d041531cd8f" dependencies = [ "atty", "cast", - "clap", + "clap 2.34.0", "criterion-plot", "csv", "itertools", @@ -1456,7 +1591,7 @@ version = "0.1.0" dependencies = [ "beacon_chain", "beacon_node", - "clap", + "clap 2.34.0", "clap_utils", "environment", "logging", @@ -1631,7 +1766,7 @@ dependencies = [ name = "directory" version = "0.1.0" dependencies = [ - "clap", + "clap 2.34.0", "clap_utils", "eth2_network_config", ] @@ -2741,6 +2876,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "fs_extra" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + [[package]] name = "funty" version = "1.1.0" @@ -3230,6 +3371,15 @@ dependencies = [ "hmac 0.8.1", ] +[[package]] +name = "home" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +dependencies = [ + "windows-sys", +] + [[package]] name = "hostname" version = "0.3.1" @@ -3655,6 +3805,17 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28b29a3cd74f0f4598934efe3aeba42bae0eb4680554128851ebbecb02af14e6" +[[package]] +name = "is-terminal" +version = "0.4.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb0889898416213fab133e1d33a0e5858a48177452750691bde3666d0fdbaf8b" +dependencies = [ + "hermit-abi 0.3.2", + "rustix 0.38.4", + "windows-sys", +] + [[package]] name = "itertools" version = "0.10.5" @@ -3792,7 +3953,7 @@ dependencies = [ "account_utils", "beacon_chain", "bls", - "clap", + "clap 2.34.0", "clap_utils", "deposit_contract", "directory", @@ -4326,7 +4487,7 @@ dependencies = [ "beacon_processor", "bls", "boot_node", - "clap", + "clap 2.34.0", "clap_utils", "database_manager", "directory", @@ -7039,7 +7200,7 @@ dependencies = [ name = "simulator" version = "0.2.0" dependencies = [ - "clap", + "clap 2.34.0", "env_logger 0.9.3", "eth1", "eth1_test_rig", @@ -8548,6 +8709,12 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + [[package]] name = "uuid" version = "0.8.2" @@ -8565,7 +8732,7 @@ dependencies = [ "account_utils", "bincode", "bls", - "clap", + "clap 2.34.0", "clap_utils", "deposit_contract", "directory", @@ -8638,7 +8805,7 @@ version = "0.1.0" dependencies = [ "account_utils", "bls", - "clap", + "clap 2.34.0", "clap_utils", "environment", "eth2", @@ -8881,7 +9048,7 @@ dependencies = [ "beacon_node", "bls", "byteorder", - "clap", + "clap 2.34.0", "diesel", "diesel_migrations", "env_logger 0.9.3", @@ -9244,6 +9411,12 @@ dependencies = [ "zeroize", ] +[[package]] +name = "xdg" +version = "2.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "213b7324336b53d2414b2db8537e56544d981803139155afa84f76eeebb7a546" + [[package]] name = "xml-rs" version = "0.8.16" diff --git a/Cargo.toml b/Cargo.toml index 15906a03065..da08d809ce0 100644 --- a/Cargo.toml +++ 
b/Cargo.toml @@ -4,6 +4,7 @@ members = [ "beacon_node", "beacon_node/beacon_chain", + "beacon_node/beacon_chain/fuzz", "beacon_node/beacon_processor", "beacon_node/builder_client", "beacon_node/client", diff --git a/Makefile b/Makefile index b833686e1b5..0e8029be8bb 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,6 @@ AARCH64_TAG = "aarch64-unknown-linux-gnu" BUILD_PATH_AARCH64 = "target/$(AARCH64_TAG)/release" PINNED_NIGHTLY ?= nightly -CLIPPY_PINNED_NIGHTLY=nightly-2022-05-19 # List of features to use when building natively. Can be overriden via the environment. # No jemalloc on Windows @@ -121,6 +120,11 @@ cargo-fmt: check-benches: cargo check --workspace --benches +# Type-checks the Hydra code. +check-hydra: + cargo +$(PINNED_NIGHTLY) check -p beacon_chain_fuzz --features afl + cargo +$(PINNED_NIGHTLY) check -p beacon_chain_fuzz --features repro + # Runs only the ef-test vectors. run-ef-tests: rm -rf $(EF_TESTS)/.accessed_file_log.txt @@ -184,13 +188,6 @@ lint: lint-fix: EXTRA_CLIPPY_OPTS="--fix --allow-staged --allow-dirty" $(MAKE) lint -nightly-lint: - cp .github/custom/clippy.toml . - cargo +$(CLIPPY_PINNED_NIGHTLY) clippy --workspace --tests --release -- \ - -A clippy::all \ - -D clippy::disallowed_from_async - rm clippy.toml - # Runs the makefile in the `ef_tests` repo. # # May download and extract an archive of test vectors from the ethereum diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index b537327fb31..a0bc12ebfa9 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -10,6 +10,7 @@ default = ["participation_metrics"] write_ssz_files = [] # Writes debugging .ssz files to /tmp during block processing. participation_metrics = [] # Exposes validator participation metrics to Prometheus. 
fork_from_env = [] # Initialise the harness chain spec from the FORK_NAME env variable +hydra = ["rand/small_rng", "arbitrary"] # enable Hydra testing framework (DO NOT ENABLE THIS EXCEPT FOR TESTING) [dev-dependencies] maplit = "1.0.2" @@ -67,6 +68,7 @@ hex = "0.4.2" exit-future = "0.2.0" unused_port = {path = "../../common/unused_port"} oneshot_broadcast = { path = "../../common/oneshot_broadcast" } +arbitrary = { version = "1.1.3", optional = true } [[test]] name = "beacon_chain_tests" diff --git a/beacon_node/beacon_chain/fuzz/.gitignore b/beacon_node/beacon_chain/fuzz/.gitignore new file mode 100644 index 00000000000..57b6d69173d --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/.gitignore @@ -0,0 +1,5 @@ +target +corpus +artifacts +data +Cargo.lock diff --git a/beacon_node/beacon_chain/fuzz/Cargo.toml b/beacon_node/beacon_chain/fuzz/Cargo.toml new file mode 100644 index 00000000000..4c70d5b4f4e --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/Cargo.toml @@ -0,0 +1,54 @@ +[package] +name = "beacon_chain_fuzz" +version = "0.0.0" +publish = false +edition = "2021" + +[features] +default = ["minimal"] +minimal = [] +mainnet = [] +afl = ["dep:afl"] # this feature enables the fuzzer +repro = [] # this feature enables the test-case reproducer + +[package.metadata] +cargo-fuzz = true + +[dependencies] +arbitrary = { version = "1.1.3", features = ["derive"] } +store = { path = "../../store" } +lazy_static = "1.4.0" +slog = { version = "2.5.2", features = ["max_level_trace"] } +sloggers = { version = "2.1.1", features = ["json"] } +state_processing = { path = "../../../consensus/state_processing" } +tree_hash = "0.5.0" +types = { path = "../../../consensus/types" } +rand = "0.8.5" +tempfile = "3.1.0" +fork_choice = { path = "../../../consensus/fork_choice" } +logging = { path = "../../../common/logging" } +tokio = { version = "1", features = ["rt-multi-thread"] } +bls = { path = "../../../crypto/bls" } +strum = "0.24.1" +async-recursion = "1.0.0" +parking_lot = 
"0.12.1" + +afl = { version = "*", optional = true } + +[dependencies.beacon_chain] +path = ".." +features = ["hydra"] + +[[bin]] +name = "fuzzer" +path = "fuzz_targets/run.rs" +test = false +doc = false +required-features = ["afl"] + +[[bin]] +name = "repro" +path = "fuzz_targets/repro.rs" +test = false +doc = false +required-features = ["repro"] diff --git a/beacon_node/beacon_chain/fuzz/README.md b/beacon_node/beacon_chain/fuzz/README.md new file mode 100644 index 00000000000..b7d8ecb3f9b --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/README.md @@ -0,0 +1,183 @@ +Hydra Fuzzer +============ + +The Hydra fuzzer is a fuzzer that targets fork choice, block production and other consensus-adjacent +components that are external to the state transition function. It simulates an attacker that controls +a portion of validators and broadcasts blocks with random delays to a set of simulated honest +nodes. More detail on the algorithm can be found in the [Design](#design) section below. + +Hydra is implemented on top of the popular [AFL++][] fuzzer. + +## Dependencies + +You need a nightly Rust compiler: + +``` +rustup toolchain install nightly +``` + +Once you have a nightly compiler, install AFL: + +``` +cargo +nightly install afl +``` + +To run multiple parallel tasks you'll also need GNU screen. On Ubuntu: + +``` +sudo apt install screen +``` + +Hydra has only been tested on Linux and probably won't work on macOS/Windows. + +## Running + +The interface to the Hydra fuzzer is a Python script `hydra.py` with two main subcommands: + +- `run`: start a new fuzzing session +- `repro`: replay a crash or test case + +Both commands take a `--spec {minimal,mainnet}` flag to choose between 8 or 32 slots per epoch. 
+ +To start a fuzzing session using 16 parallel processes for the mainnet spec, first create a +starting corpus: + +``` +cd lighthouse/beacon_node/beacon_chain/fuzz # this directory + +mkdir -p data/in + +echo "lets start fuzzing" > data/in/start +``` + +Next, we need to tweak some kernel parameters so that AFL is happy: + +``` +sudo ./setup.sh +``` + +Then you can start Hydra like so: + +``` +./hydra.py run --spec mainnet --num-workers 16 +``` + +This will create a new `screen` session with 16 windows running the AFL processes. You +can attach to it with `screen -x hydra`. Crashes found will be output to +`data/out/{worker}/crashes`. + +You can check for crashes from all workers using a command like: + +``` +ls data/out/*/crashes/* | grep -v README +``` + +## Environment Variables + +Hydra is configured via environment variables. Some useful ones are: + +- `HYDRA_CONFIG_BASE`: choose the base configuration including the attacker's fraction of the + network. +- `HYDRA_MAX_REORG_LENGTH`: choose the maximum length of re-org to be tolerated during testing. + The default is 8 slots, but should be set lower if you're using the minimal spec. +- `HYDRA_DEBUG_LOGS`: enable/disable the output of Hydra-specific debug logs _on stdout_. +- `HYDRA_LOG_PERSPECTIVE`: enable/disable the output of Lighthouse debug logs _on stderr_, from the + perspective of a single honest node. + +For more information see `src/env.rs` and `src/config.rs`. + +## Reproducing a crash + +Usually when reproducing a crash you'll want both Hydra and Lighthouse logs: + +``` +env HYDRA_DEBUG_LOGS=true HYDRA_LOG_PERSPECTIVE=0 ./hydra.py repro --spec minimal data/out/worker0/crashes/example > stdout.log 2> stderr.log +``` + +## Design + +_This is a description of how Hydra works at a high-level. It may become out of date as the +underlying code changes._ + +Hydra maintains the following state during simulation: + +- `honest_nodes`: a list of beacon chain harnesses for honest nodes. 
Each node has its own in-memory + database, fork choice and signing keys. +- `attacker`: a beacon chain harness used by the attacker. +- `hydra`: a collection of "viable" head states that the attacker may build a block upon. This is + the key data-structure which gives the fuzzer its name. +- `u`: an instance of [`arbitrary::Unstructured`][unstructured] which the simulator uses to convert + the random bytes from the fuzzer into attacker actions. +- `time`: a representation of the current time with sub-slot accuracy. Time is split into ticks + with a configurable number of ticks per slot (default 3). + +Hydra runs a loop with these key steps: + +- At the start of a slot, all honest nodes propose blocks according to their view of the head. + Honest blocks are broadcast with 0 delay and arrive immediately at all other honest nodes. +- At the attestation deadline tick, honest nodes sign and broadcast attestations according to + their view of the head. Honest attestations are broadcast with 0 delay and arrive immediately + at all other honest nodes. +- At the start of a slot, the attacker updates the Hydra head tracker and then randomly chooses + heads to propose on. They choose based on the input bytes (via `Unstructured`) from the heads that + they are eligible to propose on (selected as proposer). Each attacker proposer index proposes at + most 1 block per slot (they don't commit any slashable offences). For each `(block, node)` the + attacker chooses a random delay for the `block` to arrive at `node`. The spread of delays is + limited by the `max_delay_difference` parameter which represents the honest nodes' ability to + propagate blocks regardless of the attacker's attempt to withhold them. + +The loop logic is contained in `src/runner.rs`. + +The fuzzer is trying to trigger certain bad behaviours on the honest nodes. Currently this +includes: + +- Logs at `ERROR` or `CRIT` level. +- Re-orgs longer than the `max_reorg_length` (`HYDRA_MAX_REORG_LENGTH`). 
+ +It achieves this via a custom logger implementation that snoops on the logs from the honest nodes +(see `src/log_interceptor.rs`). + +The simulation terminates once the attacker runs out of entropy, at which point a few more +iterations are run to finish delivery of in-flight messages. If the attacker runs out of entropy +in the middle of the simulation then an error is returned and the simulation ends early (without +panicking). The fuzzer will usually extend the input in this case to explore more of the search +space, and the effort spent exploring aborted runs is not wasted as the fuzzer is still checking for +errors and re-orgs as it goes. + +### Message Delivery + +Messages from honest nodes are delivered instantly. + +Attacker messages are queued at each node to be dequeued at a given tick. + +If a message is undeliverable because one of its dependent messages hasn't arrived yet (e.g. its +parent block) then it is added to a separate queue of messages to be delivered immediately upon that +message's arrival. This model slightly favours the honest nodes but was found to be more realistic +than a previous approach that just naively requeued messages until they could be processed (the +attacker exploited the naive requeueing to split the network for extended periods during which some +attacker blocks were withheld). + +Most of the message queueing logic is in `src/node.rs`. + +### The Hydra Data Structure + +The `Hydra` structure is a map from block roots to `BeaconState`s. It contains states for all blocks +descended from finalization, including those that are ancestors of other blocks in the set. Each +state is kept advanced to the current epoch so that it can be used to determine proposer shufflings +and used for block proposals on-demand. + +Non-viable blocks/states are pruned from the Hydra based on their finalization information. + +The logic for the `Hydra` is contained in `src/hydra.rs`. 
+ +## Limitations + +The current limitations which may be removed in future versions are: + +- [ ] The attacker only sends valid messages. +- [ ] Neither the attacker nor the honest nodes send aggregate attestations. +- [ ] Signature verification is required (slow). +- [ ] There is no slasher, so the attacker currently cannot attempt anything slashable. + +[AFL++]: https://github.com/AFLplusplus/AFLplusplus +[unstructured]: https://docs.rs/arbitrary/latest/arbitrary/struct.Unstructured.html diff --git a/beacon_node/beacon_chain/fuzz/fuzz_targets/fuzzer.rs b/beacon_node/beacon_chain/fuzz/fuzz_targets/fuzzer.rs new file mode 100644 index 00000000000..194118cd210 --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/fuzz_targets/fuzzer.rs @@ -0,0 +1,69 @@ +use beacon_chain::test_utils::test_spec; +use beacon_chain_fuzz::{Config, LogConfig, LogInterceptor, Runner, TestHarness}; +use tokio::runtime::Runtime; +use types::{ChainSpec, ForkName, Keypair, Uint256}; + +const TEST_FORK: ForkName = ForkName::Capella; + +#[cfg(feature = "afl")] +use afl::fuzz; + +#[cfg(not(feature = "afl"))] +macro_rules! fuzz { + ($e:expr) => { + use std::io::Read; + + let mut stdin = std::io::stdin(); + let mut data = vec![]; + stdin.read_to_end(&mut data).unwrap(); + ($e)(&data) + }; +} + +// Use a `cfg` for the spec to avoid bloating the binary. +#[cfg(all(feature = "minimal", not(feature = "mainnet")))] +type E = types::MinimalEthSpec; +#[cfg(feature = "mainnet")] +type E = types::MainnetEthSpec; + +fn get_harness( + id: String, + log_config: LogConfig, + spec: ChainSpec, + keypairs: &[Keypair], +) -> TestHarness<E> { + let log = LogInterceptor::new(id, log_config).into_logger(); + + // Start from PoS.
+ let terminal_block_number = 0; + + let harness = TestHarness::builder(E::default()) + .spec(spec) + .logger(log) + .keypairs(keypairs.to_vec()) + .mock_execution_layer_generic(terminal_block_number) + .fresh_ephemeral_store() + .build(); + harness +} + +pub fn main_func() { + fuzz!(|data: &[u8]| { + let config = Config::from_env(); + let rt = Runtime::new().unwrap(); + + let mut spec = TEST_FORK.make_genesis_spec(test_spec::<E>()); + spec.terminal_total_difficulty = Uint256::zero(); + let mut runner = Runner::new(data, config, spec, get_harness); + + match rt.block_on(async move { runner.run().await }) { + Ok(()) => (), + Err(arbitrary::Error::NotEnoughData) => { + println!("aborted run due to lack of entropy"); + } + Err(_) => { + panic!("bad arbitrary usage"); + } + } + }); +} diff --git a/beacon_node/beacon_chain/fuzz/fuzz_targets/repro.rs b/beacon_node/beacon_chain/fuzz/fuzz_targets/repro.rs new file mode 100644 index 00000000000..8927a5cc12c --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/fuzz_targets/repro.rs @@ -0,0 +1,6 @@ +#![cfg(feature = "repro")] + +mod fuzzer; +fn main() { + fuzzer::main_func(); +} diff --git a/beacon_node/beacon_chain/fuzz/fuzz_targets/run.rs b/beacon_node/beacon_chain/fuzz/fuzz_targets/run.rs new file mode 100644 index 00000000000..5ac5ceacfc2 --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/fuzz_targets/run.rs @@ -0,0 +1,6 @@ +#![cfg(feature = "afl")] + +mod fuzzer; +fn main() { + fuzzer::main_func(); +} diff --git a/beacon_node/beacon_chain/fuzz/hydra.py b/beacon_node/beacon_chain/fuzz/hydra.py new file mode 100755 index 00000000000..740e2806c33 --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/hydra.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python3 +import argparse +import subprocess +import sys + +FUZZ_TARGET = "fuzzer" +REPRO_TARGET = "repro" + +def build_fuzz_target(args): + spec = args.spec + subprocess.run( + [ + "cargo", + "+nightly", + "afl", + "build", + "--release", + "--bin", + FUZZ_TARGET, + "--features", + f"afl,{spec}",
+ ], + stdout=sys.stdout, + stderr=sys.stderr, + check=True, + ) + +def build_repro_target(args): + spec = args.spec + subprocess.run( + [ + "cargo", + "+nightly", + "build", + "--release", + "--bin", + REPRO_TARGET, + "--features", + f"repro,{spec}", + "--features", + "logging/test_logger", + ], + stdout=sys.stdout, + stderr=sys.stderr, + check=True, + ) + +def fuzz_command(i, args): + return [ + "cargo", + "+nightly", + "afl", + "fuzz", + "-i" + "data/in", + "-o", + "data/out", + "-S", + f"worker{i}", + "-t", + str(args.timeout * 1000), + f"../../../target/release/{FUZZ_TARGET}" + ] + +def run(args): + if args.num_workers == 1: + run_single(args) + else: + run_multi(args) + +def run_single(args): + # Build with AFL. + build_fuzz_target(args) + + # Fuzz the compiled binary. + subprocess.run( + fuzz_command(0, args), + stdout=sys.stdout, + stderr=sys.stderr, + ) + +def run_multi(args): + # Build with AFL. + build_fuzz_target(args) + + # Start a screen session. + session = args.session + print(f"starting new screen session named {session}") + subprocess.check_call( + [ + "screen", + "-d", + "-m", + "-S", + session + ], + stdout=sys.stdout, + stderr=sys.stderr, + ) + subprocess.check_call( + ["screen", "-S", session, "-X", "zombie", "qr"], + stdout=sys.stdout, + stderr=sys.stderr, + ) + + for i in range(args.worker_offset, args.worker_offset + args.num_workers): + print(f"starting worker{i}") + subprocess.check_call( + [ + "screen", + "-S", + session, + "-X", + "screen", + *fuzz_command(i, args) + ], + stdout=sys.stdout, + stderr=sys.stderr + ) + + +def repro(args): + build_repro_target(args) + + with open(args.input, "rb") as f: + input_bytes = f.read() + + subprocess.run( + [ + f"../../../target/release/{REPRO_TARGET}" + ], + input=input_bytes, + stdout=sys.stdout, + stderr=sys.stderr + ) + +def parse_args(): + parser = argparse.ArgumentParser(prog="hydra") + subparsers = parser.add_subparsers() + + run_parser = subparsers.add_parser("run", help="Start a fuzzing 
session with AFL") + run_parser.add_argument("--spec", default="mainnet") + run_parser.add_argument("--num-workers", metavar="N", type=int, default=1) + # FIXME(hydra): plumb through re-org limit + # run_parser.add_argument("--reorg-limit", metavar="N", type=int, default=5) + run_parser.add_argument("--session", metavar="NAME", type=str, default="hydra") + run_parser.add_argument("--worker-offset", metavar="N", type=int, default=0) + run_parser.add_argument("--timeout", metavar="SECONDS", type=int, default=10 * 60) + run_parser.set_defaults(func=run) + + repro_parser = subparsers.add_parser("repro", help="Reproduce a crash with debugging output") + repro_parser.add_argument("--spec", default="mainnet") + repro_parser.add_argument("input", metavar="FILE") + repro_parser.set_defaults(func=repro) + + return parser.parse_args() + +def main(): + args = parse_args() + + # Invoke appropriate subcommand + args.func(args) + +if __name__ == "__main__": + main() diff --git a/beacon_node/beacon_chain/fuzz/setup.sh b/beacon_node/beacon_chain/fuzz/setup.sh new file mode 100755 index 00000000000..7738501563b --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/setup.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +if [ "$EUID" -ne 0 ]; +then + echo "you must be root to run this script" + exit 1 +fi + +echo core > /proc/sys/kernel/core_pattern + +echo performance | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor + diff --git a/beacon_node/beacon_chain/fuzz/src/config.rs b/beacon_node/beacon_chain/fuzz/src/config.rs new file mode 100644 index 00000000000..c23e68e61e6 --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/src/config.rs @@ -0,0 +1,173 @@ +use crate::LogConfig; +use std::marker::PhantomData; +use std::time::Duration; +use types::{ChainSpec, EthSpec}; + +pub struct Config<E: EthSpec> { + pub num_honest_nodes: usize, + pub total_validators: usize, + pub attacker_validators: usize, + pub ticks_per_slot: usize, + pub min_attacker_proposers_per_slot: usize, + pub
max_attacker_proposers_per_slot: usize, + /// Maximum delay in ticks before each attacker message must reach at least one honest node. + /// + /// For example if this is set to 5, then all attacker messages must be broadcast to at least + /// one honest node 5 ticks after they were created. They may be broadcast sooner. + /// + /// Together with `max_delay_difference`, this parameter sets the ranges on message delays: + /// + /// - `first_node_delay` in `0..=max_first_node_delay` + /// - `node_delay` in `first_node_delay..=first_node_delay + max_delay_difference` + pub max_first_node_delay: usize, + /// Maximum delay in ticks between an attacker message reaching its first honest node and its + /// last. + /// + /// This is meant to simulate network gossip amongst honest nodes, an attacker can't keep a + /// message secret if the honest nodes gossip it amongst themselves. + pub max_delay_difference: usize, + /// Maximum length of re-org that will be tolerated. + pub max_reorg_length: usize, + /// Number of slots to run the chain for with only honest nodes, before the attacker becomes + /// active. + pub num_warmup_slots: usize, + pub debug: DebugConfig, + pub _phantom: PhantomData<E>, +} + +#[derive(Clone)] +pub struct DebugConfig { + /// Log the number of hydra heads. + pub num_hydra_heads: bool, + /// Log each block proposal as it occurs. + pub block_proposals: bool, + /// Log the attempted delivery of each block at each honest node. + pub block_delivery: bool, + /// Log each proposal by the attacker. + pub attacker_proposals: bool, + /// Print debug logs to stderr from the perspective of this honest node.
+ pub log_perspective: Option<usize>, +} + +impl<E: EthSpec> Default for Config<E> { + fn default() -> Self { + Config::with_15pc_attacker() + } +} + +#[allow(clippy::derivable_impls)] +impl Default for DebugConfig { + fn default() -> Self { + DebugConfig { + num_hydra_heads: false, + block_proposals: false, + block_delivery: false, + attacker_proposals: false, + log_perspective: None, + } + } +} + +impl<E: EthSpec> Config<E> { + pub fn with_10pc_attacker() -> Self { + let ticks_per_slot = 3; + let slots_per_epoch = E::slots_per_epoch() as usize; + Config { + num_honest_nodes: 3, + total_validators: 60, + attacker_validators: 6, + ticks_per_slot, + min_attacker_proposers_per_slot: 0, + max_attacker_proposers_per_slot: 4, + max_first_node_delay: 2 * slots_per_epoch * ticks_per_slot, + max_delay_difference: ticks_per_slot, + max_reorg_length: 8, + num_warmup_slots: 4 * slots_per_epoch, + debug: DebugConfig::default(), + _phantom: PhantomData, + } + } + + pub fn with_15pc_attacker() -> Self { + Config { + num_honest_nodes: 3, + attacker_validators: 9, + ..Config::with_10pc_attacker() + } + } + + pub fn with_33pc_attacker() -> Self { + Config { + num_honest_nodes: 4, + attacker_validators: 20, + ..Config::with_10pc_attacker() + } + } + + pub fn with_50pc_attacker() -> Self { + Config { + num_honest_nodes: 3, + attacker_validators: 30, + ..Config::with_10pc_attacker() + } + } + + pub fn is_valid(&self) -> bool { + self.ticks_per_slot % 3 == 0 + && self.honest_validators() % self.num_honest_nodes == 0 + && self.max_attacker_proposers_per_slot >= self.min_attacker_proposers_per_slot + && self + .debug + .log_perspective + .map_or(true, |i| i < self.num_honest_nodes) + } + + pub fn log_config(&self, node_index: usize) -> LogConfig { + LogConfig { + max_reorg_length: Some(self.max_reorg_length), + forward_logs: self.debug.log_perspective == Some(node_index), + ..LogConfig::default() + } + } + + pub fn attacker_log_config(&self) -> LogConfig { + // Allow the attacker to re-org themself.
They'll process their local blocks before the rest + // of the network with proposer boost. + LogConfig { + max_reorg_length: None, + ..LogConfig::default() + } + } + + pub fn honest_validators(&self) -> usize { + self.total_validators - self.attacker_validators + } + + pub fn honest_validators_per_node(&self) -> usize { + self.honest_validators() / self.num_honest_nodes + } + + pub fn attestation_tick(&self) -> usize { + self.ticks_per_slot / 3 + } + + pub fn is_block_proposal_tick(&self, tick: usize) -> bool { + tick % self.ticks_per_slot == 0 && tick != 0 + } + + pub fn is_attestation_tick(&self, tick: usize) -> bool { + tick % self.ticks_per_slot == self.attestation_tick() + } + + pub fn min_attacker_proposers(&self, available: usize) -> Option<u32> { + Some(std::cmp::min(self.min_attacker_proposers_per_slot, available) as u32) + } + + pub fn max_attacker_proposers(&self, available: usize) -> Option<u32> { + Some(std::cmp::min(self.max_attacker_proposers_per_slot, available) as u32) + } + + pub fn tick_duration(&self, spec: &ChainSpec) -> Duration { + Duration::from_secs(spec.seconds_per_slot) / self.ticks_per_slot as u32 + } +} diff --git a/beacon_node/beacon_chain/fuzz/src/env.rs b/beacon_node/beacon_chain/fuzz/src/env.rs new file mode 100644 index 00000000000..994b379bd94 --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/src/env.rs @@ -0,0 +1,75 @@ +use crate::Config; +use std::env::VarError; +use std::str::FromStr; +use strum::EnumString; +use types::EthSpec; + +const CONFIG_BASE: &str = "HYDRA_CONFIG_BASE"; +const MAX_REORG_LENGTH: &str = "HYDRA_MAX_REORG_LENGTH"; +const DEBUG_LOGS: &str = "HYDRA_DEBUG_LOGS"; +const LOG_PERSPECTIVE: &str = "HYDRA_LOG_PERSPECTIVE"; +const WARMUP_EPOCHS: &str = "HYDRA_WARMUP_EPOCHS"; + +#[derive(EnumString)] +pub enum BaseConfig { + #[strum(serialize = "10pc")] + Attacker10Percent, + #[strum(serialize = "15pc")] + Attacker15Percent, + #[strum(serialize = "33pc")] + Attacker33Percent, + #[strum(serialize = "50pc")] + Attacker50Percent,
+} + +fn env(var_name: &str) -> Option +where + T: FromStr, + T::Err: std::fmt::Debug, +{ + std::env::var(var_name) + .map(Some) + .or_else(|e| match e { + VarError::NotPresent => Ok(None), + _ => Err(e), + }) + .unwrap() + .map(|value| { + value + .parse() + .unwrap_or_else(|e| panic!("invalid value for {var_name}: {e:?}")) + }) +} + +impl Config { + pub fn from_env() -> Self { + let mut config = match env(CONFIG_BASE) { + Some(BaseConfig::Attacker10Percent) => Config::with_10pc_attacker(), + Some(BaseConfig::Attacker15Percent) => Config::with_15pc_attacker(), + Some(BaseConfig::Attacker33Percent) => Config::with_33pc_attacker(), + Some(BaseConfig::Attacker50Percent) => Config::with_50pc_attacker(), + None => Config::default(), + }; + + if let Some(max_reorg_length) = env(MAX_REORG_LENGTH) { + config.max_reorg_length = max_reorg_length; + } + + if let Some(debug) = env(DEBUG_LOGS) { + config.debug.num_hydra_heads = debug; + config.debug.block_proposals = debug; + config.debug.block_delivery = debug; + config.debug.attacker_proposals = debug; + } + + if let Some(log_perspective) = env(LOG_PERSPECTIVE) { + config.debug.log_perspective = Some(log_perspective); + } + + if let Some(warmup_epochs) = env::(WARMUP_EPOCHS) { + config.num_warmup_slots = warmup_epochs * E::slots_per_epoch() as usize; + } + + config + } +} diff --git a/beacon_node/beacon_chain/fuzz/src/lib.rs b/beacon_node/beacon_chain/fuzz/src/lib.rs new file mode 100644 index 00000000000..fa6aa9b0d27 --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/src/lib.rs @@ -0,0 +1,22 @@ +// Prevent compilation as part of the rest of Lighthouse. 
+#![cfg(any(feature = "afl", feature = "repro"))] + +pub mod config; +pub mod env; +pub mod log_interceptor; +pub mod message_queue; +pub mod node; +pub mod runner; +pub mod slashing_protection; + +pub use beacon_chain::hydra::{Hydra, HydraChoose}; +pub use config::Config; +pub use log_interceptor::{LogConfig, LogInterceptor}; +pub use message_queue::Message; +pub use node::Node; +pub use runner::Runner; +pub use slashing_protection::SlashingProtection; + +use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; + +pub type TestHarness = BeaconChainHarness>; diff --git a/beacon_node/beacon_chain/fuzz/src/log_interceptor.rs b/beacon_node/beacon_chain/fuzz/src/log_interceptor.rs new file mode 100644 index 00000000000..bd31d01bc2a --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/src/log_interceptor.rs @@ -0,0 +1,126 @@ +use logging::test_logger; +use slog::{o, Drain, Key, Level, Logger, OwnedKVList, Record, Serializer, KV}; +use std::fmt::Arguments; + +pub struct LogInterceptor { + /// Unique identifier for this logger (e.g. the node name). + id: String, + /// Logging configuration. + conf: LogConfig, + /// Underlying logger to output logs to. + underlying: Logger, +} + +pub struct LogConfig { + /// Log level at which to panic. + pub panic_threshold: Option, + /// Maximum re-org distance allowed (values greater will cause panics). + pub max_reorg_length: Option, + /// Forward logs to the underlying logger. 
+ pub forward_logs: bool, +} + +impl Default for LogConfig { + fn default() -> Self { + Self { + panic_threshold: Some(Level::Error), + max_reorg_length: Some(1), + forward_logs: false, + } + } +} + +impl LogInterceptor { + pub fn new(id: String, conf: LogConfig) -> Self { + Self { + id, + conf, + underlying: test_logger(), + } + } + + pub fn into_logger(self) -> Logger { + Logger::root(self.ignore_res(), o!()) + } +} + +impl Drain for LogInterceptor { + type Ok = (); + type Err = (); + + fn log(&self, record: &Record, _: &OwnedKVList) -> Result<(), ()> { + if self.conf.forward_logs { + self.underlying.log(record); + } + + // Check for messages above the threshold. + if let Some(panic_threshold) = self.conf.panic_threshold { + if record.level().is_at_least(panic_threshold) { + panic!( + "{} logged a message above the panic threshold: {} {}, from {}:{}", + self.id, + record.level().as_short_str(), + record.msg(), + record.file(), + record.line(), + ); + } + } + + // Check for re-orgs longer than the re-org limit. + if let (Some(reorg_limit), Level::Warning) = (self.conf.max_reorg_length, record.level()) { + let message = format!("{}", record.msg()); + if message == "Beacon chain re-org" { + let mut snooper = ReorgSnooper::default(); + record.kv().serialize(record, &mut snooper).unwrap(); + + let (prev_head, new_head, distance) = snooper.unwrap(); + + if distance > reorg_limit { + panic!( + "{} experienced a re-org of length {} (> {}) from {} to {}", + self.id, distance, reorg_limit, prev_head, new_head + ); + } + } + } + + Ok(()) + } +} + +/// Serializer to snoop on a logged usize value. 
+#[derive(Default)] +pub struct ReorgSnooper { + previous_head: Option, + new_head: Option, + reorg_distance: Option, +} + +impl ReorgSnooper { + fn unwrap(self) -> (String, String, usize) { + ( + self.previous_head.unwrap(), + self.new_head.unwrap(), + self.reorg_distance.unwrap(), + ) + } +} + +impl Serializer for ReorgSnooper { + fn emit_arguments(&mut self, key: Key, args: &Arguments) -> slog::Result { + if key == "previous_head" { + self.previous_head = Some(args.to_string()); + } else if key == "new_head" { + self.new_head = Some(args.to_string()); + } + Ok(()) + } + + fn emit_usize(&mut self, key: Key, value: usize) -> slog::Result { + if key == "reorg_distance" { + self.reorg_distance = Some(value); + } + Ok(()) + } +} diff --git a/beacon_node/beacon_chain/fuzz/src/message_queue.rs b/beacon_node/beacon_chain/fuzz/src/message_queue.rs new file mode 100644 index 00000000000..191e1d14ab2 --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/src/message_queue.rs @@ -0,0 +1,30 @@ +use types::{Attestation, EthSpec, Hash256, SignedBeaconBlock}; + +#[derive(Debug, Clone)] +#[must_use] +#[allow(clippy::large_enum_variant)] +pub enum Message { + Attestation(Attestation), + Block(SignedBeaconBlock), +} + +impl Message { + pub fn block_root(&self) -> Hash256 { + match self { + Self::Attestation(att) => att.data.beacon_block_root, + Self::Block(block) => block.canonical_root(), + } + } + + /// The root of a block which must be processed before this message can be processed. 
+ pub fn dependent_block_root(&self) -> Hash256 { + match self { + Self::Attestation(att) => att.data.beacon_block_root, + Self::Block(block) => block.parent_root(), + } + } + + pub fn is_block(&self) -> bool { + matches!(self, Message::Block(_)) + } +} diff --git a/beacon_node/beacon_chain/fuzz/src/node.rs b/beacon_node/beacon_chain/fuzz/src/node.rs new file mode 100644 index 00000000000..e5d69484c03 --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/src/node.rs @@ -0,0 +1,192 @@ +use crate::{config::DebugConfig, Message, SlashingProtection, TestHarness}; +use async_recursion::async_recursion; +use beacon_chain::{AttestationError, BeaconChainError, BlockError, ForkChoiceError}; +use fork_choice::InvalidAttestation; +use std::borrow::Cow; +use std::collections::HashMap; +use std::collections::VecDeque; +use types::{Attestation, EthSpec, Hash256, Slot}; + +pub struct Node { + pub id: String, + pub harness: TestHarness, + /// Queue of ordered `(tick, message)` pairs. + /// + /// Each `message` will be delivered to the node at `tick`. + pub message_queue: VecDeque<(usize, Message)>, + /// Messages that are dependent on others, to be processed immediately once their dependent + /// block is processed. + pub dependent_messages: HashMap>>, + /// Validator indices assigned to this node. + pub validators: Vec, + /// Slashing protection for validators assigned to this node. + pub slashing_protection: SlashingProtection, + pub debug_config: DebugConfig, +} + +impl Node { + pub fn queue_message(&mut self, message: Message, arrive_tick: usize) { + let insert_at = self + .message_queue + .partition_point(|&(tick, _)| tick <= arrive_tick); + self.message_queue.insert(insert_at, (arrive_tick, message)); + } + + pub fn has_messages_queued(&self) -> bool { + !self.message_queue.is_empty() + } + + /// Deliver the message, or cache it in the dependent messages for later processing. 
+ #[async_recursion] + pub async fn deliver_message(&mut self, message: Message) { + let unblocking_block_root = message.is_block().then(|| message.block_root()); + + if let Some(undelivered) = self.try_deliver_message(message).await { + if undelivered.is_block() && self.debug_config.block_delivery { + println!( + "{}: queueing block {:?} in dependent messages", + self.id, + undelivered.block_root() + ); + } + self.dependent_messages + .entry(undelivered.dependent_block_root()) + .or_insert_with(Vec::new) + .push(undelivered); + } else if let Some(unblocking_root) = unblocking_block_root { + if self.debug_config.block_delivery { + println!("{}: processed block {:?}", self.id, unblocking_root); + } + + // Block was delivered successfully: process all messages dependent on this block. + if let Some(messages) = self.dependent_messages.remove(&unblocking_root) { + for message in messages { + self.deliver_message(message).await; + } + } + } + } + + /// Attempt to deliver the message, returning it if is unable to be processed right now. + /// + /// Undelivered messages should be requeued to simulate the node queueing them outside the + /// `BeaconChain` module, or fetching them via network RPC. + async fn try_deliver_message(&self, message: Message) -> Option> { + match message { + Message::Attestation(att) => match self + .harness + .process_unaggregated_attestation(att.clone()) + { + Ok(()) => None, + // Re-queue attestations for which the head block is not yet known. + Err(AttestationError::UnknownHeadBlock { .. }) => Some(Message::Attestation(att)), + // Ignore attestations from past slots. + Err(AttestationError::PastSlot { .. }) => None, + // Workaround for a minor bug: https://github.com/sigp/lighthouse/issues/4633 + Err(AttestationError::BeaconChainError(BeaconChainError::ForkChoiceError( + ForkChoiceError::InvalidAttestation(InvalidAttestation::PastEpoch { .. 
}), + ))) => None, + Err(e) => panic!("unable to deliver attestation to node {}: {e:?}", self.id), + }, + Message::Block(block) => { + match self.harness.process_block_result(block).await { + Ok(_) => None, + // Re-queue blocks that arrive out of order. + Err(BlockError::ParentUnknown(block)) => Some(Message::Block((*block).clone())), + // If a block arrives after the node has already finalized a conflicting block + // then it is useless and doesn't need to be reprocessed. + Err( + BlockError::WouldRevertFinalizedSlot { .. } + | BlockError::NotFinalizedDescendant { .. }, + ) => None, + Err(e) => panic!("unable to process block: {e:?}"), + } + } + } + } + + pub async fn deliver_queued_at(&mut self, tick: usize) { + loop { + match self.message_queue.front() { + Some((message_tick, _)) if *message_tick <= tick => { + let (_, message) = self.message_queue.pop_front().unwrap(); + self.deliver_message(message).await; + } + _ => break, + } + } + } + + pub fn prune_dependent_messages(&mut self, block_is_viable: impl Fn(Hash256) -> bool) { + self.dependent_messages + .retain(|block_root, _| block_is_viable(*block_root)); + } + + /// Produce unaggregated attestations for all (honest) validators managed by this node. + /// + /// Do not produce attestations if they are slashable. 
+ pub fn get_honest_unaggregated_attestations( + &mut self, + attestation_slot: Slot, + ) -> Vec> { + let head = self.harness.chain.canonical_head.cached_head(); + let state = &head.snapshot.beacon_state; + let head_block_root = head.head_block_root(); + let state_root = head.head_state_root(); + let fork = self + .harness + .chain + .spec + .fork_at_epoch(attestation_slot.epoch(E::slots_per_epoch())); + + let mut attestations = vec![]; + for bc in state + .get_beacon_committees_at_slot(attestation_slot) + .unwrap() + { + for (committee_position, &validator_index) in bc.committee.iter().enumerate() { + if !self.validators.contains(&validator_index) { + continue; + } + + let mut attestation = self + .harness + .produce_unaggregated_attestation_for_block( + attestation_slot, + bc.index, + head_block_root.into(), + Cow::Borrowed(state), + state_root, + ) + .unwrap(); + + // Check slashability. + if !self.slashing_protection.can_attest( + validator_index, + attestation.data.source.epoch, + attestation.data.target.epoch, + ) { + continue; + } + self.slashing_protection.record_attestation( + validator_index, + attestation.data.source.epoch, + attestation.data.target.epoch, + ); + + attestation + .sign( + &self.harness.validator_keypairs[validator_index].sk, + committee_position, + &fork, + self.harness.chain.genesis_validators_root, + &self.harness.spec, + ) + .unwrap(); + + attestations.push(attestation); + } + } + attestations + } +} diff --git a/beacon_node/beacon_chain/fuzz/src/runner.rs b/beacon_node/beacon_chain/fuzz/src/runner.rs new file mode 100644 index 00000000000..4e62477279d --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/src/runner.rs @@ -0,0 +1,458 @@ +use crate::{Config, Hydra, LogConfig, Message, Node, SlashingProtection, TestHarness}; +use arbitrary::Unstructured; +use beacon_chain::{ + beacon_proposer_cache::compute_proposer_duties_from_head, + slot_clock::SlotClock, + test_utils::{EphemeralHarnessType, RelativeSyncCommittee}, +}; +use 
parking_lot::RwLock; +use std::collections::HashMap; +use std::collections::VecDeque; +use std::ops::ControlFlow; +use std::sync::Arc; +use std::time::Duration; +use types::{test_utils::generate_deterministic_keypairs, *}; + +pub struct Runner<'a, E: EthSpec> { + conf: Config, + honest_nodes: Vec>, + attacker: Node, + hydra: Hydra, Arc>>>, + u: Arc>>, + time: CurrentTime, + all_blocks: Vec<(Hash256, Slot)>, + spec: ChainSpec, +} + +pub struct CurrentTime { + tick: usize, + current_time: Duration, + tick_duration: Duration, +} + +impl CurrentTime { + fn increment(&mut self) { + self.tick += 1; + self.current_time += self.tick_duration; + } +} + +impl<'a, E: EthSpec> Runner<'a, E> { + pub fn new( + data: &'a [u8], + conf: Config, + spec: ChainSpec, + get_harness: impl for<'b> Fn(String, LogConfig, ChainSpec, &'b [Keypair]) -> TestHarness, + ) -> Self { + assert!(conf.is_valid()); + + let u = Arc::new(RwLock::new(Unstructured::new(data))); + + let keypairs = generate_deterministic_keypairs(conf.total_validators); + + // Create honest nodes. + let validators_per_node = conf.honest_validators_per_node(); + let honest_nodes = (0..conf.num_honest_nodes) + .map(|i| { + let id = format!("node_{i}"); + let log_config = conf.log_config(i); + let harness = get_harness(id.clone(), log_config, spec.clone(), &keypairs); + let validators = (i * validators_per_node..(i + 1) * validators_per_node).collect(); + Node { + id, + harness, + message_queue: VecDeque::new(), + dependent_messages: HashMap::default(), + validators, + slashing_protection: SlashingProtection::default(), + debug_config: conf.debug.clone(), + } + }) + .collect::>(); + + // Set up attacker values. 
+ let attacker_id = "attacker".to_string(); + let attacker = Node { + id: attacker_id.clone(), + harness: get_harness( + attacker_id, + conf.attacker_log_config(), + spec.clone(), + &keypairs, + ), + message_queue: VecDeque::new(), + dependent_messages: HashMap::default(), + validators: (conf.honest_validators()..conf.total_validators).collect(), + slashing_protection: SlashingProtection::default(), + debug_config: conf.debug.clone(), + }; + + // This should not really be necessary. + // See: https://github.com/sigp/lighthouse/issues/4631 + attacker.harness.chain.head_tracker.register_block( + attacker.harness.chain.genesis_block_root, + Hash256::zero(), + Slot::new(0), + ); + + let hydra = Hydra::new(u.clone()); + + // Simulation parameters. + let time = CurrentTime { + tick: 0, + current_time: *attacker.harness.chain.slot_clock.genesis_duration(), + tick_duration: conf.tick_duration(&spec), + }; + + let all_blocks = vec![(attacker.harness.head_block_root(), Slot::new(0))]; + + Runner { + conf, + honest_nodes, + attacker, + hydra, + u, + time, + all_blocks, + spec, + } + } + + fn tick(&self) -> usize { + self.time.tick + } + + fn current_slot(&self) -> Slot { + self.attacker.harness.chain.slot_clock.now().unwrap() + } + + fn current_epoch(&self) -> Epoch { + self.current_slot().epoch(E::slots_per_epoch()) + } + + fn record_block_proposal(&mut self, block: &SignedBeaconBlock) { + let block_root = block.canonical_root(); + let slot = block.slot(); + if self.conf.debug.block_proposals { + println!( + "block {:?} @ slot {}, parent: {:?}, withdrawals: {}", + block_root, + slot, + block.parent_root(), + block + .message() + .body() + .execution_payload() + .and_then(|payload| { + payload + .execution_payload_capella() + .map(|p| p.withdrawals.len()) + }) + .unwrap_or(0) + ); + } + self.all_blocks.push((block_root, slot)); + } + + async fn queue_all_with_random_delay(&mut self, message: Message) -> arbitrary::Result<()> { + // Choose the delay for the message to reach 
the first honest node. + let first_node_delay = self + .u + .write() + .int_in_range(0..=self.conf.max_first_node_delay)?; + + // Choose that node. + let first_node = self.u.write().choose_index(self.honest_nodes.len())?; + + // Choose the delays for the other nodes randomly within the configured range. + for (i, node) in self.honest_nodes.iter_mut().enumerate() { + let delay = if i == first_node { + first_node_delay + } else { + self.u.write().int_in_range( + first_node_delay..=first_node_delay + self.conf.max_delay_difference, + )? + }; + node.queue_message(message.clone(), self.time.tick + delay); + } + + // Deliver the message to the attacker's node instantly. + self.attacker.deliver_message(message).await; + + Ok(()) + } + + async fn deliver_all_honest(&mut self, message: &Message) { + for node in &mut self.honest_nodes { + node.deliver_message(message.clone()).await; + } + } + + async fn deliver_all(&mut self, message: Message) { + self.deliver_all_honest(&message).await; + self.attacker.deliver_message(message).await; + } + + /// Update time and deliver queued messages on all nodes. + async fn on_clock_advance(&mut self) { + // Update the Hydra as we use it to determine block viability. + let current_epoch = self.current_epoch(); + self.hydra + .update(&self.attacker.harness.chain, current_epoch); + + for node in &mut self.honest_nodes { + node.harness + .chain + .slot_clock + .set_current_time(self.time.current_time); + + // Run fork choice at every slot boundary. 
+ if self.conf.is_block_proposal_tick(self.time.tick) { + node.harness.chain.per_slot_task().await; + } + + node.deliver_queued_at(self.time.tick).await; + node.prune_dependent_messages(|block_root| self.hydra.block_is_viable(&block_root)); + } + + self.attacker + .harness + .chain + .slot_clock + .set_current_time(self.time.current_time); + if self.conf.is_block_proposal_tick(self.tick()) { + self.attacker.harness.chain.per_slot_task().await; + } + } + + pub async fn run(&mut self) -> arbitrary::Result<()> { + let slots_per_epoch = E::slots_per_epoch() as usize; + + // Keep running while there is entropy remaining, or the message queues contain undelivered + // messages. + while !self.u.write().is_empty() + || self + .honest_nodes + .iter() + .any(|node| node.has_messages_queued()) + { + let current_slot = self.current_slot(); + let current_epoch = self.current_epoch(); + + // Slot start activities for honest nodes. + if self.conf.is_block_proposal_tick(self.tick()) { + let mut new_blocks = vec![]; + + // Produce block(s). + for node in &mut self.honest_nodes { + let (proposers, _, _, _) = + compute_proposer_duties_from_head(current_epoch, &node.harness.chain) + .unwrap(); + let current_slot_proposer = + proposers[current_slot.as_usize() % slots_per_epoch]; + + if !node.validators.contains(¤t_slot_proposer) { + continue; + } + + let head_state = node.harness.get_current_state(); + let (block, _) = node.harness.make_block(head_state, current_slot).await; + new_blocks.push(block); + } + + // New honest blocks get delivered instantly. + for block in new_blocks { + self.record_block_proposal(&block); + self.deliver_all(Message::Block(block)).await; + } + } + + // Unaggregated attestations from the honest nodes. 
+ if self.conf.is_attestation_tick(self.tick()) { + let mut new_attestations = vec![]; + for node in &mut self.honest_nodes { + let attestations = node.get_honest_unaggregated_attestations(current_slot); + new_attestations.extend(attestations); + } + for attestation in new_attestations { + self.deliver_all(Message::Attestation(attestation)).await; + } + } + + // Sync committee messages and contributions for *all* nodes. + // + // We don't care too much about their quality, but want to use them to prevent + // validator balances from dropping so low that partial withdrawals are disabled. + if self.conf.is_attestation_tick(self.tick()) { + // Use the attacker harness as it has full view of the network. + let harness = &self.attacker.harness; + let head = harness.chain.canonical_head.cached_head(); + + let current_period = head.snapshot.beacon_state.current_epoch() + / harness.spec.epochs_per_sync_committee_period; + let next_slot_period = (current_slot + 1).epoch(E::slots_per_epoch()) + / harness.spec.epochs_per_sync_committee_period; + let relative_sync_committee = if current_period == next_slot_period { + RelativeSyncCommittee::Current + } else { + RelativeSyncCommittee::Next + }; + + let sync_contributions = self.attacker.harness.make_sync_contributions( + &head.snapshot.beacon_state, + head.head_block_root(), + current_slot, + relative_sync_committee, + ); + for node in &self.honest_nodes { + node.harness + .process_sync_contributions(sync_contributions.clone()) + .expect("should process sync contributions"); + } + harness + .process_sync_contributions(sync_contributions) + .expect("should process sync contributions"); + } + + // Slot start activities for the attacker. The attacker only generates new actions + // so long as there is entropy remaining. 
+ if current_slot.as_usize() >= self.conf.num_warmup_slots + && !self.u.write().is_empty() + && self.conf.is_block_proposal_tick(self.tick()) + { + self.hydra + .update(&self.attacker.harness.chain, current_epoch); + let proposer_heads = self.hydra.proposer_heads_at_slot( + current_slot, + &self.attacker.validators, + &self.spec, + ); + if self.conf.debug.num_hydra_heads { + println!( + "number of hydra heads at slot {}: {}, attacker proposers: {}", + current_slot, + self.hydra.num_heads(), + proposer_heads.len(), + ); + } + + if !proposer_heads.is_empty() { + let mut proposers = proposer_heads.iter(); + let mut selected_proposals = vec![]; + + self.u.write().arbitrary_loop( + self.conf.min_attacker_proposers(proposers.len()), + self.conf.max_attacker_proposers(proposers.len()), + |ux| { + let (_, head_choices) = proposers.next().unwrap(); + let (block_root, state_ref) = ux.choose(head_choices)?; + let state = if state_ref.advanced.slot() < current_slot { + state_ref.advanced.clone() + } else { + state_ref.unadvanced.clone() + }; + + selected_proposals.push((block_root, state)); + Ok(ControlFlow::Continue(())) + }, + )?; + + let mut new_blocks = vec![]; + + for (_, state) in selected_proposals { + let (block, _) = + self.attacker.harness.make_block(state, current_slot).await; + if self.conf.debug.attacker_proposals { + println!( + "attacker proposed block {:?} at slot {} atop {:?}", + block.canonical_root(), + current_slot, + block.parent_root(), + ); + } + new_blocks.push(block); + } + + for block in new_blocks { + self.record_block_proposal(&block); + self.queue_all_with_random_delay(Message::Block(block)) + .await?; + } + } + } + + // Unaggregated attestations from the attacker. 
+ if self.conf.is_attestation_tick(self.tick()) && !self.u.write().is_empty() { + self.hydra + .update(&self.attacker.harness.chain, current_epoch); + let (duties, _dep_root) = self + .hydra + .get_attester_duties( + &self.attacker.harness.chain, + current_epoch, + &self.attacker.validators, + ) + .unwrap(); + + for (validator_index, (duty, fork)) in self + .attacker + .validators + .clone() + .into_iter() + .zip(duties.into_iter()) + .filter_map(|(validator_index, opt_duty)| { + opt_duty + .filter(|(d, _)| d.slot == current_slot) + .map(|duty| (validator_index, duty)) + }) + { + let att_data = self + .hydra + .produce_attestation_data(duty.slot, duty.index) + .unwrap(); + + if !self.attacker.slashing_protection.can_attest( + validator_index, + att_data.source.epoch, + att_data.target.epoch, + ) { + continue; + } + self.attacker.slashing_protection.record_attestation( + validator_index, + att_data.source.epoch, + att_data.target.epoch, + ); + + let mut attestation = Attestation { + aggregation_bits: BitList::with_capacity(duty.committee_len).unwrap(), + data: att_data, + signature: AggregateSignature::empty(), + }; + attestation + .sign( + &self.attacker.harness.validator_keypairs[validator_index].sk, + duty.committee_position, + &fork, + self.attacker.harness.chain.genesis_validators_root, + &self.attacker.harness.spec, + ) + .unwrap(); + self.queue_all_with_random_delay(Message::Attestation(attestation)) + .await?; + } + } + + // Increment clock on each node and deliver messages. 
+ self.time.increment(); + self.on_clock_advance().await; + } + + println!( + "finished a run that generated {} blocks up to slot {}", + self.all_blocks.len(), + self.all_blocks.iter().map(|(_, slot)| slot).max().unwrap() + ); + Ok(()) + } +} diff --git a/beacon_node/beacon_chain/fuzz/src/slashing_protection.rs b/beacon_node/beacon_chain/fuzz/src/slashing_protection.rs new file mode 100644 index 00000000000..8db31ffa37e --- /dev/null +++ b/beacon_node/beacon_chain/fuzz/src/slashing_protection.rs @@ -0,0 +1,34 @@ +use std::cmp::max; +use std::collections::HashMap; +use types::Epoch; + +/// Simple in-memory slashing protection for a group of validators. +#[derive(Debug, Default)] +pub struct SlashingProtection { + pub validators: HashMap, +} + +#[derive(Debug, Default)] +pub struct Watermark { + pub max_source_epoch: Epoch, + pub max_target_epoch: Epoch, +} + +impl SlashingProtection { + pub fn can_attest(&self, validator: usize, source_epoch: Epoch, target_epoch: Epoch) -> bool { + self.validators.get(&validator).map_or(true, |w| { + source_epoch >= w.max_source_epoch && target_epoch > w.max_target_epoch + }) + } + + pub fn record_attestation( + &mut self, + validator: usize, + source_epoch: Epoch, + target_epoch: Epoch, + ) { + let entry = self.validators.entry(validator).or_default(); + entry.max_source_epoch = max(source_epoch, entry.max_source_epoch); + entry.max_target_epoch = max(target_epoch, entry.max_target_epoch); + } +} diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index 5535fec37c4..f9182c601f5 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -1059,6 +1059,10 @@ pub fn verify_propagation_slot_range( earliest_permissible_slot, }); } + println!( + "attestation at {attestation_slot} is OK wrt {earliest_permissible_slot}, current slot is {:?}", + slot_clock.now() + ); Ok(()) } diff --git 
a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 987ea9e7c33..af023441436 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -396,7 +396,7 @@ pub struct BeaconChain { /// HTTP server is enabled. pub event_handler: Option>, /// Used to track the heads of the beacon chain. - pub(crate) head_tracker: Arc, + pub head_tracker: Arc, /// A cache dedicated to block processing. pub(crate) snapshot_cache: TimeoutRwLock>, /// Caches the attester shuffling for a given epoch and shuffling key root. @@ -4420,6 +4420,19 @@ impl BeaconChain { .map_err(BlockProductionError::OpPoolError)?; drop(attestation_packing_timer); + let simple_attestation_ordering = true; + if simple_attestation_ordering { + attestations.sort_unstable_by_key(|att| { + // This 4-tuple should be unique per attestation. + ( + att.data.slot, + att.data.index, + att.data.beacon_block_root, + att.aggregation_bits.tree_hash_root(), + ) + }); + } + // If paranoid mode is enabled re-check the signatures of every included message. // This will be a lot slower but guards against bugs in block production and can be // quickly rolled out without a release. diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 2b1f714362f..1fad4347d5f 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -1259,6 +1259,8 @@ fn detect_reorg( &metrics::FORK_CHOICE_REORG_DISTANCE, reorg_distance.as_u64() as i64, ); + // NOTE: this log is used by the Hydra fuzzer, if you want to change this log message please + // ask @michaelsproul or update Hydra to parse the new log. 
warn!( log, "Beacon chain re-org"; @@ -1266,7 +1268,7 @@ fn detect_reorg( "previous_slot" => old_state.slot(), "new_head" => ?new_block_root, "new_slot" => new_state.slot(), - "reorg_distance" => reorg_distance, + "reorg_distance" => reorg_distance.as_usize(), ); Some(reorg_distance) diff --git a/beacon_node/beacon_chain/src/hydra.rs b/beacon_node/beacon_chain/src/hydra.rs new file mode 100644 index 00000000000..cced2087bfc --- /dev/null +++ b/beacon_node/beacon_chain/src/hydra.rs @@ -0,0 +1,534 @@ +#![cfg(feature = "hydra")] + +use crate::{BeaconChain, BeaconChainError, BeaconChainTypes}; +use arbitrary::Unstructured; +use eth2::types::ProposerData; +use parking_lot::RwLock; +use rand::rngs::SmallRng; +use rand::seq::SliceRandom; +use rand::SeedableRng; +use slog::warn; +use state_processing::{state_advance::complete_state_advance, BlockReplayer}; +use std::collections::{BTreeMap, HashSet}; +use std::sync::Arc; +use types::*; + +/// For every head removed, I spawn another. +pub struct Hydra { + /// Map from `block_root` to advanced state with that block as head. + states: BTreeMap>, + /// Random number generator/choice maker. + rng: C, + /// Map of validator index and epoch to selected head block root. + validator_to_block_root: BTreeMap<(usize, Epoch), Hash256>, + /// Map of slot and committee index to selected head block root. + /// + /// It's possible that we get conflicts in this map if we're connected + /// to a lot of validators spread across different VCs. In this case we will return + /// `None` to the validators whose duties conflict. + committee_index_to_block_root: BTreeMap<(Slot, u64), Hash256>, + /// Map from proposal slot to proposer and head block root (key into `self.states`). + proposers: BTreeMap, + /// The epoch up to which Hydra has finished advancing heads. + current_epoch: Epoch, +} + +pub struct HydraState { + /// Unadvanced state for this block (state.slot == block.slot). 
+ pub unadvanced: BeaconState, + /// Advanced state for this block (state.slot.epoch() == current_epoch). + pub advanced: BeaconState, +} + +pub trait HydraChoose { + fn choose_slice<'a, T>(&mut self, values: &'a [T]) -> Option<&'a T>; +} + +impl HydraChoose for SmallRng { + fn choose_slice<'a, T>(&mut self, values: &'a [T]) -> Option<&'a T> { + values.choose(self) + } +} + +impl<'a> HydraChoose for Arc>> { + fn choose_slice<'b, T>(&mut self, values: &'b [T]) -> Option<&'b T> { + self.write().choose(values).ok() + } +} + +impl Hydra { + pub fn new_random() -> Self { + Self::new(SmallRng::from_entropy()) + } +} + +impl Hydra { + pub fn new(rng: C) -> Self { + Self { + states: BTreeMap::new(), + validator_to_block_root: BTreeMap::new(), + committee_index_to_block_root: BTreeMap::new(), + proposers: BTreeMap::new(), + rng, + current_epoch: Epoch::new(0), + } + } + + pub fn update(&mut self, chain: &BeaconChain, current_epoch: Epoch) { + let finalized_checkpoint = chain.canonical_head.cached_head().finalized_checkpoint(); + let finalized_slot = finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + + // Pull up every block on every viable chain that descends from finalization. + for (head_block_root, _) in chain.heads() { + let relevant_block_roots = match chain + .rev_iter_block_roots_from(head_block_root) + .and_then(|iter| { + itertools::process_results(iter, |iter| { + iter.take_while(|(_, slot)| *slot >= finalized_slot) + .collect::>() + }) + }) { + Ok(block_roots) => block_roots, + Err(e) => { + warn!( + chain.log, + "Skipping outdated Hydra head"; + "error" => ?e, + ); + continue; + } + }; + + // Discard this head if it isn't descended from the finalized checkpoint (special case + // for genesis). + if relevant_block_roots.last().map(|(root, _)| root) != Some(&finalized_checkpoint.root) + && finalized_slot != 0 + { + continue; + } + + // Iterate in reverse order so we can hit the parent state in the cache. 
+ for (block_root, _) in relevant_block_roots.into_iter().rev() { + self.ensure_block(chain, block_root, current_epoch, finalized_slot); + } + } + // Prune all stale heads. + self.prune(finalized_checkpoint); + + // Update current epoch. + self.current_epoch = current_epoch; + } + + fn ensure_block( + &mut self, + chain: &BeaconChain, + block_root: Hash256, + current_epoch: Epoch, + finalized_slot: Slot, + ) { + let spec = &chain.spec; + let state = if let Some(hydra_state) = self.states.get_mut(&block_root) { + hydra_state + } else { + let Ok(Some(block)) = chain.get_blinded_block(&block_root) else { + slog::warn!( + chain.log, + "Skipping missing block"; + "block_root" => ?block_root + ); + return; + }; + + // This check necessary to prevent freakouts at skipped slots. + if block.slot() < finalized_slot { + return; + } + + // Try to get the parent state from the cache so we can share memory with it. + let mut state = if let Some(parent_state) = self.states.get(&block.parent_root()) { + // Use advanced parent state if possible + let pre_state = if parent_state.advanced.slot() < block.slot() { + parent_state.advanced.clone() + } else { + parent_state.unadvanced.clone() + }; + // Re-apply block. + let block_slot = block.slot(); + match BlockReplayer::new(pre_state, &chain.spec) + .no_signature_verification() + .no_state_root_iter() + .minimal_block_root_verification() + .apply_blocks(vec![block], Some(block_slot)) + { + Ok(r) => r.into_state(), + Err(e) => { + let e: BeaconChainError = e; + slog::error!( + chain.log, + "Hydra block reconstruction error"; + "error" => ?e, + "block_root" => ?block_root + ); + return; + } + } + } else { + // Cache miss, load the full state for this block from disk (slow). 
+ warn!( + chain.log, + "Missed Hydra state cache"; + "slot" => block.slot(), + "block_root" => ?block_root + ); + if let Some(state) = chain + .store + .get_state(&block.state_root(), Some(block.slot())) + .ok() + .flatten() + { + state + } else { + warn!( + chain.log, + "No state found"; + "state_root" => ?block.state_root(), + ); + return; + } + }; + state.build_caches(&chain.spec).unwrap(); + + self.states.entry(block_root).or_insert(HydraState { + unadvanced: state.clone(), + advanced: state, + }) + }; + + if state.advanced.current_epoch() < current_epoch { + complete_state_advance( + &mut state.advanced, + None, + current_epoch.start_slot(T::EthSpec::slots_per_epoch()), + spec, + ) + .unwrap(); + } + } + + pub fn prune(&mut self, finalized_checkpoint: Checkpoint) { + let finalized_slot = finalized_checkpoint + .epoch + .start_slot(T::EthSpec::slots_per_epoch()); + let mut deleted_heads = vec![]; + self.states.retain(|block_root, state| { + let keep = *block_root == finalized_checkpoint.root + || state + .advanced + .get_block_root(finalized_slot) + .map_or(false, |ancestor| *ancestor == finalized_checkpoint.root); + if !keep { + deleted_heads.push(*block_root); + } + keep + }); + + // Blow away any duties that refer to the deleted blocks. This can happen if we choose + // duties for the next epoch and then the state becomes unviable. + self.validator_to_block_root + .retain(|_, block_root| !deleted_heads.contains(block_root)); + self.committee_index_to_block_root + .retain(|_, block_root| !deleted_heads.contains(block_root)); + } + + pub fn num_heads(&self) -> usize { + self.states.len() + } + + pub fn block_is_viable(&self, block_root: &Hash256) -> bool { + self.states.contains_key(block_root) + } + + /// Get a set of *chaotic* attester duties. + /// + /// This function is memoized so that it returns the same result on repeat calls with the same + /// arguments. 
+ pub fn get_attester_duties( + &mut self, + chain: &BeaconChain, + epoch: Epoch, + request_indices: &[usize], + ) -> Result<(Vec>, Hash256), String> { + let current_epoch = self.current_epoch; + + if epoch != current_epoch && epoch != current_epoch + 1 { + return Err(format!( + "not ready for epoch {epoch}, still at {current_epoch}" + )); + } + + let duties = request_indices + .iter() + .map(|validator_index| self.get_attester_duty(chain, epoch, *validator_index)) + .collect::, _>>()?; + + // Use current epoch as dependent root: any queries made in the same epoch should result in + // the same duties. Queries of the *next epoch* from the *current epoch* are liable to + // change. + let dependent_root = Hash256::from_low_u64_be(current_epoch.as_u64()); + + Ok((duties, dependent_root)) + } + + pub fn get_attester_duty( + &mut self, + chain: &BeaconChain, + epoch: Epoch, + validator_index: usize, + ) -> Result, String> { + // Check for an existing decision. + let existing_block_root = self.validator_to_block_root.get(&(validator_index, epoch)); + + let (relative_epoch, head_block_root, state) = if let Some(block_root) = existing_block_root + { + let state = &mut self + .states + .get_mut(block_root) + .ok_or_else(|| format!("missing state for {block_root:?}"))? + .advanced; + let relative_epoch = RelativeEpoch::from_epoch(state.current_epoch(), epoch) + .map_err(|e| format!("bad relative epoch {block_root:?}: {e:?}"))?; + (relative_epoch, *block_root, state) + } else { + // Select a random head to base the duties on. 
+ let viable_states = self + .states + .iter() + .filter_map(|(block_root, state)| { + let relative_epoch = + RelativeEpoch::from_epoch(state.advanced.current_epoch(), epoch).ok()?; + Some((relative_epoch, *block_root)) + }) + .collect::>(); + self.rng + .choose_slice(&viable_states) + .copied() + .map(|(epoch, block_root)| { + let state = self.states.get_mut(&block_root).expect("state exists"); + (epoch, block_root, &mut state.advanced) + }) + .ok_or("no suitable heads")? + }; + + state + .build_committee_cache(relative_epoch, &chain.spec) + .map_err(|e| format!("error computing committee: {e:?}"))?; + + let mut opt_duty = state + .get_attestation_duties(validator_index, relative_epoch) + .map_err(|e| format!("no duties for {validator_index}: {e:?}"))?; + + // Update caches. + if let Some(duty) = opt_duty { + // Check for collision by (slot, committee index). This would prevent us from + // forming an attestation with a state that's consistent with the duties, so in this + // case we return a null duty. The validator client will retry and we'll hopefully + // pick a different random head that does work. + if self + .committee_index_to_block_root + .get(&(duty.slot, duty.index)) + .map_or(false, |cached_block_root| { + *cached_block_root != head_block_root + }) + { + warn!( + chain.log, + "Duties collision"; + "validator_index" => validator_index, + "slot" => duty.slot, + "committee_index" => duty.index + ); + opt_duty = None; + } else { + self.committee_index_to_block_root + .insert((duty.slot, duty.index), head_block_root); + self.validator_to_block_root + .insert((validator_index, epoch), head_block_root); + } + } + + // Update shuffling cache used for attestation verification. 
+ let shuffling_id = AttestationShufflingId::new(head_block_root, state, relative_epoch) + .map_err(|_| "cannot compute shuffling id")?; + let cache = state + .committee_cache(relative_epoch) + .map_err(|_| "cache unbuilt")?; + chain + .shuffling_cache + .try_write_for(std::time::Duration::from_secs(2)) + .ok_or("cache timeout")? + .insert_committee_cache(shuffling_id, cache); + + Ok(opt_duty.map(|duty| (duty, state.fork()))) + } + + pub fn produce_attestation_data( + &self, + slot: Slot, + committee_index: u64, + ) -> Result { + let beacon_block_root = *self + .committee_index_to_block_root + .get(&(slot, committee_index)) + .ok_or_else(|| format!("no committee {committee_index} cached for {slot}"))?; + let state = &self + .states + .get(&beacon_block_root) + .ok_or("missing state")? + .advanced; + + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + let epoch = slot.epoch(slots_per_epoch); + let epoch_start_slot = epoch.start_slot(slots_per_epoch); + let block_slot = state.latest_block_header().slot; + + // If head block is prior to target slot, then it is the target. + let target_root = if block_slot <= epoch_start_slot { + beacon_block_root + } else { + // Otherwise the epoch boundary block is certainly in the past and can be looked up + // in `block_roots`. + *state + .get_block_root(epoch_start_slot) + .map_err(|_| "out of bounds")? 
+ }; + let target = Checkpoint { + epoch, + root: target_root, + }; + let source = state.current_justified_checkpoint(); + + Ok(AttestationData { + slot, + index: committee_index, + source, + target, + beacon_block_root, + }) + } + + pub fn get_proposer_duties( + &mut self, + chain: &BeaconChain, + epoch: Epoch, + validator_indices: &HashSet, + ) -> Result<(Vec, Hash256), String> { + let current_epoch = self.current_epoch; + if epoch != current_epoch { + return Err(format!( + "not ready for epoch {epoch}, still at {current_epoch}" + )); + } + let dependent_root = Hash256::from_low_u64_be(current_epoch.as_u64()); + + // Some garbage proposer data for slots we don't care about. + let dummy_proposer_data = |slot| ProposerData { + pubkey: PublicKeyBytes::empty(), + validator_index: u64::MAX, + slot, + }; + + // Check for cached duties. + let mut duties = vec![]; + let mut cache_hit = false; + let slots_per_epoch = T::EthSpec::slots_per_epoch(); + for slot in epoch.slot_iter(slots_per_epoch) { + if let Some((proposer_data, _)) = self.proposers.get(&slot) { + cache_hit = true; + duties.push(proposer_data.clone()); + } else { + duties.push(dummy_proposer_data(slot)); + } + } + + if cache_hit { + return Ok((duties, dependent_root)); + } + + // Iterate all heads, looking for heads that award proposal duties to our validators. + let mut slot_candidates: BTreeMap> = BTreeMap::new(); + for (block_root, hydra_state) in &self.states { + let proposers = hydra_state + .advanced + .get_beacon_proposer_indices(&chain.spec) + .map_err(|e| format!("error computing proposers: {e:?}"))?; + + for (i, slot) in epoch.slot_iter(slots_per_epoch).enumerate() { + let proposer = proposers[i] as u64; + + if validator_indices.contains(&proposer) { + slot_candidates + .entry(slot) + .or_default() + .push((proposer, *block_root)); + } + } + } + + for (slot, candidates) in slot_candidates { + // Choose one proposer for each slot. 
+ if let Some((proposer, block_root)) = self.rng.choose_slice(&candidates).copied() { + let proposer_data = ProposerData { + pubkey: chain + .validator_pubkey_bytes(proposer as usize) + .unwrap() + .unwrap(), + validator_index: proposer, + slot, + }; + let offset = slot.as_u64() % T::EthSpec::slots_per_epoch(); + duties[offset as usize] = proposer_data.clone(); + self.proposers.insert(slot, (proposer_data, block_root)); + } + } + + Ok((duties, dependent_root)) + } + + pub fn state_for_proposal(&self, slot: Slot) -> Option> { + let (_, block_root) = self.proposers.get(&slot)?; + let state = self.states.get(block_root)?; + Some(state.advanced.clone()) + } + + #[allow(clippy::type_complexity)] + pub fn proposer_heads_at_slot( + &self, + slot: Slot, + validator_indices: &[usize], + spec: &ChainSpec, + ) -> BTreeMap)>> { + let mut proposer_heads = BTreeMap::new(); + + for (block_root, state) in &self.states { + let proposer = state + .advanced + .get_beacon_proposer_index(slot, spec) + .unwrap(); + if validator_indices.contains(&proposer) { + proposer_heads + .entry(proposer) + .or_insert_with(Vec::new) + .push((*block_root, state)); + } + } + + // Sort vecs to establish deterministic ordering. 
+ for proposal_opps in proposer_heads.values_mut() { + proposal_opps.sort_by_key(|(block_root, _)| *block_root); + } + + proposer_heads + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 4ea1eeee011..fd3a04cc54e 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -24,6 +24,7 @@ pub mod fork_choice_signal; pub mod fork_revert; mod head_tracker; pub mod historical_blocks; +pub mod hydra; pub mod light_client_finality_update_verification; pub mod light_client_optimistic_update_verification; pub mod merge_readiness; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 5fc05c5551f..16bdeb50319 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,6 +1,7 @@ use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ + attestation_verification::Error as AttestationError, beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, sync_committee_verification::Error as SyncCommitteeError, @@ -119,7 +120,7 @@ pub enum SyncCommitteeStrategy { /// Indicates whether the `BeaconChainHarness` should use the `state.current_sync_committee` or /// `state.next_sync_committee` when creating sync messages or contributions. 
-#[derive(Clone, Debug)] +#[derive(Clone, Copy, Debug)] pub enum RelativeSyncCommittee { Current, Next, @@ -181,6 +182,11 @@ impl Builder> { .clone() .expect("cannot build without validator keypairs"); + let genesis_execution_payload_header = self + .mock_execution_layer + .as_ref() + .and_then(|el| el.genesis_execution_payload_header()); + let store = Arc::new( HotColdDB::open_ephemeral( self.store_config.clone().unwrap_or_default(), @@ -194,7 +200,7 @@ impl Builder> { &validator_keypairs, HARNESS_GENESIS_TIME, Hash256::from_slice(DEFAULT_ETH1_BLOCK_HASH), - None, + genesis_execution_payload_header, builder.get_spec(), ) .expect("should generate interop state"); @@ -433,14 +439,19 @@ where self } - pub fn mock_execution_layer(mut self) -> Self { + pub fn mock_execution_layer(self) -> Self { + self.mock_execution_layer_generic(DEFAULT_TERMINAL_BLOCK) + } + + pub fn mock_execution_layer_generic(mut self, terminal_block_number: u64) -> Self { let spec = self.spec.clone().expect("cannot build without spec"); let shanghai_time = spec.capella_fork_epoch.map(|epoch| { HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() }); let mock = MockExecutionLayer::new( self.runtime.task_executor.clone(), - DEFAULT_TERMINAL_BLOCK, + terminal_block_number, + HARNESS_GENESIS_TIME, shanghai_time, None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), @@ -467,6 +478,7 @@ where }); let mock_el = MockExecutionLayer::new( self.runtime.task_executor.clone(), + HARNESS_GENESIS_TIME, DEFAULT_TERMINAL_BLOCK, shanghai_time, builder_threshold, @@ -1062,15 +1074,12 @@ where relative_sync_committee: RelativeSyncCommittee, ) -> Vec> { let sync_committee: Arc> = match relative_sync_committee { - RelativeSyncCommittee::Current => state - .current_sync_committee() - .expect("should be called on altair beacon state") - .clone(), - RelativeSyncCommittee::Next => state - .next_sync_committee() - .expect("should be called on altair beacon state") - .clone(), - }; + 
RelativeSyncCommittee::Current => state.current_sync_committee(), + RelativeSyncCommittee::Next => state.next_sync_committee(), + } + .expect("should be called on altair beacon state") + .clone(); + let fork = self .spec .fork_at_epoch(message_slot.epoch(E::slots_per_epoch())); @@ -1286,10 +1295,12 @@ where .map(|(subnet_id, committee_messages)| { // If there are any sync messages in this committee, create an aggregate. if let Some((sync_message, subcommittee_position)) = committee_messages.first() { - let sync_committee: Arc> = state - .current_sync_committee() - .expect("should be called on altair beacon state") - .clone(); + let sync_committee: Arc> = match relative_sync_committee { + RelativeSyncCommittee::Current => state.current_sync_committee(), + RelativeSyncCommittee::Next => state.next_sync_committee(), + } + .expect("should be called on altair beacon state") + .clone(); let aggregator_index = sync_committee .get_subcommittee_pubkeys(subnet_id) @@ -1801,6 +1812,18 @@ where } } + pub fn process_unaggregated_attestation( + &self, + attestation: Attestation, + ) -> Result<(), AttestationError> { + let verified = self + .chain + .verify_unaggregated_attestation_for_gossip(&attestation, None)?; + self.chain.add_to_naive_aggregation_pool(&verified)?; + self.chain.apply_attestation_to_fork_choice(&verified)?; + self.chain.add_to_block_inclusion_pool(verified) + } + pub fn set_current_slot(&self, slot: Slot) { let current_slot = self.chain.slot().unwrap(); let current_epoch = current_slot.epoch(E::slots_per_epoch()); @@ -2257,7 +2280,9 @@ where let mut verified_contributions = Vec::with_capacity(sync_contributions.len()); for (_, contribution_and_proof) in sync_contributions { - let signed_contribution_and_proof = contribution_and_proof.unwrap(); + let Some(signed_contribution_and_proof) = contribution_and_proof else { + continue; + }; let verified_contribution = self .chain diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs 
b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index a8d98a767fb..efd7084596c 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -13,8 +13,8 @@ use std::collections::HashMap; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use types::{ - EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, ExecutionPayloadMerge, - ForkName, Hash256, Uint256, + Address, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadCapella, + ExecutionPayloadHeader, ExecutionPayloadMerge, ForkName, Hash256, Uint256, }; const GAS_LIMIT: u64 = 16384; @@ -125,6 +125,7 @@ impl ExecutionBlockGenerator { terminal_total_difficulty: Uint256, terminal_block_number: u64, terminal_block_hash: ExecutionBlockHash, + genesis_time: u64, shanghai_time: Option, ) -> Self { let mut gen = Self { @@ -141,7 +142,25 @@ impl ExecutionBlockGenerator { shanghai_time, }; - gen.insert_pow_block(0).unwrap(); + // Proof-of-stake start. + if terminal_total_difficulty.is_zero() + && terminal_block_number == 0 + && terminal_block_hash == ExecutionBlockHash::zero() + { + let payload_attributes = PayloadAttributes::new( + genesis_time, + Hash256::zero(), + Address::zero(), + Some(vec![]), + ); + let execution_payload = + gen.generate_pos_payload(ExecutionBlockHash::zero(), 0, &payload_attributes); + let block = Block::PoS(execution_payload); + gen.insert_block(block.clone()).unwrap(); + gen.head_block = Some(block); + } else { + gen.insert_pow_block(0).unwrap(); + } gen } @@ -155,6 +174,14 @@ impl ExecutionBlockGenerator { .map(|block| block.as_execution_block(self.terminal_total_difficulty)) } + /// Get the execution payload header for the genesis block, if the chain was started from PoS. 
+ pub fn genesis_execution_payload_header(&self) -> Option> { + match self.block_by_number(0) { + None | Some(Block::PoW(_)) => None, + Some(Block::PoS(payload)) => Some(payload.to_ref().into()), + } + } + pub fn block_by_number(&self, number: u64) -> Option> { // Get the latest canonical head block let mut latest_block = self.latest_block()?; @@ -475,63 +502,11 @@ impl ExecutionBlockGenerator { let id = payload_id_from_u64(self.next_payload_id); self.next_payload_id += 1; - let mut execution_payload = match &attributes { - PayloadAttributes::V1(pa) => ExecutionPayload::Merge(ExecutionPayloadMerge { - parent_hash: forkchoice_state.head_block_hash, - fee_recipient: pa.suggested_fee_recipient, - receipts_root: Hash256::repeat_byte(42), - state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), - prev_randao: pa.prev_randao, - block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, - gas_used: GAS_USED, - timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), - block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - }), - PayloadAttributes::V2(pa) => match self.get_fork_at_timestamp(pa.timestamp) { - ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge { - parent_hash: forkchoice_state.head_block_hash, - fee_recipient: pa.suggested_fee_recipient, - receipts_root: Hash256::repeat_byte(42), - state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), - prev_randao: pa.prev_randao, - block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, - gas_used: GAS_USED, - timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), - block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - }), - ForkName::Capella => ExecutionPayload::Capella(ExecutionPayloadCapella { - parent_hash: forkchoice_state.head_block_hash, - fee_recipient: pa.suggested_fee_recipient, 
- receipts_root: Hash256::repeat_byte(42), - state_root: Hash256::repeat_byte(43), - logs_bloom: vec![0; 256].into(), - prev_randao: pa.prev_randao, - block_number: parent.block_number() + 1, - gas_limit: GAS_LIMIT, - gas_used: GAS_USED, - timestamp: pa.timestamp, - extra_data: "block gen was here".as_bytes().to_vec().into(), - base_fee_per_gas: Uint256::one(), - block_hash: ExecutionBlockHash::zero(), - transactions: vec![].into(), - withdrawals: pa.withdrawals.clone().into(), - }), - _ => unreachable!(), - }, - }; - - *execution_payload.block_hash_mut() = - ExecutionBlockHash::from_root(execution_payload.tree_hash_root()); + let execution_payload = self.generate_pos_payload( + forkchoice_state.head_block_hash, + parent.block_number() + 1, + &attributes, + ); self.payload_ids.insert(id, execution_payload); @@ -559,6 +534,62 @@ impl ExecutionBlockGenerator { payload_id: id.map(Into::into), }) } + + pub fn generate_pos_payload( + &self, + parent_hash: ExecutionBlockHash, + block_number: u64, + attributes: &PayloadAttributes, + ) -> ExecutionPayload { + let pa = attributes; + let mut execution_payload = match self.get_fork_at_timestamp(attributes.timestamp()) { + ForkName::Merge => ExecutionPayload::Merge(ExecutionPayloadMerge { + parent_hash, + fee_recipient: pa.suggested_fee_recipient(), + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao(), + block_number, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp(), + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + }), + ForkName::Capella => { + let withdrawals = attributes + .withdrawals() + .expect("capella payload attributes must have withdrawals") + .clone(); + + ExecutionPayload::Capella(ExecutionPayloadCapella { + parent_hash, + fee_recipient: pa.suggested_fee_recipient(), + 
receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao(), + block_number, + gas_limit: GAS_LIMIT, + gas_used: GAS_USED, + timestamp: pa.timestamp(), + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::one(), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + withdrawals: withdrawals.into(), + }) + } + _ => unreachable!(), + }; + + *execution_payload.block_hash_mut() = + ExecutionBlockHash::from_root(execution_payload.tree_hash_root()); + execution_payload + } } fn payload_id_from_u64(n: u64) -> PayloadId { @@ -617,6 +648,7 @@ mod test { TERMINAL_DIFFICULTY.into(), TERMINAL_BLOCK, ExecutionBlockHash::zero(), + 0, None, ); diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 2b512d8b1c2..384d616dd4b 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -27,6 +27,7 @@ impl MockExecutionLayer { Self::new( executor, DEFAULT_TERMINAL_BLOCK, + 0, None, None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), @@ -39,6 +40,7 @@ impl MockExecutionLayer { pub fn new( executor: TaskExecutor, terminal_block: u64, + genesis_time: u64, shanghai_time: Option, builder_threshold: Option, jwt_key: Option, @@ -54,6 +56,7 @@ impl MockExecutionLayer { spec.terminal_total_difficulty, terminal_block, spec.terminal_block_hash, + genesis_time, shanghai_time, ); @@ -226,6 +229,12 @@ impl MockExecutionLayer { self } + pub fn genesis_execution_payload_header(&self) -> Option> { + self.server + .execution_block_generator() + .genesis_execution_payload_header() + } + pub fn move_to_block_prior_to_terminal_block(self) -> Self { self.server .execution_block_generator() diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs 
b/beacon_node/execution_layer/src/test_utils/mod.rs index 99d264aa7b8..187abbed7d5 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -58,6 +58,7 @@ pub struct MockExecutionConfig { pub terminal_difficulty: Uint256, pub terminal_block: u64, pub terminal_block_hash: ExecutionBlockHash, + pub genesis_time: u64, pub shanghai_time: Option, } @@ -69,6 +70,7 @@ impl Default for MockExecutionConfig { terminal_block: DEFAULT_TERMINAL_BLOCK, terminal_block_hash: ExecutionBlockHash::zero(), server_config: Config::default(), + genesis_time: 0, shanghai_time: None, } } @@ -89,6 +91,7 @@ impl MockServer { DEFAULT_TERMINAL_DIFFICULTY.into(), DEFAULT_TERMINAL_BLOCK, ExecutionBlockHash::zero(), + 0, None, // FIXME(capella): should this be the default? ) } @@ -100,6 +103,7 @@ impl MockServer { terminal_block, terminal_block_hash, server_config, + genesis_time, shanghai_time, } = config; let last_echo_request = Arc::new(RwLock::new(None)); @@ -108,6 +112,7 @@ impl MockServer { terminal_difficulty, terminal_block, terminal_block_hash, + genesis_time, shanghai_time, ); @@ -167,6 +172,7 @@ impl MockServer { terminal_difficulty: Uint256, terminal_block: u64, terminal_block_hash: ExecutionBlockHash, + genesis_time: u64, shanghai_time: Option, ) -> Self { Self::new_with_config( @@ -177,6 +183,7 @@ impl MockServer { terminal_difficulty, terminal_block, terminal_block_hash, + genesis_time, shanghai_time, }, )