diff --git a/Cargo.lock b/Cargo.lock index 70c910aadc9..d3394fce293 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,16 +2,6 @@ # It is not intended for manual editing. version = 4 -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - [[package]] name = "account_manager" version = "0.3.5" @@ -861,7 +851,6 @@ dependencies = [ "bls", "criterion", "derivative", - "eth1", "eth2", "eth2_network_config", "ethereum_hashing", @@ -1322,20 +1311,6 @@ dependencies = [ "serde", ] -[[package]] -name = "cargo_metadata" -version = "0.15.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a" -dependencies = [ - "camino", - "cargo-platform", - "semver 1.0.26", - "serde", - "serde_json", - "thiserror 1.0.69", -] - [[package]] name = "cargo_metadata" version = "0.19.2" @@ -1550,7 +1525,6 @@ dependencies = [ "directory", "dirs", "environment", - "eth1", "eth2", "eth2_config", "ethereum_ssz", @@ -2498,12 +2472,6 @@ version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6add3b8cff394282be81f3fc1a0605db594ed69890078ca6e2cab1c408bcf04" -[[package]] -name = "dunce" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" - [[package]] name = "ecdsa" version = "0.14.8" @@ -2790,48 +2758,6 @@ dependencies = [ "uuid 0.8.2", ] -[[package]] -name = "eth1" -version = "0.2.0" -dependencies = [ - "environment", - "eth1_test_rig", - "eth2", - "ethereum_ssz", - "ethereum_ssz_derive", - "execution_layer", - "futures", - "logging", - "merkle_proof", - "metrics", - "parking_lot 0.12.3", - "sensitive_url", - "serde", - "serde_yaml", - "state_processing", - "superstruct", - 
"task_executor", - "tokio", - "tracing", - "tree_hash", - "types", -] - -[[package]] -name = "eth1_test_rig" -version = "0.2.0" -dependencies = [ - "deposit_contract", - "ethers-contract", - "ethers-core", - "ethers-providers", - "hex", - "serde_json", - "tokio", - "types", - "unused_port", -] - [[package]] name = "eth2" version = "0.1.0" @@ -3114,8 +3040,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e9c3c3e119a89f0a9a1e539e7faecea815f74ddcf7c90d0b00d1f524db2fdc9c" dependencies = [ - "ethers-contract-abigen", - "ethers-contract-derive", "ethers-core", "ethers-providers", "futures-util", @@ -3127,46 +3051,6 @@ dependencies = [ "thiserror 1.0.69", ] -[[package]] -name = "ethers-contract-abigen" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d4e5ad46aede34901f71afdb7bb555710ed9613d88d644245c657dc371aa228" -dependencies = [ - "Inflector", - "cfg-if", - "dunce", - "ethers-core", - "eyre", - "getrandom 0.2.16", - "hex", - "proc-macro2", - "quote", - "regex", - "reqwest", - "serde", - "serde_json", - "syn 1.0.109", - "toml", - "url", - "walkdir", -] - -[[package]] -name = "ethers-contract-derive" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f192e8e4cf2b038318aae01e94e7644e0659a76219e94bcd3203df744341d61f" -dependencies = [ - "ethers-contract-abigen", - "ethers-core", - "hex", - "proc-macro2", - "quote", - "serde_json", - "syn 1.0.109", -] - [[package]] name = "ethers-core" version = "1.0.2" @@ -3175,7 +3059,6 @@ checksum = "ade3e9c97727343984e1ceada4fdab11142d2ee3472d2c67027d56b1251d4f15" dependencies = [ "arrayvec", "bytes", - "cargo_metadata 0.15.4", "chrono", "convert_case 0.6.0", "elliptic-curve 0.12.3", @@ -3183,7 +3066,6 @@ dependencies = [ "generic-array 0.14.7", "hex", "k256 0.11.6", - "once_cell", "open-fastrlp", "proc-macro2", "rand 0.8.5", @@ -3409,16 +3291,6 @@ dependencies = [ "zeroize", ] -[[package]] 
-name = "eyre" -version = "0.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" -dependencies = [ - "indenter", - "once_cell", -] - [[package]] name = "fake-simd" version = "0.1.2" @@ -3833,19 +3705,12 @@ dependencies = [ name = "genesis" version = "0.2.0" dependencies = [ - "environment", - "eth1", - "eth1_test_rig", "ethereum_hashing", "ethereum_ssz", - "futures", "int_to_bytes", - "logging", "merkle_proof", "rayon", - "sensitive_url", "state_processing", - "tokio", "tracing", "tree_hash", "types", @@ -4325,7 +4190,6 @@ dependencies = [ "bytes", "directory", "either", - "eth1", "eth2", "ethereum_serde_utils", "ethereum_ssz", @@ -4808,12 +4672,6 @@ dependencies = [ "syn 2.0.101", ] -[[package]] -name = "indenter" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" - [[package]] name = "indexmap" version = "1.9.3" @@ -5636,7 +5494,6 @@ dependencies = [ "database_manager", "directory", "environment", - "eth1", "eth2", "eth2_network_config", "ethereum_hashing", @@ -9466,15 +9323,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "toml" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", -] - [[package]] name = "toml_datetime" version = "0.6.9" @@ -10897,7 +10745,7 @@ dependencies = [ name = "workspace_members" version = "0.1.0" dependencies = [ - "cargo_metadata 0.19.2", + "cargo_metadata", "quote", ] diff --git a/Cargo.toml b/Cargo.toml index 4850b2f56c4..6a7b2f610e6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,6 @@ members = [ "beacon_node/beacon_processor", "beacon_node/builder_client", "beacon_node/client", - "beacon_node/eth1", "beacon_node/execution_layer", "beacon_node/genesis", "beacon_node/http_api", 
@@ -72,7 +71,6 @@ members = [ "slasher", "slasher/service", "testing/ef_tests", - "testing/eth1_test_rig", "testing/execution_engine_integration", "testing/node_test_rig", "testing/simulator", @@ -138,8 +136,6 @@ doppelganger_service = { path = "validator_client/doppelganger_service" } either = "1.9" env_logger = "0.9" environment = { path = "lighthouse/environment" } -eth1 = { path = "beacon_node/eth1" } -eth1_test_rig = { path = "testing/eth1_test_rig" } eth2 = { path = "common/eth2" } eth2_config = { path = "common/eth2_config" } eth2_key_derivation = { path = "crypto/eth2_key_derivation" } diff --git a/beacon_node/beacon_chain/Cargo.toml b/beacon_node/beacon_chain/Cargo.toml index 1bf6ab43267..fbc58eafc8a 100644 --- a/beacon_node/beacon_chain/Cargo.toml +++ b/beacon_node/beacon_chain/Cargo.toml @@ -19,7 +19,6 @@ alloy-primitives = { workspace = true } bitvec = { workspace = true } bls = { workspace = true } derivative = { workspace = true } -eth1 = { workspace = true } eth2 = { workspace = true } eth2_network_config = { workspace = true } ethereum_hashing = { workspace = true } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 82a42f2395f..e6cdd84b405 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -27,8 +27,6 @@ use crate::data_availability_checker::{ use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use crate::early_attester_cache::EarlyAttesterCache; use crate::errors::{BeaconChainError as Error, BlockProductionError}; -use crate::eth1_chain::{Eth1Chain, Eth1ChainBackend}; -use crate::eth1_finalization_cache::{Eth1FinalizationCache, Eth1FinalizationData}; use crate::events::ServerSentEventHandler; use crate::execution_payload::{get_execution_payload, NotifyExecutionLayer, PreparePayloadHandle}; use crate::fetch_blobs::EngineGetBlobsOutput; @@ -143,7 +141,6 @@ type HashBlockTuple = (Hash256, 
RpcBlock); // These keys are all zero because they get stored in different columns, see `DBColumn` type. pub const BEACON_CHAIN_DB_KEY: Hash256 = Hash256::ZERO; pub const OP_POOL_DB_KEY: Hash256 = Hash256::ZERO; -pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::ZERO; pub const FORK_CHOICE_DB_KEY: Hash256 = Hash256::ZERO; /// Defines how old a block can be before it's no longer a candidate for the early attester cache. @@ -312,7 +309,6 @@ pub trait BeaconChainTypes: Send + Sync + 'static { type HotStore: store::ItemStore; type ColdStore: store::ItemStore; type SlotClock: slot_clock::SlotClock; - type Eth1Chain: Eth1ChainBackend; type EthSpec: types::EthSpec; } @@ -436,8 +432,6 @@ pub struct BeaconChain { /// Maintains a record of which validators we've seen BLS to execution changes for. pub observed_bls_to_execution_changes: Mutex>, - /// Provides information from the Ethereum 1 (PoW) chain. - pub eth1_chain: Option>, /// Interfaces with the execution client. pub execution_layer: Option>, /// Stores information about the canonical head and finalized/justified checkpoints of the @@ -460,8 +454,6 @@ pub struct BeaconChain { pub event_handler: Option>, /// Caches the attester shuffling for a given epoch and shuffling key root. pub shuffling_cache: RwLock, - /// A cache of eth1 deposit data at epoch boundaries for deposit finalization - pub eth1_finalization_cache: RwLock, /// Caches the beacon block proposer shuffling for a given epoch and shuffling key root. pub beacon_proposer_cache: Arc>, /// Caches a map of `validator_index -> validator_pubkey`. @@ -660,18 +652,6 @@ impl BeaconChain { Ok(()) } - /// Persists `self.eth1_chain` and its caches to disk. - pub fn persist_eth1_cache(&self) -> Result<(), Error> { - let _timer = metrics::start_timer(&metrics::PERSIST_ETH1_CACHE); - - if let Some(eth1_chain) = self.eth1_chain.as_ref() { - self.store - .put_item(&ETH1_CACHE_DB_KEY, &eth1_chain.as_ssz_container())?; - } - - Ok(()) - } - /// Persists the custody information to disk. 
pub fn persist_custody_context(&self) -> Result<(), Error> { let custody_context: CustodyContextSsz = self @@ -2394,13 +2374,10 @@ impl BeaconChain { // If there's no eth1 chain then it's impossible to produce blocks and therefore // useless to put things in the op pool. - if self.eth1_chain.is_some() { - let (attestation, attesting_indices) = - verified_attestation.into_attestation_and_indices(); - self.op_pool - .insert_attestation(attestation, attesting_indices) - .map_err(Error::from)?; - } + let (attestation, attesting_indices) = verified_attestation.into_attestation_and_indices(); + self.op_pool + .insert_attestation(attestation, attesting_indices) + .map_err(Error::from)?; Ok(()) } @@ -2416,11 +2393,9 @@ impl BeaconChain { // If there's no eth1 chain then it's impossible to produce blocks and therefore // useless to put things in the op pool. - if self.eth1_chain.is_some() { - self.op_pool - .insert_sync_contribution(contribution.contribution()) - .map_err(Error::from)?; - } + self.op_pool + .insert_sync_contribution(contribution.contribution()) + .map_err(Error::from)?; Ok(()) } @@ -2556,9 +2531,7 @@ impl BeaconChain { /// Accept a pre-verified exit and queue it for inclusion in an appropriate block. pub fn import_voluntary_exit(&self, exit: SigVerifiedOp) { - if self.eth1_chain.is_some() { - self.op_pool.insert_voluntary_exit(exit) - } + self.op_pool.insert_voluntary_exit(exit) } /// Verify a proposer slashing before allowing it to propagate on the gossip network. @@ -2588,9 +2561,7 @@ impl BeaconChain { } } - if self.eth1_chain.is_some() { - self.op_pool.insert_proposer_slashing(proposer_slashing) - } + self.op_pool.insert_proposer_slashing(proposer_slashing) } /// Verify an attester slashing before allowing it to propagate on the gossip network. @@ -2629,9 +2600,7 @@ impl BeaconChain { } // Add to the op pool (if we have the ability to propose blocks). 
- if self.eth1_chain.is_some() { - self.op_pool.insert_attester_slashing(attester_slashing) - } + self.op_pool.insert_attester_slashing(attester_slashing) } /// Verify a signed BLS to execution change before allowing it to propagate on the gossip network. @@ -2703,12 +2672,8 @@ impl BeaconChain { } } - if self.eth1_chain.is_some() { - self.op_pool - .insert_bls_to_execution_change(bls_to_execution_change, received_pre_capella) - } else { - false - } + self.op_pool + .insert_bls_to_execution_change(bls_to_execution_change, received_pre_capella) } /// Attempt to obtain sync committee duties from the head. @@ -3792,7 +3757,6 @@ impl BeaconChain { block_root, state, parent_block, - parent_eth1_finalization_data, consensus_context, } = import_data; @@ -3818,7 +3782,6 @@ impl BeaconChain { state, payload_verification_outcome.payload_verification_status, parent_block, - parent_eth1_finalization_data, consensus_context, ) }, @@ -3855,7 +3818,6 @@ impl BeaconChain { mut state: BeaconState, payload_verification_status: PayloadVerificationStatus, parent_block: SignedBlindedBeaconBlock, - parent_eth1_finalization_data: Eth1FinalizationData, mut consensus_context: ConsensusContext, ) -> Result { // ----------------------------- BLOCK NOT YET ATTESTABLE ---------------------------------- @@ -4063,12 +4025,6 @@ impl BeaconChain { // about it. let block_time_imported = timestamp_now(); - let current_eth1_finalization_data = Eth1FinalizationData { - eth1_data: state.eth1_data().clone(), - eth1_deposit_index: state.eth1_deposit_index(), - }; - let current_finalized_checkpoint = state.finalized_checkpoint(); - // compute state proofs for light client updates before inserting the state into the // snapshot cache. if self.config.enable_light_client_server { @@ -4087,17 +4043,6 @@ impl BeaconChain { metrics::inc_counter(&metrics::BLOCK_PROCESSING_SUCCESSES); - // Update the deposit contract cache. 
- self.import_block_update_deposit_contract_finalization( - block, - block_root, - current_epoch, - current_finalized_checkpoint, - current_eth1_finalization_data, - parent_eth1_finalization_data, - parent_block.slot(), - ); - // Inform the unknown block cache, in case it was waiting on this block. self.pre_finalization_block_cache .block_processed(block_root); @@ -4494,65 +4439,6 @@ impl BeaconChain { Ok(()) } - #[allow(clippy::too_many_arguments)] - fn import_block_update_deposit_contract_finalization( - &self, - block: BeaconBlockRef, - block_root: Hash256, - current_epoch: Epoch, - current_finalized_checkpoint: Checkpoint, - current_eth1_finalization_data: Eth1FinalizationData, - parent_eth1_finalization_data: Eth1FinalizationData, - parent_block_slot: Slot, - ) { - // Do not write to eth1 finalization cache for blocks older than 5 epochs. - if block.epoch() + 5 < current_epoch { - return; - } - - let parent_block_epoch = parent_block_slot.epoch(T::EthSpec::slots_per_epoch()); - if parent_block_epoch < current_epoch { - // we've crossed epoch boundary, store Eth1FinalizationData - let (checkpoint, eth1_finalization_data) = - if block.slot() % T::EthSpec::slots_per_epoch() == 0 { - // current block is the checkpoint - ( - Checkpoint { - epoch: current_epoch, - root: block_root, - }, - current_eth1_finalization_data, - ) - } else { - // parent block is the checkpoint - ( - Checkpoint { - epoch: current_epoch, - root: block.parent_root(), - }, - parent_eth1_finalization_data, - ) - }; - - let finalized_eth1_data = { - let mut cache = self.eth1_finalization_cache.write(); - cache.insert(checkpoint, eth1_finalization_data); - cache.finalize(&current_finalized_checkpoint) - }; - if let Some(finalized_eth1_data) = finalized_eth1_data { - if let Some(eth1_chain) = self.eth1_chain.as_ref() { - let finalized_deposit_count = finalized_eth1_data.deposit_count; - eth1_chain.finalize_eth1_data(finalized_eth1_data); - debug!( - epoch = %current_finalized_checkpoint.epoch, - 
deposit_count = %finalized_deposit_count, - "called eth1_chain.finalize_eth1_data()" - ); - } - } - } - } - /// If configured, wait for the fork choice run at the start of the slot to complete. fn wait_for_fork_choice_before_block_production( self: &Arc, @@ -5290,11 +5176,6 @@ impl BeaconChain { builder_boost_factor: Option, block_production_version: BlockProductionVersion, ) -> Result, BlockProductionError> { - let eth1_chain = self - .eth1_chain - .as_ref() - .ok_or(BlockProductionError::NoEth1ChainConnection)?; - // It is invalid to try to produce a block using a state from a future slot. if state.slot() > produce_at_slot { return Err(BlockProductionError::StateSlotTooHigh { @@ -5359,9 +5240,9 @@ impl BeaconChain { let (mut proposer_slashings, mut attester_slashings, mut voluntary_exits) = self.op_pool.get_slashings_and_exits(&state, &self.spec); - let eth1_data = eth1_chain.eth1_data_for_block_production(&state, &self.spec)?; + let eth1_data = state.eth1_data().clone(); - let deposits = eth1_chain.deposits_for_block_inclusion(&state, &eth1_data, &self.spec)?; + let deposits = vec![]; let bls_to_execution_changes = self .op_pool @@ -7252,7 +7133,6 @@ impl Drop for BeaconChain { let drop = || -> Result<(), Error> { self.persist_fork_choice()?; self.persist_op_pool()?; - self.persist_eth1_cache()?; self.persist_custody_context() }; diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index a1b3abd610a..317ec02cc1e 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -53,7 +53,6 @@ use crate::blob_verification::GossipBlobError; use crate::block_verification_types::{AsBlock, BlockImportData, RpcBlock}; use crate::data_availability_checker::{AvailabilityCheckError, MaybeAvailableBlock}; use crate::data_column_verification::GossipDataColumnError; -use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::execution_payload::{ 
validate_execution_payload_for_gossip, validate_merge_block, AllowOptimisticImport, NotifyExecutionLayer, PayloadNotifier, @@ -1442,11 +1441,6 @@ impl ExecutionPendingBlock { .into()); } - let parent_eth1_finalization_data = Eth1FinalizationData { - eth1_data: state.eth1_data().clone(), - eth1_deposit_index: state.eth1_deposit_index(), - }; - // Transition the parent state to the block slot. // // It is important to note that we're using a "pre-state" here, one that has potentially @@ -1646,7 +1640,6 @@ impl ExecutionPendingBlock { block_root, state, parent_block: parent.beacon_block, - parent_eth1_finalization_data, consensus_context, }, payload_verification_handle, diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index f7002dcee1c..681e90aebc3 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -1,7 +1,6 @@ use crate::data_availability_checker::AvailabilityCheckError; pub use crate::data_availability_checker::{AvailableBlock, MaybeAvailableBlock}; use crate::data_column_verification::{CustodyDataColumn, CustodyDataColumnList}; -use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::{get_block_root, PayloadVerificationOutcome}; use derivative::Derivative; use state_processing::ConsensusContext; @@ -341,7 +340,6 @@ pub struct BlockImportData { pub block_root: Hash256, pub state: BeaconState, pub parent_block: SignedBeaconBlock>, - pub parent_eth1_finalization_data: Eth1FinalizationData, pub consensus_context: ConsensusContext, } @@ -355,10 +353,6 @@ impl BlockImportData { block_root, state, parent_block, - parent_eth1_finalization_data: Eth1FinalizationData { - eth1_data: <_>::default(), - eth1_deposit_index: 0, - }, consensus_context: ConsensusContext::new(Slot::new(0)), } } diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 
5b5d876331f..ce4264d5508 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -1,10 +1,8 @@ use crate::beacon_chain::{ - CanonicalHead, LightClientProducerEvent, BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, OP_POOL_DB_KEY, + CanonicalHead, LightClientProducerEvent, BEACON_CHAIN_DB_KEY, OP_POOL_DB_KEY, }; use crate::beacon_proposer_cache::BeaconProposerCache; use crate::data_availability_checker::DataAvailabilityChecker; -use crate::eth1_chain::{CachingEth1Backend, SszEth1}; -use crate::eth1_finalization_cache::Eth1FinalizationCache; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; use crate::graffiti_calculator::{GraffitiCalculator, GraffitiOrigin}; @@ -20,10 +18,8 @@ use crate::validator_pubkey_cache::ValidatorPubkeyCache; use crate::ChainConfig; use crate::CustodyContext; use crate::{ - BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, BeaconSnapshot, Eth1Chain, - Eth1ChainBackend, ServerSentEventHandler, + BeaconChain, BeaconChainTypes, BeaconForkChoiceStore, BeaconSnapshot, ServerSentEventHandler, }; -use eth1::Config as Eth1Config; use execution_layer::ExecutionLayer; use fork_choice::{ForkChoice, ResetPayloadStatuses}; use futures::channel::mpsc::Sender; @@ -50,23 +46,21 @@ use types::{ /// An empty struct used to "witness" all the `BeaconChainTypes` traits. It has no user-facing /// functionality and only exists to satisfy the type system. 
-pub struct Witness( - PhantomData<(TSlotClock, TEth1Backend, E, THotStore, TColdStore)>, +pub struct Witness( + PhantomData<(TSlotClock, E, THotStore, TColdStore)>, ); -impl BeaconChainTypes - for Witness +impl BeaconChainTypes + for Witness where THotStore: ItemStore + 'static, TColdStore: ItemStore + 'static, TSlotClock: SlotClock + 'static, - TEth1Backend: Eth1ChainBackend + 'static, E: EthSpec + 'static, { type HotStore = THotStore; type ColdStore = TColdStore; type SlotClock = TSlotClock; - type Eth1Chain = TEth1Backend; type EthSpec = E; } @@ -90,7 +84,6 @@ pub struct BeaconChainBuilder { ForkChoice, T::EthSpec>, >, op_pool: Option>, - eth1_chain: Option>, execution_layer: Option>, event_handler: Option>, slot_clock: Option, @@ -111,13 +104,12 @@ pub struct BeaconChainBuilder { rng: Option>, } -impl - BeaconChainBuilder> +impl + BeaconChainBuilder> where THotStore: ItemStore + 'static, TColdStore: ItemStore + 'static, TSlotClock: SlotClock + 'static, - TEth1Backend: Eth1ChainBackend + 'static, E: EthSpec + 'static, { /// Returns a new builder. @@ -133,7 +125,6 @@ where genesis_state_root: None, fork_choice: None, op_pool: None, - eth1_chain: None, execution_layer: None, event_handler: None, slot_clock: None, @@ -226,18 +217,6 @@ where self } - /// Attempt to load an existing eth1 cache from the builder's `Store`. - pub fn get_persisted_eth1_backend(&self) -> Result, String> { - let store = self - .store - .clone() - .ok_or("get_persisted_eth1_backend requires a store.")?; - - store - .get_item::(&ETH1_CACHE_DB_KEY) - .map_err(|e| format!("DB error whilst reading eth1 cache: {:?}", e)) - } - /// Returns true if `self.store` contains a persisted beacon chain. 
pub fn store_contains_beacon_chain(&self) -> Result { let store = self @@ -270,16 +249,15 @@ where .to_string() })?; - let fork_choice = - BeaconChain::>::load_fork_choice( - store.clone(), - ResetPayloadStatuses::always_reset_conditionally( - self.chain_config.always_reset_payload_statuses, - ), - &self.spec, - ) - .map_err(|e| format!("Unable to load fork choice from disk: {:?}", e))? - .ok_or("Fork choice not found in store")?; + let fork_choice = BeaconChain::>::load_fork_choice( + store.clone(), + ResetPayloadStatuses::always_reset_conditionally( + self.chain_config.always_reset_payload_statuses, + ), + &self.spec, + ) + .map_err(|e| format!("Unable to load fork choice from disk: {:?}", e))? + .ok_or("Fork choice not found in store")?; let genesis_block = store .get_blinded_block(&chain.genesis_block_root) @@ -653,12 +631,6 @@ where Ok(self.empty_op_pool()) } - /// Sets the `BeaconChain` eth1 backend. - pub fn eth1_backend(mut self, backend: Option) -> Self { - self.eth1_chain = backend.map(Eth1Chain::new); - self - } - /// Sets the `BeaconChain` execution layer. pub fn execution_layer(mut self, execution_layer: Option>) -> Self { self.execution_layer = execution_layer; @@ -749,8 +721,7 @@ where #[allow(clippy::type_complexity)] // I think there's nothing to be gained here from a type alias. pub fn build( mut self, - ) -> Result>, String> - { + ) -> Result>, String> { let slot_clock = self .slot_clock .ok_or("Cannot build without a slot_clock.")?; @@ -906,12 +877,12 @@ where // This *must* be stored before constructing the `BeaconChain`, so that its `Drop` instance // doesn't write a `PersistedBeaconChain` without the rest of the batch. 
self.pending_io_batch.push(BeaconChain::< - Witness, + Witness, >::persist_head_in_batch_standalone( genesis_block_root )); self.pending_io_batch.push(BeaconChain::< - Witness, + Witness, >::persist_fork_choice_in_batch_standalone( &fork_choice )); @@ -1003,7 +974,6 @@ where observed_proposer_slashings: <_>::default(), observed_attester_slashings: <_>::default(), observed_bls_to_execution_changes: <_>::default(), - eth1_chain: self.eth1_chain, execution_layer: self.execution_layer.clone(), genesis_validators_root, genesis_time, @@ -1017,7 +987,6 @@ where shuffling_cache_size, head_shuffling_ids, )), - eth1_finalization_cache: RwLock::new(Eth1FinalizationCache::default()), beacon_proposer_cache, block_times_cache: <_>::default(), pre_finalization_block_cache: <_>::default(), @@ -1122,35 +1091,11 @@ where } } -impl - BeaconChainBuilder, E, THotStore, TColdStore>> -where - THotStore: ItemStore + 'static, - TColdStore: ItemStore + 'static, - TSlotClock: SlotClock + 'static, - E: EthSpec + 'static, -{ - /// Do not use any eth1 backend. The client will not be able to produce beacon blocks. - pub fn no_eth1_backend(self) -> Self { - self.eth1_backend(None) - } - - /// Sets the `BeaconChain` eth1 back-end to produce predictably junk data when producing blocks. - pub fn dummy_eth1_backend(mut self) -> Result { - let backend = CachingEth1Backend::new(Eth1Config::default(), self.spec.clone())?; - - self.eth1_chain = Some(Eth1Chain::new_dummy(backend)); - - Ok(self) - } -} - -impl - BeaconChainBuilder> +impl + BeaconChainBuilder> where THotStore: ItemStore + 'static, TColdStore: ItemStore + 'static, - TEth1Backend: Eth1ChainBackend + 'static, E: EthSpec + 'static, { /// Sets the `BeaconChain` slot clock to `TestingSlotClock`. 
@@ -1298,8 +1243,6 @@ mod test { .task_executor(runtime.task_executor.clone()) .genesis_state(genesis_state) .expect("should build state using recent genesis") - .dummy_eth1_backend() - .expect("should build the dummy eth1 backend") .testing_slot_clock(Duration::from_secs(1)) .expect("should configure testing slot clock") .shutdown_sender(shutdown_tx) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 36c4f2cdc1e..deaea3eb24b 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -699,7 +699,6 @@ mod test { block_verification::PayloadVerificationOutcome, block_verification_types::{AsBlock, BlockImportData}, data_availability_checker::STATE_LRU_CAPACITY, - eth1_finalization_cache::Eth1FinalizationData, test_utils::{BaseHarnessType, BeaconChainHarness, DiskHarnessType}, }; use fork_choice::PayloadVerificationStatus; @@ -809,11 +808,6 @@ mod test { .expect("should get block") .expect("should have block"); - let parent_eth1_finalization_data = Eth1FinalizationData { - eth1_data: parent_block.message().body().eth1_data().clone(), - eth1_deposit_index: 0, - }; - let (signed_beacon_block_hash, (block, maybe_blobs), state) = harness .add_block_at_slot(target_slot, parent_state) .await @@ -860,7 +854,6 @@ mod test { block_root, state, parent_block, - parent_eth1_finalization_data, consensus_context, }; @@ -1158,7 +1151,6 @@ mod test { mod pending_components_tests { use super::*; use crate::block_verification_types::BlockImportData; - use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::test_utils::{generate_rand_block_and_blobs, test_spec, NumBlobs}; use crate::PayloadVerificationOutcome; use fork_choice::PayloadVerificationStatus; @@ -1246,10 +1238,6 @@ mod pending_components_tests { block_root: Default::default(), 
state: BeaconState::new(0, Default::default(), &ChainSpec::minimal()), parent_block: dummy_parent, - parent_eth1_finalization_data: Eth1FinalizationData { - eth1_data: Default::default(), - eth1_deposit_index: 0, - }, consensus_context: ConsensusContext::new(Slot::new(0)), }, payload_verification_outcome: PayloadVerificationOutcome { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index f73857f4682..f16e1383839 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -2,7 +2,6 @@ use crate::block_verification_types::AsBlock; use crate::{ block_verification_types::BlockImportData, data_availability_checker::{AvailabilityCheckError, STATE_LRU_CAPACITY_NON_ZERO}, - eth1_finalization_cache::Eth1FinalizationData, AvailabilityPendingExecutedBlock, BeaconChainTypes, BeaconStore, PayloadVerificationOutcome, }; use lru::LruCache; @@ -21,7 +20,6 @@ pub struct DietAvailabilityPendingExecutedBlock { block: Arc>, state_root: Hash256, parent_block: SignedBeaconBlock>, - parent_eth1_finalization_data: Eth1FinalizationData, consensus_context: OnDiskConsensusContext, payload_verification_outcome: PayloadVerificationOutcome, } @@ -97,7 +95,6 @@ impl StateLRUCache { block: executed_block.block, state_root, parent_block: executed_block.import_data.parent_block, - parent_eth1_finalization_data: executed_block.import_data.parent_eth1_finalization_data, consensus_context: OnDiskConsensusContext::from_consensus_context( executed_block.import_data.consensus_context, ), @@ -125,7 +122,6 @@ impl StateLRUCache { block_root, state, parent_block: diet_executed_block.parent_block, - parent_eth1_finalization_data: diet_executed_block.parent_eth1_finalization_data, consensus_context: diet_executed_block .consensus_context .into_consensus_context(), @@ -212,7 +208,6 @@ impl 
From> block: value.block, state_root: value.import_data.state.canonical_root().unwrap(), parent_block: value.import_data.parent_block, - parent_eth1_finalization_data: value.import_data.parent_eth1_finalization_data, consensus_context: OnDiskConsensusContext::from_consensus_context( value.import_data.consensus_context, ), diff --git a/beacon_node/beacon_chain/src/errors.rs b/beacon_node/beacon_chain/src/errors.rs index 2e6de463ccf..b6db3fa84f2 100644 --- a/beacon_node/beacon_chain/src/errors.rs +++ b/beacon_node/beacon_chain/src/errors.rs @@ -3,7 +3,6 @@ use crate::beacon_block_streamer::Error as BlockStreamerError; use crate::beacon_chain::ForkChoiceError; use crate::beacon_fork_choice_store::Error as ForkChoiceStoreError; use crate::data_availability_checker::AvailabilityCheckError; -use crate::eth1_chain::Error as Eth1ChainError; use crate::migrate::PruningError; use crate::naive_aggregation_pool::Error as NaiveAggregationError; use crate::observed_aggregates::Error as ObservedAttestationsError; @@ -271,7 +270,6 @@ pub enum BlockProductionError { BlockProcessingError(BlockProcessingError), EpochCacheError(EpochCacheError), ForkChoiceError(ForkChoiceError), - Eth1ChainError(Eth1ChainError), BeaconStateError(BeaconStateError), StateAdvanceError(StateAdvanceError), OpPoolError(OpPoolError), @@ -307,7 +305,6 @@ pub enum BlockProductionError { easy_from_to!(BlockProcessingError, BlockProductionError); easy_from_to!(BeaconStateError, BlockProductionError); easy_from_to!(SlotProcessingError, BlockProductionError); -easy_from_to!(Eth1ChainError, BlockProductionError); easy_from_to!(StateAdvanceError, BlockProductionError); easy_from_to!(ForkChoiceError, BlockProductionError); easy_from_to!(EpochCacheError, BlockProductionError); diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs deleted file mode 100644 index 8a79bff4c7a..00000000000 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ /dev/null @@ -1,1208 +0,0 @@ -use 
crate::metrics; -use eth1::{Config as Eth1Config, Eth1Block, Service as HttpService}; -use eth2::lighthouse::Eth1SyncStatusData; -use ethereum_hashing::hash; -use int_to_bytes::int_to_bytes32; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use state_processing::per_block_processing::get_new_eth1_data; -use std::cmp::Ordering; -use std::collections::HashMap; -use std::marker::PhantomData; -use std::sync::Arc; -use std::time::{SystemTime, UNIX_EPOCH}; -use store::{DBColumn, Error as StoreError, StoreItem}; -use task_executor::TaskExecutor; -use tracing::{debug, error, trace}; -use types::{ - BeaconState, BeaconStateError, ChainSpec, Deposit, Eth1Data, EthSpec, Hash256, Slot, Unsigned, -}; - -type BlockNumber = u64; -type Eth1DataVoteCount = HashMap<(Eth1Data, BlockNumber), u64>; - -/// We will declare ourself synced with the Eth1 chain, even if we are this many blocks behind. -/// -/// This number (8) was chosen somewhat arbitrarily. -const ETH1_SYNC_TOLERANCE: u64 = 8; - -#[derive(Debug)] -pub enum Error { - /// Unable to return an Eth1Data for the given epoch. - EpochUnavailable, - /// An error from the backend service (e.g., the web3 data fetcher). - BackendError(String), - /// The deposit index of the state is higher than the deposit contract. This is a critical - /// consensus error. - DepositIndexTooHigh, - /// The current state was unable to return the root for the state at the start of the eth1 - /// voting period. - UnableToGetPreviousStateRoot(BeaconStateError), - /// The state required to find the previous eth1 block was not found in the store. - PreviousStateNotInDB(Hash256), - /// There was an error accessing an object in the database. - StoreError(StoreError), - /// The eth1 head block at the start of the eth1 voting period is unknown. - /// - /// The eth1 caches are likely stale. - UnknownVotingPeriodHead, - /// The block that was previously voted into the state is unknown. 
- /// - /// The eth1 caches are stale, or a junk value was voted into the chain. - UnknownPreviousEth1BlockHash, - /// An arithmetic error occurred. - ArithError(safe_arith::ArithError), -} - -impl From for Error { - fn from(e: safe_arith::ArithError) -> Self { - Self::ArithError(e) - } -} - -/// Returns an `Eth1SyncStatusData` given some parameters: -/// -/// - `latest_cached_block`: The latest eth1 block in our cache, if any. -/// - `head_block`: The block at the very head of our eth1 node (ignoring follow distance, etc). -/// - `genesis_time`: beacon chain genesis time. -/// - `current_slot`: current beacon chain slot. -/// - `spec`: current beacon chain specification. -fn get_sync_status( - latest_cached_block: Option<&Eth1Block>, - head_block: Option<&Eth1Block>, - genesis_time: u64, - current_slot: Option, - spec: &ChainSpec, -) -> Option { - let eth1_follow_distance_seconds = spec - .seconds_per_eth1_block - .saturating_mul(spec.eth1_follow_distance); - - // The voting target timestamp needs to be special-cased when we're before - // genesis (as defined by `current_slot == None`). - // - // For the sake of this status, when prior to genesis we want to invent some voting periods - // that are *before* genesis, so that we can indicate to users that we're actually adequately - // cached for where they are in time. - let voting_target_timestamp = if let Some(current_slot) = current_slot { - let period = E::SlotsPerEth1VotingPeriod::to_u64(); - let voting_period_start_slot = (current_slot / period) * period; - - let period_start = slot_start_seconds( - genesis_time, - spec.seconds_per_slot, - voting_period_start_slot, - ); - - period_start.saturating_sub(eth1_follow_distance_seconds) - } else { - // The number of seconds in an eth1 voting period. 
- let voting_period_duration = - E::slots_per_eth1_voting_period() as u64 * spec.seconds_per_slot; - - let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?.as_secs(); - - // The number of seconds between now and genesis. - let seconds_till_genesis = genesis_time.saturating_sub(now); - - // Determine how many voting periods are contained in distance between - // now and genesis, rounding up. - let voting_periods_past = seconds_till_genesis.div_ceil(voting_period_duration); - - // Return the start time of the current voting period*. - // - // *: This voting period doesn't *actually* exist, we're just using it to - // give useful logs prior to genesis. - genesis_time - .saturating_sub(voting_periods_past * voting_period_duration) - .saturating_sub(eth1_follow_distance_seconds) - }; - - let latest_cached_block_number = latest_cached_block.map(|b| b.number); - let latest_cached_block_timestamp = latest_cached_block.map(|b| b.timestamp); - let head_block_number = head_block.map(|b| b.number); - let head_block_timestamp = head_block.map(|b| b.timestamp); - - let eth1_node_sync_status_percentage = if let Some(head_block) = head_block { - let now = SystemTime::now().duration_since(UNIX_EPOCH).ok()?.as_secs(); - let head_age = now.saturating_sub(head_block.timestamp); - - if head_age < ETH1_SYNC_TOLERANCE * spec.seconds_per_eth1_block { - // Always indicate we are fully synced if it's within the sync threshold. - 100.0 - } else { - let blocks_behind = head_age - .checked_div(spec.seconds_per_eth1_block) - .unwrap_or(0); - - let part = f64::from(head_block.number as u32); - let whole = f64::from(head_block.number.saturating_add(blocks_behind) as u32); - - if whole > 0.0 { - (part / whole) * 100.0 - } else { - // Avoids a divide-by-zero. - 0.0 - } - } - } else { - // Always return 0% synced if the head block of the eth1 chain is unknown. 
- 0.0 - }; - - // Lighthouse is "cached and ready" when it has cached enough blocks to cover the start of the - // current voting period. - let lighthouse_is_cached_and_ready = - latest_cached_block_timestamp.is_some_and(|t| t >= voting_target_timestamp); - - Some(Eth1SyncStatusData { - head_block_number, - head_block_timestamp, - latest_cached_block_number, - latest_cached_block_timestamp, - voting_target_timestamp, - eth1_node_sync_status_percentage, - lighthouse_is_cached_and_ready, - }) -} - -#[derive(Encode, Decode, Clone)] -pub struct SszEth1 { - pub use_dummy_backend: bool, - pub backend_bytes: Vec, -} - -impl StoreItem for SszEth1 { - fn db_column() -> DBColumn { - DBColumn::Eth1Cache - } - - fn as_store_bytes(&self) -> Vec { - self.as_ssz_bytes() - } - - fn from_store_bytes(bytes: &[u8]) -> Result { - Self::from_ssz_bytes(bytes).map_err(Into::into) - } -} - -/// Holds an `Eth1ChainBackend` and serves requests from the `BeaconChain`. -pub struct Eth1Chain -where - T: Eth1ChainBackend, - E: EthSpec, -{ - backend: T, - /// When `true`, the backend will be ignored and dummy data from the 2019 Canada interop method - /// will be used instead. - use_dummy_backend: bool, - _phantom: PhantomData, -} - -impl Eth1Chain -where - T: Eth1ChainBackend, - E: EthSpec, -{ - pub fn new(backend: T) -> Self { - Self { - backend, - use_dummy_backend: false, - _phantom: PhantomData, - } - } - - pub fn new_dummy(backend: T) -> Self { - Self { - use_dummy_backend: true, - ..Self::new(backend) - } - } - - /// Returns `true` if the "dummy" backend is being used. - pub fn is_dummy_backend(&self) -> bool { - self.use_dummy_backend - } - - /// Returns the `Eth1Data` that should be included in a block being produced for the given - /// `state`. 
- pub fn eth1_data_for_block_production( - &self, - state: &BeaconState, - spec: &ChainSpec, - ) -> Result { - if self.use_dummy_backend { - let dummy_backend: DummyEth1ChainBackend = DummyEth1ChainBackend::default(); - dummy_backend.eth1_data(state, spec) - } else { - self.backend.eth1_data(state, spec) - } - } - - /// Returns a list of `Deposits` that may be included in a block. - /// - /// Including all of the returned `Deposits` in a block should _not_ cause it to become - /// invalid (i.e., this function should respect the maximum). - /// - /// `eth1_data_vote` is the `Eth1Data` that the block producer would include in their - /// block. This vote may change the `state.eth1_data` value, which would change the deposit - /// count and therefore change the output of this function. - pub fn deposits_for_block_inclusion( - &self, - state: &BeaconState, - eth1_data_vote: &Eth1Data, - spec: &ChainSpec, - ) -> Result, Error> { - if self.use_dummy_backend { - let dummy_backend: DummyEth1ChainBackend = DummyEth1ChainBackend::default(); - dummy_backend.queued_deposits(state, eth1_data_vote, spec) - } else { - self.backend.queued_deposits(state, eth1_data_vote, spec) - } - } - - /// Returns a status indicating how synced our caches are with the eth1 chain. - pub fn sync_status( - &self, - genesis_time: u64, - current_slot: Option, - spec: &ChainSpec, - ) -> Option { - get_sync_status::( - self.backend.latest_cached_block().as_ref(), - self.backend.head_block().as_ref(), - genesis_time, - current_slot, - spec, - ) - } - - /// Instantiate `Eth1Chain` from a persisted `SszEth1`. - /// - /// The `Eth1Chain` will have the same caches as the persisted `SszEth1`. 
- pub fn from_ssz_container( - ssz_container: &SszEth1, - config: Eth1Config, - spec: Arc, - ) -> Result { - let backend = Eth1ChainBackend::from_bytes(&ssz_container.backend_bytes, config, spec)?; - Ok(Self { - use_dummy_backend: ssz_container.use_dummy_backend, - backend, - _phantom: PhantomData, - }) - } - - /// Return a `SszEth1` containing the state of `Eth1Chain`. - pub fn as_ssz_container(&self) -> SszEth1 { - SszEth1 { - use_dummy_backend: self.use_dummy_backend, - backend_bytes: self.backend.as_bytes(), - } - } - - /// Set in motion the finalization of `Eth1Data`. This method is called during block import - /// so it should be fast. - pub fn finalize_eth1_data(&self, eth1_data: Eth1Data) { - self.backend.finalize_eth1_data(eth1_data); - } - - /// Consumes `self`, returning the backend. - pub fn into_backend(self) -> T { - self.backend - } -} - -pub trait Eth1ChainBackend: Sized + Send + Sync { - /// Returns the `Eth1Data` that should be included in a block being produced for the given - /// `state`. - fn eth1_data(&self, beacon_state: &BeaconState, spec: &ChainSpec) - -> Result; - - /// Returns all `Deposits` between `state.eth1_deposit_index` and - /// `state.eth1_data.deposit_count`. - /// - /// # Note: - /// - /// It is possible that not all returned `Deposits` can be included in a block. E.g., there may - /// be more than `MAX_DEPOSIT_COUNT` or the churn may be too high. - fn queued_deposits( - &self, - beacon_state: &BeaconState, - eth1_data_vote: &Eth1Data, - spec: &ChainSpec, - ) -> Result, Error>; - - /// Returns the latest block stored in the cache. Used to obtain an idea of how up-to-date the - /// beacon node eth1 cache is. - fn latest_cached_block(&self) -> Option; - - /// Set in motion the finalization of `Eth1Data`. This method is called during block import - /// so it should be fast. - fn finalize_eth1_data(&self, eth1_data: Eth1Data); - - /// Returns the block at the head of the chain (ignoring follow distance, etc). 
Used to obtain - /// an idea of how up-to-date the remote eth1 node is. - fn head_block(&self) -> Option; - - /// Encode the `Eth1ChainBackend` instance to bytes. - fn as_bytes(&self) -> Vec; - - /// Create a `Eth1ChainBackend` instance given encoded bytes. - fn from_bytes(bytes: &[u8], config: Eth1Config, spec: Arc) -> Result; -} - -/// Provides a simple, testing-only backend that generates deterministic, meaningless eth1 data. -/// -/// Never creates deposits, therefore the validator set is static. -/// -/// This was used in the 2019 Canada interop workshops. -pub struct DummyEth1ChainBackend(PhantomData); - -impl Eth1ChainBackend for DummyEth1ChainBackend { - /// Produce some deterministic junk based upon the current epoch. - fn eth1_data(&self, state: &BeaconState, _spec: &ChainSpec) -> Result { - // [New in Electra:EIP6110] - if let Ok(deposit_requests_start_index) = state.deposit_requests_start_index() { - if state.eth1_deposit_index() == deposit_requests_start_index { - return Ok(state.eth1_data().clone()); - } - } - let current_epoch = state.current_epoch(); - let slots_per_voting_period = E::slots_per_eth1_voting_period() as u64; - let current_voting_period: u64 = current_epoch.as_u64() / slots_per_voting_period; - - let deposit_root = hash(&int_to_bytes32(current_voting_period)); - let block_hash = hash(&deposit_root); - - Ok(Eth1Data { - deposit_root: Hash256::from_slice(&deposit_root), - deposit_count: state.eth1_deposit_index(), - block_hash: Hash256::from_slice(&block_hash), - }) - } - - /// The dummy back-end never produces deposits. - fn queued_deposits( - &self, - _: &BeaconState, - _: &Eth1Data, - _: &ChainSpec, - ) -> Result, Error> { - Ok(vec![]) - } - - fn latest_cached_block(&self) -> Option { - None - } - - fn finalize_eth1_data(&self, _eth1_data: Eth1Data) {} - - fn head_block(&self) -> Option { - None - } - - /// Return empty Vec for dummy backend. - fn as_bytes(&self) -> Vec { - Vec::new() - } - - /// Create dummy eth1 backend. 
- fn from_bytes( - _bytes: &[u8], - _config: Eth1Config, - _spec: Arc, - ) -> Result { - Ok(Self(PhantomData)) - } -} - -impl Default for DummyEth1ChainBackend { - fn default() -> Self { - Self(PhantomData) - } -} - -/// Maintains a cache of eth1 blocks and deposits and provides functions to allow block producers -/// to include new deposits and vote on `Eth1Data`. -/// -/// The `core` connects to some external eth1 client (e.g., Parity/Geth) and polls it for -/// information. -#[derive(Clone)] -pub struct CachingEth1Backend { - pub core: HttpService, - _phantom: PhantomData, -} - -impl CachingEth1Backend { - /// Instantiates `self` with empty caches. - /// - /// Does not connect to the eth1 node or start any tasks to keep the cache updated. - pub fn new(config: Eth1Config, spec: Arc) -> Result { - Ok(Self { - core: HttpService::new(config, spec) - .map_err(|e| format!("Failed to create eth1 http service: {:?}", e))?, - _phantom: PhantomData, - }) - } - - /// Starts the routine which connects to the external eth1 node and updates the caches. - pub fn start(&self, handle: TaskExecutor) { - HttpService::auto_update(self.core.clone(), handle); - } - - /// Instantiates `self` from an existing service. 
- pub fn from_service(service: HttpService) -> Self { - Self { - core: service, - _phantom: PhantomData, - } - } -} - -impl Eth1ChainBackend for CachingEth1Backend { - fn eth1_data(&self, state: &BeaconState, spec: &ChainSpec) -> Result { - // [New in Electra:EIP6110] - if let Ok(deposit_requests_start_index) = state.deposit_requests_start_index() { - if state.eth1_deposit_index() == deposit_requests_start_index { - return Ok(state.eth1_data().clone()); - } - } - let period = E::SlotsPerEth1VotingPeriod::to_u64(); - let voting_period_start_slot = (state.slot() / period) * period; - let voting_period_start_seconds = slot_start_seconds( - state.genesis_time(), - spec.seconds_per_slot, - voting_period_start_slot, - ); - - let votes_to_consider = { - let blocks = self.core.blocks().read(); - get_votes_to_consider(blocks.iter(), voting_period_start_seconds, spec) - }; - - trace!( - votes_to_consider = votes_to_consider.len(), - "Found eth1 data votes_to_consider" - ); - let valid_votes = collect_valid_votes(state, &votes_to_consider); - - let eth1_data = if let Some(eth1_data) = find_winning_vote(valid_votes) { - eth1_data - } else { - // In this case, there are no valid votes available. - // - // Here we choose the eth1_data corresponding to the latest block in our voting window. - // If no votes exist, choose `state.eth1_data` as default vote. 
- votes_to_consider - .iter() - .max_by_key(|(_, block_number)| *block_number) - .map(|vote| { - let vote = vote.0.clone(); - debug!( - outcome = "Casting vote corresponding to last candidate eth1 block", - ?vote, - "No valid eth1_data votes" - ); - vote - }) - .unwrap_or_else(|| { - let vote = state.eth1_data().clone(); - error!( - lowest_block_number = self.core.lowest_block_number(), - earliest_block_timestamp = self.core.earliest_block_timestamp(), - genesis_time = state.genesis_time(), - outcome = "casting `state.eth1_data` as eth1 vote", - "No valid eth1_data votes, `votes_to_consider` empty" - ); - metrics::inc_counter(&metrics::DEFAULT_ETH1_VOTES); - vote - }) - }; - - debug!( - deposit_root = ?eth1_data.deposit_root, - deposit_count = eth1_data.deposit_count, - block_hash = ?eth1_data.block_hash, - "Produced vote for eth1 chain" - ); - - Ok(eth1_data) - } - - fn queued_deposits( - &self, - state: &BeaconState, - eth1_data_vote: &Eth1Data, - _spec: &ChainSpec, - ) -> Result, Error> { - let deposit_index = state.eth1_deposit_index(); - let deposit_count = if let Some(new_eth1_data) = get_new_eth1_data(state, eth1_data_vote)? 
{ - new_eth1_data.deposit_count - } else { - state.eth1_data().deposit_count - }; - - // [New in Electra:EIP6110] - let deposit_index_limit = - if let Ok(deposit_requests_start_index) = state.deposit_requests_start_index() { - std::cmp::min(deposit_count, deposit_requests_start_index) - } else { - deposit_count - }; - - match deposit_index.cmp(&deposit_index_limit) { - Ordering::Greater => Err(Error::DepositIndexTooHigh), - Ordering::Equal => Ok(vec![]), - Ordering::Less => { - let next = deposit_index; - let last = std::cmp::min(deposit_index_limit, next + E::MaxDeposits::to_u64()); - - self.core - .deposits() - .read() - .cache - .get_deposits(next, last, deposit_count) - .map_err(|e| Error::BackendError(format!("Failed to get deposits: {:?}", e))) - .map(|(_deposit_root, deposits)| deposits) - } - } - } - - fn latest_cached_block(&self) -> Option { - self.core.latest_cached_block() - } - - /// This only writes the eth1_data to a temporary cache so that the service - /// thread can later do the actual finalizing of the deposit tree. - fn finalize_eth1_data(&self, eth1_data: Eth1Data) { - self.core.set_to_finalize(Some(eth1_data)); - } - - fn head_block(&self) -> Option { - self.core.head_block() - } - - /// Return encoded byte representation of the block and deposit caches. - fn as_bytes(&self) -> Vec { - self.core.as_bytes() - } - - /// Recover the cached backend from encoded bytes. - fn from_bytes(bytes: &[u8], config: Eth1Config, spec: Arc) -> Result { - let inner = HttpService::from_bytes(bytes, config, spec)?; - Ok(Self { - core: inner, - _phantom: PhantomData, - }) - } -} - -/// Get all votes from eth1 blocks which are in the list of candidate blocks for the -/// current eth1 voting period. -/// -/// Returns a hashmap of `Eth1Data` to its associated eth1 `block_number`. 
-fn get_votes_to_consider<'a, I>( - blocks: I, - voting_period_start_seconds: u64, - spec: &ChainSpec, -) -> HashMap -where - I: DoubleEndedIterator + Clone, -{ - blocks - .rev() - .skip_while(|eth1_block| !is_candidate_block(eth1_block, voting_period_start_seconds, spec)) - .take_while(|eth1_block| is_candidate_block(eth1_block, voting_period_start_seconds, spec)) - .filter_map(|eth1_block| { - eth1_block - .clone() - .eth1_data() - .map(|eth1_data| (eth1_data, eth1_block.number)) - }) - .collect() -} - -/// Collect all valid votes that are cast during the current voting period. -/// Return hashmap with count of each vote cast. -fn collect_valid_votes( - state: &BeaconState, - votes_to_consider: &HashMap, -) -> Eth1DataVoteCount { - let mut valid_votes = HashMap::new(); - state - .eth1_data_votes() - .iter() - .filter_map(|vote| { - votes_to_consider - .get(vote) - .map(|block_num| (vote.clone(), *block_num)) - }) - .for_each(|(eth1_data, block_number)| { - valid_votes - .entry((eth1_data, block_number)) - .and_modify(|count| *count += 1) - .or_insert(1_u64); - }); - valid_votes -} - -/// Selects the winning vote from `valid_votes`. -fn find_winning_vote(valid_votes: Eth1DataVoteCount) -> Option { - valid_votes - .iter() - .max_by_key(|((_eth1_data, block_number), vote_count)| (*vote_count, block_number)) - .map(|((eth1_data, _), _)| eth1_data.clone()) -} - -/// Returns the unix-epoch seconds at the start of the given `slot`. -fn slot_start_seconds(genesis_unix_seconds: u64, seconds_per_slot: u64, slot: Slot) -> u64 { - genesis_unix_seconds + slot.as_u64() * seconds_per_slot -} - -/// Returns a boolean denoting if a given `Eth1Block` is a candidate for `Eth1Data` calculation -/// at the timestamp `period_start`. -/// -/// Note: `period_start` needs to be atleast (`spec.seconds_per_eth1_block * spec.eth1_follow_distance * 2`) -/// for this function to return meaningful values. 
-fn is_candidate_block(block: &Eth1Block, period_start: u64, spec: &ChainSpec) -> bool { - block.timestamp - <= period_start.saturating_sub(spec.seconds_per_eth1_block * spec.eth1_follow_distance) - && block.timestamp - >= period_start - .saturating_sub(spec.seconds_per_eth1_block * spec.eth1_follow_distance * 2) -} - -#[cfg(test)] -mod test { - use super::*; - use types::{DepositData, FixedBytesExtended, MinimalEthSpec, Signature}; - - type E = MinimalEthSpec; - - fn get_eth1_data(i: u64) -> Eth1Data { - Eth1Data { - block_hash: Hash256::from_low_u64_be(i), - deposit_root: Hash256::from_low_u64_be(u64::MAX - i), - deposit_count: i, - } - } - - fn get_voting_period_start_seconds(state: &BeaconState, spec: &ChainSpec) -> u64 { - let period = ::SlotsPerEth1VotingPeriod::to_u64(); - let voting_period_start_slot = (state.slot() / period) * period; - slot_start_seconds( - state.genesis_time(), - spec.seconds_per_slot, - voting_period_start_slot, - ) - } - - #[test] - fn slot_start_time() { - let zero_sec = 0; - assert_eq!(slot_start_seconds(100, zero_sec, Slot::new(2)), 100); - - let one_sec = 1; - assert_eq!(slot_start_seconds(100, one_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds(100, one_sec, Slot::new(1)), 101); - assert_eq!(slot_start_seconds(100, one_sec, Slot::new(2)), 102); - - let three_sec = 3; - assert_eq!(slot_start_seconds(100, three_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds(100, three_sec, Slot::new(1)), 103); - assert_eq!(slot_start_seconds(100, three_sec, Slot::new(2)), 106); - - let five_sec = 5; - assert_eq!(slot_start_seconds(100, five_sec, Slot::new(0)), 100); - assert_eq!(slot_start_seconds(100, five_sec, Slot::new(1)), 105); - assert_eq!(slot_start_seconds(100, five_sec, Slot::new(2)), 110); - assert_eq!(slot_start_seconds(100, five_sec, Slot::new(3)), 115); - } - - fn get_eth1_block(timestamp: u64, number: u64) -> Eth1Block { - Eth1Block { - number, - timestamp, - hash: Hash256::from_low_u64_be(number), - deposit_root: 
Some(Hash256::from_low_u64_be(number)), - deposit_count: Some(number), - } - } - - mod eth1_chain_json_backend { - use super::*; - use eth1::DepositLog; - use logging::create_test_tracing_subscriber; - use types::{test_utils::generate_deterministic_keypair, MainnetEthSpec}; - - fn get_eth1_chain() -> Eth1Chain, E> { - create_test_tracing_subscriber(); - - let eth1_config = Eth1Config { - ..Eth1Config::default() - }; - - Eth1Chain::new( - CachingEth1Backend::new(eth1_config, Arc::new(MainnetEthSpec::default_spec())) - .unwrap(), - ) - } - - fn get_deposit_log(i: u64, spec: &ChainSpec) -> DepositLog { - let keypair = generate_deterministic_keypair(i as usize); - let mut deposit = DepositData { - pubkey: keypair.pk.into(), - withdrawal_credentials: Hash256::zero(), - amount: spec.max_effective_balance, - signature: Signature::empty().into(), - }; - - deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); - - DepositLog { - deposit_data: deposit, - block_number: i, - index: i, - signature_is_valid: true, - } - } - - #[test] - fn deposits_empty_cache() { - let spec = &E::default_spec(); - - let eth1_chain = get_eth1_chain(); - - assert!( - !eth1_chain.use_dummy_backend, - "test should not use dummy backend" - ); - - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - *state.eth1_deposit_index_mut() = 0; - state.eth1_data_mut().deposit_count = 0; - - assert!( - eth1_chain - .deposits_for_block_inclusion(&state, &Eth1Data::default(), spec) - .is_ok(), - "should succeed if cache is empty but no deposits are required" - ); - - state.eth1_data_mut().deposit_count = 1; - - assert!( - eth1_chain - .deposits_for_block_inclusion(&state, &Eth1Data::default(), spec) - .is_err(), - "should fail to get deposits if required, but cache is empty" - ); - } - - #[test] - fn deposits_with_cache() { - let spec = &E::default_spec(); - - let eth1_chain = get_eth1_chain(); - let max_deposits = ::MaxDeposits::to_u64(); - - assert!( - 
!eth1_chain.use_dummy_backend, - "test should not use dummy backend" - ); - - let deposits: Vec<_> = (0..max_deposits + 2) - .map(|i| get_deposit_log(i, spec)) - .inspect(|log| { - eth1_chain - .backend - .core - .deposits() - .write() - .cache - .insert_log(log.clone()) - .expect("should insert log"); - }) - .collect(); - - assert_eq!( - eth1_chain.backend.core.deposits().write().cache.len(), - deposits.len(), - "cache should store all logs" - ); - - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - *state.eth1_deposit_index_mut() = 0; - state.eth1_data_mut().deposit_count = 0; - - assert!( - eth1_chain - .deposits_for_block_inclusion(&state, &Eth1Data::default(), spec) - .is_ok(), - "should succeed if no deposits are required" - ); - - (0..3).for_each(|initial_deposit_index| { - *state.eth1_deposit_index_mut() = initial_deposit_index as u64; - - (initial_deposit_index..deposits.len()).for_each(|i| { - state.eth1_data_mut().deposit_count = i as u64; - - let deposits_for_inclusion = eth1_chain - .deposits_for_block_inclusion(&state, &Eth1Data::default(), spec) - .unwrap_or_else(|_| panic!("should find deposit for {}", i)); - - let expected_len = - std::cmp::min(i - initial_deposit_index, max_deposits as usize); - - assert_eq!( - deposits_for_inclusion.len(), - expected_len, - "should find {} deposits", - expected_len - ); - - let deposit_data_for_inclusion: Vec<_> = deposits_for_inclusion - .into_iter() - .map(|deposit| deposit.data) - .collect(); - - let expected_deposit_data: Vec<_> = deposits[initial_deposit_index - ..std::cmp::min(initial_deposit_index + expected_len, deposits.len())] - .iter() - .map(|log| log.deposit_data.clone()) - .collect(); - - assert_eq!( - deposit_data_for_inclusion, expected_deposit_data, - "should find the correct deposits for {}", - i - ); - }); - }) - } - - #[test] - fn eth1_data_empty_cache() { - let spec = &E::default_spec(); - - let eth1_chain = get_eth1_chain(); - - assert!( - 
!eth1_chain.use_dummy_backend, - "test should not use dummy backend" - ); - - let state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - - let a = eth1_chain - .eth1_data_for_block_production(&state, spec) - .expect("should produce default eth1 data vote"); - assert_eq!( - a, - *state.eth1_data(), - "default vote should be same as state.eth1_data" - ); - } - - #[test] - fn default_vote() { - let spec = &E::default_spec(); - let slots_per_eth1_voting_period = ::SlotsPerEth1VotingPeriod::to_u64(); - let eth1_follow_distance = spec.eth1_follow_distance; - - let eth1_chain = get_eth1_chain(); - - assert!( - !eth1_chain.use_dummy_backend, - "test should not use dummy backend" - ); - - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - - *state.slot_mut() = Slot::from(slots_per_eth1_voting_period * 10); - let follow_distance_seconds = eth1_follow_distance * spec.seconds_per_eth1_block; - let voting_period_start = get_voting_period_start_seconds(&state, spec); - let start_eth1_block = voting_period_start - follow_distance_seconds * 2; - let end_eth1_block = voting_period_start - follow_distance_seconds; - - // Populate blocks cache with candidate eth1 blocks - let blocks = (start_eth1_block..end_eth1_block) - .map(|i| get_eth1_block(i, i)) - .collect::>(); - - blocks.iter().for_each(|block| { - eth1_chain - .backend - .core - .blocks() - .write() - .insert_root_or_child(block.clone()) - .expect("should add blocks to cache"); - }); - - let vote = eth1_chain - .eth1_data_for_block_production(&state, spec) - .expect("should produce default eth1 data vote"); - - assert_eq!( - vote, - blocks - .last() - .expect("should have blocks") - .clone() - .eth1_data() - .expect("should have valid eth1 data"), - "default vote must correspond to last block in candidate blocks" - ); - } - } - - mod eth1_data_sets { - use super::*; - - #[test] - fn empty_cache() { - let spec = &E::default_spec(); - let state: BeaconState = BeaconState::new(0, 
get_eth1_data(0), spec); - - let blocks = []; - - assert_eq!( - get_votes_to_consider( - blocks.iter(), - get_voting_period_start_seconds(&state, spec), - spec, - ), - HashMap::new() - ); - } - - #[test] - fn ideal_scenario() { - let spec = E::default_spec(); - - let slots_per_eth1_voting_period = ::SlotsPerEth1VotingPeriod::to_u64(); - let eth1_follow_distance = spec.eth1_follow_distance; - - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), &spec); - *state.genesis_time_mut() = 0; - *state.slot_mut() = Slot::from(slots_per_eth1_voting_period * 10); - - let follow_distance_seconds = eth1_follow_distance * spec.seconds_per_eth1_block; - let voting_period_start = get_voting_period_start_seconds(&state, &spec); - let start_eth1_block = voting_period_start - follow_distance_seconds * 2; - let end_eth1_block = voting_period_start - follow_distance_seconds; - let blocks = (start_eth1_block..end_eth1_block) - .map(|i| get_eth1_block(i, i)) - .collect::>(); - - let votes_to_consider = - get_votes_to_consider(blocks.iter(), voting_period_start, &spec); - assert_eq!( - votes_to_consider.len() as u64, - end_eth1_block - start_eth1_block, - "all produced eth1 blocks should be in votes to consider" - ); - - (start_eth1_block..end_eth1_block) - .map(|i| get_eth1_block(i, i)) - .for_each(|eth1_block| { - assert_eq!( - eth1_block.number, - *votes_to_consider - .get(ð1_block.clone().eth1_data().unwrap()) - .expect("votes_to_consider should have expected block numbers") - ) - }); - } - } - - mod collect_valid_votes { - use super::*; - use types::List; - - fn get_eth1_data_vec(n: u64, block_number_offset: u64) -> Vec<(Eth1Data, BlockNumber)> { - (0..n) - .map(|i| (get_eth1_data(i), i + block_number_offset)) - .collect() - } - - macro_rules! 
assert_votes { - ($votes: expr, $expected: expr, $text: expr) => { - let expected: Vec<(Eth1Data, BlockNumber)> = $expected; - assert_eq!( - $votes.len(), - expected.len(), - "map should have the same number of elements" - ); - expected.iter().for_each(|(eth1_data, block_number)| { - $votes - .get(&(eth1_data.clone(), *block_number)) - .expect("should contain eth1 data"); - }) - }; - } - - #[test] - fn no_votes_in_state() { - let slots = ::SlotsPerEth1VotingPeriod::to_u64(); - let spec = &E::default_spec(); - let state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - - let votes_to_consider = get_eth1_data_vec(slots, 0); - - let votes = collect_valid_votes(&state, &votes_to_consider.into_iter().collect()); - assert_eq!( - votes.len(), - 0, - "should not find any votes when state has no votes" - ); - } - - #[test] - fn distinct_votes_in_state() { - let slots = ::SlotsPerEth1VotingPeriod::to_u64(); - let spec = &E::default_spec(); - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - - let votes_to_consider = get_eth1_data_vec(slots, 0); - - *state.eth1_data_votes_mut() = List::new( - votes_to_consider[0..slots as usize / 4] - .iter() - .map(|(eth1_data, _)| eth1_data) - .cloned() - .collect::>(), - ) - .unwrap(); - - let votes = - collect_valid_votes(&state, &votes_to_consider.clone().into_iter().collect()); - assert_votes!( - votes, - votes_to_consider[0..slots as usize / 4].to_vec(), - "should find as many votes as were in the state" - ); - } - - #[test] - fn duplicate_votes_in_state() { - let slots = ::SlotsPerEth1VotingPeriod::to_u64(); - let spec = &E::default_spec(); - let mut state: BeaconState = BeaconState::new(0, get_eth1_data(0), spec); - - let votes_to_consider = get_eth1_data_vec(slots, 0); - - let duplicate_eth1_data = votes_to_consider - .last() - .expect("should have some eth1 data") - .clone(); - - *state.eth1_data_votes_mut() = List::new( - vec![duplicate_eth1_data.clone(); 4] - .iter() - .map(|(eth1_data, _)| 
eth1_data) - .cloned() - .collect::>(), - ) - .unwrap(); - - let votes = collect_valid_votes(&state, &votes_to_consider.into_iter().collect()); - assert_votes!( - votes, - // There should only be one value if there's a duplicate - vec![duplicate_eth1_data.clone()], - "should find as many votes as were in the state" - ); - - assert_eq!( - *votes - .get(&duplicate_eth1_data) - .expect("should contain vote"), - 4, - "should have four votes" - ); - } - } - - mod winning_vote { - use super::*; - - type Vote = ((Eth1Data, u64), u64); - - fn vote(block_number: u64, vote_count: u64) -> Vote { - ( - ( - Eth1Data { - deposit_root: Hash256::from_low_u64_be(block_number), - deposit_count: block_number, - block_hash: Hash256::from_low_u64_be(block_number), - }, - block_number, - ), - vote_count, - ) - } - - fn vote_data(vote: &Vote) -> Eth1Data { - (vote.0).0.clone() - } - - #[test] - fn no_votes() { - let no_votes = vec![vote(0, 0), vote(1, 0), vote(3, 0), vote(2, 0)]; - - assert_eq!( - // Favour the highest block number when there are no votes. - vote_data(&no_votes[2]), - find_winning_vote(no_votes.into_iter().collect()).expect("should find winner") - ); - } - - #[test] - fn equal_votes() { - let votes = vec![vote(0, 1), vote(1, 1), vote(3, 1), vote(2, 1)]; - - assert_eq!( - // Favour the highest block number when there are equal votes. - vote_data(&votes[2]), - find_winning_vote(votes.into_iter().collect()).expect("should find winner") - ); - } - - #[test] - fn some_votes() { - let votes = vec![vote(0, 0), vote(1, 1), vote(3, 1), vote(2, 2)]; - - assert_eq!( - // Favour the highest vote over the highest block number. - vote_data(&votes[3]), - find_winning_vote(votes.into_iter().collect()).expect("should find winner") - ); - } - - #[test] - fn tying_votes() { - let votes = vec![vote(0, 0), vote(1, 1), vote(2, 2), vote(3, 2)]; - - assert_eq!( - // Favour the highest block number for tying votes. 
- vote_data(&votes[3]), - find_winning_vote(votes.into_iter().collect()).expect("should find winner") - ); - } - - #[test] - fn all_tying_votes() { - let votes = vec![vote(3, 42), vote(2, 42), vote(1, 42), vote(0, 42)]; - - assert_eq!( - // Favour the highest block number for tying votes. - vote_data(&votes[0]), - find_winning_vote(votes.into_iter().collect()).expect("should find winner") - ); - } - } -} diff --git a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs b/beacon_node/beacon_chain/src/eth1_finalization_cache.rs deleted file mode 100644 index 8c3bb8c483f..00000000000 --- a/beacon_node/beacon_chain/src/eth1_finalization_cache.rs +++ /dev/null @@ -1,482 +0,0 @@ -use ssz_derive::{Decode, Encode}; -use std::cmp; -use std::collections::BTreeMap; -use tracing::debug; -use types::{Checkpoint, Epoch, Eth1Data, Hash256 as Root}; - -/// The default size of the cache. -/// The beacon chain only looks at the last 4 epochs for finalization. -/// Add 1 for current epoch and 4 earlier epochs. -pub const DEFAULT_ETH1_CACHE_SIZE: usize = 5; - -/// These fields are named the same as the corresponding fields in the `BeaconState` -/// as this structure stores these values from the `BeaconState` at a `Checkpoint` -#[derive(Clone, Debug, PartialEq, Encode, Decode)] -pub struct Eth1FinalizationData { - pub eth1_data: Eth1Data, - pub eth1_deposit_index: u64, -} - -impl Eth1FinalizationData { - /// Ensures the deposit finalization conditions have been met. See: - /// https://eips.ethereum.org/EIPS/eip-4881#deposit-finalization-conditions - fn fully_imported(&self) -> bool { - self.eth1_deposit_index >= self.eth1_data.deposit_count - } -} - -/// Implements map from Checkpoint -> Eth1CacheData -pub struct CheckpointMap { - capacity: usize, - // There shouldn't be more than a couple of potential checkpoints at the same - // epoch. 
Searching through a vector for the matching Root should be faster - // than using another map from Root->Eth1CacheData - store: BTreeMap>, -} - -impl Default for CheckpointMap { - fn default() -> Self { - Self::new() - } -} - -/// Provides a map of `Eth1CacheData` referenced by `Checkpoint` -/// -/// ## Cache Queuing -/// -/// The cache keeps a maximum number of (`capacity`) epochs. Because there may be -/// forks at the epoch boundary, it's possible that there exists more than one -/// `Checkpoint` for the same `Epoch`. This cache will store all checkpoints for -/// a given `Epoch`. When adding data for a new `Checkpoint` would cause the number -/// of `Epoch`s stored to exceed `capacity`, the data for oldest `Epoch` is dropped -impl CheckpointMap { - pub fn new() -> Self { - CheckpointMap { - capacity: DEFAULT_ETH1_CACHE_SIZE, - store: BTreeMap::new(), - } - } - - pub fn with_capacity(capacity: usize) -> Self { - CheckpointMap { - capacity: cmp::max(1, capacity), - store: BTreeMap::new(), - } - } - - pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) { - self.store - .entry(checkpoint.epoch) - .or_default() - .push((checkpoint.root, eth1_finalization_data)); - - // faster to reduce size after the fact than do pre-checking to see - // if the current data would increase the size of the BTreeMap - while self.store.len() > self.capacity { - let oldest_stored_epoch = self.store.keys().next().cloned().unwrap(); - self.store.remove(&oldest_stored_epoch); - } - } - - pub fn get(&self, checkpoint: &Checkpoint) -> Option<&Eth1FinalizationData> { - match self.store.get(&checkpoint.epoch) { - Some(vec) => { - for (root, data) in vec { - if *root == checkpoint.root { - return Some(data); - } - } - None - } - None => None, - } - } - - #[cfg(test)] - pub fn len(&self) -> usize { - self.store.len() - } -} - -/// This cache stores `Eth1CacheData` that could potentially be finalized within 4 -/// future epochs. 
-#[derive(Default)] -pub struct Eth1FinalizationCache { - by_checkpoint: CheckpointMap, - pending_eth1: BTreeMap, - last_finalized: Option, -} - -/// Provides a cache of `Eth1CacheData` at epoch boundaries. This is used to -/// finalize deposits when a new epoch is finalized. -/// -impl Eth1FinalizationCache { - pub fn with_capacity(capacity: usize) -> Self { - Eth1FinalizationCache { - by_checkpoint: CheckpointMap::with_capacity(capacity), - pending_eth1: BTreeMap::new(), - last_finalized: None, - } - } - - pub fn insert(&mut self, checkpoint: Checkpoint, eth1_finalization_data: Eth1FinalizationData) { - if !eth1_finalization_data.fully_imported() { - self.pending_eth1.insert( - eth1_finalization_data.eth1_data.deposit_count, - eth1_finalization_data.eth1_data.clone(), - ); - debug!( - eth1_data.deposit_count = eth1_finalization_data.eth1_data.deposit_count, - eth1_deposit_index = eth1_finalization_data.eth1_deposit_index, - "Eth1Cache: inserted pending eth1" - ); - } - self.by_checkpoint - .insert(checkpoint, eth1_finalization_data); - } - - pub fn finalize(&mut self, checkpoint: &Checkpoint) -> Option { - if let Some(eth1_finalized_data) = self.by_checkpoint.get(checkpoint) { - let finalized_deposit_index = eth1_finalized_data.eth1_deposit_index; - let mut result = None; - while let Some(pending_count) = self.pending_eth1.keys().next().cloned() { - if finalized_deposit_index >= pending_count { - result = self.pending_eth1.remove(&pending_count); - debug!( - pending_count, - finalized_deposit_index, "Eth1Cache: dropped pending eth1" - ); - } else { - break; - } - } - if eth1_finalized_data.fully_imported() { - result = Some(eth1_finalized_data.eth1_data.clone()) - } - if result.is_some() { - self.last_finalized = result; - } - self.last_finalized.clone() - } else { - debug!( - epoch = %checkpoint.epoch, - "Eth1Cache: cache miss" - ); - None - } - } - - #[cfg(test)] - pub fn by_checkpoint(&self) -> &CheckpointMap { - &self.by_checkpoint - } - - #[cfg(test)] - pub 
fn pending_eth1(&self) -> &BTreeMap { - &self.pending_eth1 - } -} - -#[cfg(test)] -pub mod tests { - use super::*; - use std::collections::HashMap; - - const SLOTS_PER_EPOCH: u64 = 32; - const MAX_DEPOSITS: u64 = 16; - const EPOCHS_PER_ETH1_VOTING_PERIOD: u64 = 64; - - fn eth1cache() -> Eth1FinalizationCache { - Eth1FinalizationCache::default() - } - - fn random_eth1_data(deposit_count: u64) -> Eth1Data { - Eth1Data { - deposit_root: Root::random(), - deposit_count, - block_hash: Root::random(), - } - } - - fn random_checkpoint(epoch: u64) -> Checkpoint { - Checkpoint { - epoch: epoch.into(), - root: Root::random(), - } - } - - fn random_checkpoints(n: usize) -> Vec { - let mut result = Vec::with_capacity(n); - for epoch in 0..n { - result.push(random_checkpoint(epoch as u64)) - } - result - } - - #[test] - fn fully_imported_deposits() { - let epochs = 16; - let deposits_imported = 128; - - let eth1data = random_eth1_data(deposits_imported); - let checkpoints = random_checkpoints(epochs as usize); - let mut eth1cache = eth1cache(); - - for epoch in 4..epochs { - assert_eq!( - eth1cache.by_checkpoint().len(), - cmp::min((epoch - 4) as usize, DEFAULT_ETH1_CACHE_SIZE), - "Unexpected cache size" - ); - - let checkpoint = checkpoints - .get(epoch as usize) - .expect("should get checkpoint"); - eth1cache.insert( - *checkpoint, - Eth1FinalizationData { - eth1_data: eth1data.clone(), - eth1_deposit_index: deposits_imported, - }, - ); - - let finalized_checkpoint = checkpoints - .get((epoch - 4) as usize) - .expect("should get finalized checkpoint"); - assert!( - eth1cache.pending_eth1().is_empty(), - "Deposits are fully imported so pending cache should be empty" - ); - if epoch < 8 { - assert_eq!( - eth1cache.finalize(finalized_checkpoint), - None, - "Should have cache miss" - ); - } else { - assert_eq!( - eth1cache.finalize(finalized_checkpoint), - Some(eth1data.clone()), - "Should have cache hit" - ) - } - } - } - - #[test] - fn partially_imported_deposits() { - let 
epochs = 16; - let initial_deposits_imported = 1024; - let deposits_imported_per_epoch = MAX_DEPOSITS * SLOTS_PER_EPOCH; - let full_import_epoch = 13; - let total_deposits = - initial_deposits_imported + deposits_imported_per_epoch * full_import_epoch; - - let eth1data = random_eth1_data(total_deposits); - let checkpoints = random_checkpoints(epochs as usize); - let mut eth1cache = eth1cache(); - - for epoch in 0..epochs { - assert_eq!( - eth1cache.by_checkpoint().len(), - cmp::min(epoch as usize, DEFAULT_ETH1_CACHE_SIZE), - "Unexpected cache size" - ); - - let checkpoint = checkpoints - .get(epoch as usize) - .expect("should get checkpoint"); - let deposits_imported = cmp::min( - total_deposits, - initial_deposits_imported + deposits_imported_per_epoch * epoch, - ); - eth1cache.insert( - *checkpoint, - Eth1FinalizationData { - eth1_data: eth1data.clone(), - eth1_deposit_index: deposits_imported, - }, - ); - - if epoch >= 4 { - let finalized_epoch = epoch - 4; - let finalized_checkpoint = checkpoints - .get(finalized_epoch as usize) - .expect("should get finalized checkpoint"); - if finalized_epoch < full_import_epoch { - assert_eq!( - eth1cache.finalize(finalized_checkpoint), - None, - "Deposits not fully finalized so cache should return no Eth1Data", - ); - assert_eq!( - eth1cache.pending_eth1().len(), - 1, - "Deposits not fully finalized. Pending eth1 cache should have 1 entry" - ); - } else { - assert_eq!( - eth1cache.finalize(finalized_checkpoint), - Some(eth1data.clone()), - "Deposits fully imported and finalized. Cache should return Eth1Data. finalized_deposits[{}]", - (initial_deposits_imported + deposits_imported_per_epoch * finalized_epoch), - ); - assert!( - eth1cache.pending_eth1().is_empty(), - "Deposits fully imported and finalized. 
Pending cache should be empty" - ); - } - } - } - } - - #[test] - fn fork_at_epoch_boundary() { - let epochs = 12; - let deposits_imported = 128; - - let eth1data = random_eth1_data(deposits_imported); - let checkpoints = random_checkpoints(epochs as usize); - let mut forks = HashMap::new(); - let mut eth1cache = eth1cache(); - - for epoch in 0..epochs { - assert_eq!( - eth1cache.by_checkpoint().len(), - cmp::min(epoch as usize, DEFAULT_ETH1_CACHE_SIZE), - "Unexpected cache size" - ); - - let checkpoint = checkpoints - .get(epoch as usize) - .expect("should get checkpoint"); - eth1cache.insert( - *checkpoint, - Eth1FinalizationData { - eth1_data: eth1data.clone(), - eth1_deposit_index: deposits_imported, - }, - ); - // lets put a fork at every third epoch - if epoch % 3 == 0 { - let fork = random_checkpoint(epoch); - eth1cache.insert( - fork, - Eth1FinalizationData { - eth1_data: eth1data.clone(), - eth1_deposit_index: deposits_imported, - }, - ); - forks.insert(epoch as usize, fork); - } - - assert!( - eth1cache.pending_eth1().is_empty(), - "Deposits are fully imported so pending cache should be empty" - ); - if epoch >= 4 { - let finalized_epoch = (epoch - 4) as usize; - let finalized_checkpoint = if finalized_epoch % 3 == 0 { - forks.get(&finalized_epoch).expect("should get fork") - } else { - checkpoints - .get(finalized_epoch) - .expect("should get checkpoint") - }; - assert_eq!( - eth1cache.finalize(finalized_checkpoint), - Some(eth1data.clone()), - "Should have cache hit" - ); - if finalized_epoch >= 3 { - let dropped_epoch = finalized_epoch - 3; - if let Some(dropped_checkpoint) = forks.get(&dropped_epoch) { - // got checkpoint for an old fork that should no longer - // be in the cache because it is from too long ago - assert_eq!( - eth1cache.finalize(dropped_checkpoint), - None, - "Should have cache miss" - ); - } - } - } - } - } - - #[test] - fn massive_deposit_queue() { - // Simulating a situation where deposits don't get imported within an eth1 voting 
period - let eth1_voting_periods = 8; - let initial_deposits_imported = 1024; - let deposits_imported_per_epoch = MAX_DEPOSITS * SLOTS_PER_EPOCH; - let initial_deposit_queue = - deposits_imported_per_epoch * EPOCHS_PER_ETH1_VOTING_PERIOD * 2 + 32; - let new_deposits_per_voting_period = - EPOCHS_PER_ETH1_VOTING_PERIOD * deposits_imported_per_epoch / 2; - - let mut epoch_data = BTreeMap::new(); - let mut eth1s_by_count = BTreeMap::new(); - let mut eth1cache = eth1cache(); - let mut last_period_deposits = initial_deposits_imported; - for period in 0..eth1_voting_periods { - let period_deposits = initial_deposits_imported - + initial_deposit_queue - + period * new_deposits_per_voting_period; - let period_eth1_data = random_eth1_data(period_deposits); - eth1s_by_count.insert(period_eth1_data.deposit_count, period_eth1_data.clone()); - - for epoch_mod_period in 0..EPOCHS_PER_ETH1_VOTING_PERIOD { - let epoch = period * EPOCHS_PER_ETH1_VOTING_PERIOD + epoch_mod_period; - let checkpoint = random_checkpoint(epoch); - let deposits_imported = cmp::min( - period_deposits, - last_period_deposits + deposits_imported_per_epoch * epoch_mod_period, - ); - eth1cache.insert( - checkpoint, - Eth1FinalizationData { - eth1_data: period_eth1_data.clone(), - eth1_deposit_index: deposits_imported, - }, - ); - epoch_data.insert(epoch, (checkpoint, deposits_imported)); - - if epoch >= 4 { - let finalized_epoch = epoch - 4; - let (finalized_checkpoint, finalized_deposits) = epoch_data - .get(&finalized_epoch) - .expect("should get epoch data"); - - let pending_eth1s = eth1s_by_count.range((finalized_deposits + 1)..).count(); - let last_finalized_eth1 = eth1s_by_count - .range(0..(finalized_deposits + 1)) - .map(|(_, eth1)| eth1) - .next_back() - .cloned(); - assert_eq!( - eth1cache.finalize(finalized_checkpoint), - last_finalized_eth1, - "finalized checkpoint mismatch", - ); - assert_eq!( - eth1cache.pending_eth1().len(), - pending_eth1s, - "pending eth1 mismatch" - ); - } - } - - // remove 
unneeded stuff from old epochs - while epoch_data.len() > DEFAULT_ETH1_CACHE_SIZE { - let oldest_stored_epoch = epoch_data - .keys() - .next() - .cloned() - .expect("should get oldest epoch"); - epoch_data.remove(&oldest_stored_epoch); - } - last_period_deposits = period_deposits; - } - } -} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index 0eec6dc770f..4a7a430532f 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -24,8 +24,6 @@ pub mod deneb_readiness; mod early_attester_cache; pub mod electra_readiness; mod errors; -pub mod eth1_chain; -mod eth1_finalization_cache; pub mod events; pub mod execution_payload; pub mod fetch_blobs; @@ -86,7 +84,6 @@ pub use block_verification::{ pub use block_verification_types::AvailabilityPendingExecutedBlock; pub use block_verification_types::ExecutedBlock; pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; -pub use eth1_chain::{Eth1Chain, Eth1ChainBackend}; pub use events::ServerSentEventHandler; pub use execution_layer::EngineState; pub use execution_payload::NotifyExecutionLayer; diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 57012161eca..fae11de6447 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -607,12 +607,6 @@ pub static PERSIST_OP_POOL: LazyLock> = LazyLock::new(|| { "Time taken to persist the operations pool", ) }); -pub static PERSIST_ETH1_CACHE: LazyLock> = LazyLock::new(|| { - try_create_histogram( - "beacon_persist_eth1_cache", - "Time taken to persist the eth1 caches", - ) -}); pub static PERSIST_FORK_CHOICE: LazyLock> = LazyLock::new(|| { try_create_histogram( "beacon_persist_fork_choice", diff --git a/beacon_node/beacon_chain/src/schema_change.rs b/beacon_node/beacon_chain/src/schema_change.rs index fcca286b522..0abb48494a7 100644 --- a/beacon_node/beacon_chain/src/schema_change.rs +++ 
b/beacon_node/beacon_chain/src/schema_change.rs @@ -1,6 +1,7 @@ //! Utilities for managing database schema changes. mod migration_schema_v23; mod migration_schema_v24; +mod migration_schema_v25; use crate::beacon_chain::BeaconChainTypes; use std::sync::Arc; @@ -49,6 +50,14 @@ pub fn migrate_schema( let ops = migration_schema_v24::downgrade_from_v24::(db.clone())?; db.store_schema_version_atomically(to, ops) } + (SchemaVersion(24), SchemaVersion(25)) => { + let ops = migration_schema_v25::upgrade_to_v25()?; + db.store_schema_version_atomically(to, ops) + } + (SchemaVersion(25), SchemaVersion(24)) => { + let ops = migration_schema_v25::downgrade_from_v25()?; + db.store_schema_version_atomically(to, ops) + } // Anything else is an error. (_, _) => Err(HotColdDBError::UnsupportedSchemaVersion { target_version: to, diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs new file mode 100644 index 00000000000..44e8894d6fc --- /dev/null +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v25.rs @@ -0,0 +1,20 @@ +use store::{DBColumn, Error, KeyValueStoreOp}; +use tracing::info; +use types::Hash256; + +pub const ETH1_CACHE_DB_KEY: Hash256 = Hash256::ZERO; + +/// Delete the on-disk eth1 data. +pub fn upgrade_to_v25() -> Result, Error> { + info!("Deleting eth1 data from disk for v25 DB upgrade"); + Ok(vec![KeyValueStoreOp::DeleteKey( + DBColumn::Eth1Cache, + ETH1_CACHE_DB_KEY.as_slice().to_vec(), + )]) +} + +/// No-op: we don't need to recreate on-disk eth1 data, as previous versions gracefully handle +/// data missing from disk. 
+pub fn downgrade_from_v25() -> Result, Error> { + Ok(vec![]) +} diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 99341f54afc..db4e2fab264 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -5,7 +5,7 @@ use crate::kzg_utils::build_data_column_sidecars; use crate::observed_operations::ObservationOutcome; pub use crate::persisted_beacon_chain::PersistedBeaconChain; pub use crate::{ - beacon_chain::{BEACON_CHAIN_DB_KEY, ETH1_CACHE_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, + beacon_chain::{BEACON_CHAIN_DB_KEY, FORK_CHOICE_DB_KEY, OP_POOL_DB_KEY}, migrate::MigratorConfig, single_attestation::single_attestation_to_attestation, sync_committee_verification::Error as SyncCommitteeError, @@ -14,7 +14,6 @@ pub use crate::{ }; use crate::{ builder::{BeaconChainBuilder, Witness}, - eth1_chain::CachingEth1Backend, BeaconChain, BeaconChainTypes, BlockError, ChainConfig, ServerSentEventHandler, StateSkipConfig, }; @@ -116,7 +115,7 @@ pub fn get_kzg(spec: &ChainSpec) -> Arc { } pub type BaseHarnessType = - Witness, E, THotStore, TColdStore>; + Witness; pub type DiskHarnessType = BaseHarnessType, BeaconNodeBackend>; pub type EphemeralHarnessType = BaseHarnessType, MemoryStore>; @@ -575,8 +574,6 @@ where ) .task_executor(self.runtime.task_executor.clone()) .execution_layer(self.execution_layer) - .dummy_eth1_backend() - .expect("should build dummy backend") .shutdown_sender(shutdown_tx) .chain_config(chain_config) .import_all_data_columns(self.import_all_data_columns) diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index afee860c26a..1be2879e1ab 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2379,8 +2379,6 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { ) .unwrap() 
.store_migrator_config(MigratorConfig::default().blocking()) - .dummy_eth1_backend() - .expect("should build dummy backend") .slot_clock(slot_clock) .shutdown_sender(shutdown_tx) .chain_config(ChainConfig::default()) @@ -2804,10 +2802,6 @@ async fn finalizes_after_resuming_from_db() { .chain .persist_op_pool() .expect("should persist the op pool"); - harness - .chain - .persist_eth1_cache() - .expect("should persist the eth1 cache"); let original_chain = harness.chain; diff --git a/beacon_node/client/Cargo.toml b/beacon_node/client/Cargo.toml index 379b46b4b16..3c4b2572c9a 100644 --- a/beacon_node/client/Cargo.toml +++ b/beacon_node/client/Cargo.toml @@ -10,7 +10,6 @@ beacon_processor = { workspace = true } directory = { workspace = true } dirs = { workspace = true } environment = { workspace = true } -eth1 = { workspace = true } eth2 = { workspace = true } eth2_config = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index baea0c06e54..479b4b3192a 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -11,17 +11,15 @@ use beacon_chain::proposer_prep_service::start_proposer_prep_service; use beacon_chain::schema_change::migrate_schema; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, - eth1_chain::{CachingEth1Backend, Eth1Chain}, slot_clock::{SlotClock, SystemTimeSlotClock}, state_advance_timer::spawn_state_advance_timer, store::{HotColdDB, ItemStore, StoreConfig}, - BeaconChain, BeaconChainTypes, Eth1ChainBackend, MigratorConfig, ServerSentEventHandler, + BeaconChain, BeaconChainTypes, MigratorConfig, ServerSentEventHandler, }; use beacon_chain::{Kzg, LightClientProducerEvent}; use beacon_processor::{BeaconProcessor, BeaconProcessorChannels}; use beacon_processor::{BeaconProcessorConfig, BeaconProcessorQueueLengths}; use environment::RuntimeContext; -use eth1::{Config as Eth1Config, Service as Eth1Service}; use eth2::{ 
types::{BlockId, StateId}, BeaconNodeHttpClient, Error as ApiError, Timeouts, @@ -29,7 +27,7 @@ use eth2::{ use execution_layer::test_utils::generate_genesis_header; use execution_layer::ExecutionLayer; use futures::channel::mpsc::Receiver; -use genesis::{interop_genesis_state, Eth1GenesisService, DEFAULT_ETH1_BLOCK_HASH}; +use genesis::{interop_genesis_state, DEFAULT_ETH1_BLOCK_HASH}; use lighthouse_network::{prometheus_client::registry::Registry, NetworkGlobals}; use monitoring_api::{MonitoringHttpClient, ProcessType}; use network::{NetworkConfig, NetworkSenders, NetworkService}; @@ -37,14 +35,12 @@ use rand::rngs::{OsRng, StdRng}; use rand::SeedableRng; use slasher::Slasher; use slasher_service::SlasherService; -use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; use store::database::interface::BeaconNodeBackend; use timer::spawn_timer; -use tokio::sync::oneshot; use tracing::{debug, info, warn}; use types::{ test_utils::generate_deterministic_keypairs, BeaconState, BlobSidecarList, ChainSpec, EthSpec, @@ -80,7 +76,6 @@ pub struct ClientBuilder { chain_spec: Option>, beacon_chain_builder: Option>, beacon_chain: Option>>, - eth1_service: Option, network_globals: Option>>, network_senders: Option>, libp2p_registry: Option, @@ -95,11 +90,10 @@ pub struct ClientBuilder { eth_spec_instance: T::EthSpec, } -impl - ClientBuilder> +impl + ClientBuilder> where TSlotClock: SlotClock + Clone + 'static, - TEth1Backend: Eth1ChainBackend + 'static, E: EthSpec + 'static, THotStore: ItemStore + 'static, TColdStore: ItemStore + 'static, @@ -115,7 +109,6 @@ where chain_spec: None, beacon_chain_builder: None, beacon_chain: None, - eth1_service: None, network_globals: None, network_senders: None, libp2p_registry: None, @@ -261,7 +254,7 @@ where client_genesis }; - let (beacon_chain_builder, eth1_service_option) = match client_genesis { + let beacon_chain_builder = match client_genesis { 
ClientGenesis::Interop { validator_count, genesis_time, @@ -274,7 +267,7 @@ where None, &spec, )?; - builder.genesis_state(genesis_state).map(|v| (v, None))? + builder.genesis_state(genesis_state)? } ClientGenesis::InteropMerge { validator_count, @@ -289,7 +282,7 @@ where execution_payload_header, &spec, )?; - builder.genesis_state(genesis_state).map(|v| (v, None))? + builder.genesis_state(genesis_state)? } ClientGenesis::GenesisState => { info!("Starting from known genesis state"); @@ -337,7 +330,7 @@ where } } - builder.genesis_state(genesis_state).map(|v| (v, None))? + builder.genesis_state(genesis_state)? } ClientGenesis::WeakSubjSszBytes { anchor_state_bytes, @@ -366,14 +359,12 @@ where }; let genesis_state = genesis_state(&runtime_context, &config).await?; - builder - .weak_subjectivity_state( - anchor_state, - anchor_block, - anchor_blobs, - genesis_state, - ) - .map(|v| (v, None))? + builder.weak_subjectivity_state( + anchor_state, + anchor_block, + anchor_blobs, + genesis_state, + )? } ClientGenesis::CheckpointSyncUrl { url } => { info!( @@ -391,47 +382,6 @@ where )), ); - let deposit_snapshot = if config.sync_eth1_chain { - // We want to fetch deposit snapshot before fetching the finalized beacon state to - // ensure that the snapshot is not newer than the beacon state that satisfies the - // deposit finalization conditions - debug!("Downloading deposit snapshot"); - let deposit_snapshot_result = remote - .get_deposit_snapshot() - .await - .map_err(|e| match e { - ApiError::InvalidSsz(e) => format!( - "Unable to parse SSZ: {:?}. 
Ensure the checkpoint-sync-url refers to a \ - node for the correct network", - e - ), - e => format!("Error fetching deposit snapshot from remote: {:?}", e), - }); - match deposit_snapshot_result { - Ok(Some(deposit_snapshot)) => { - if deposit_snapshot.is_valid() { - Some(deposit_snapshot) - } else { - warn!("Remote BN sent invalid deposit snapshot!"); - None - } - } - Ok(None) => { - warn!("Remote BN does not support EIP-4881 fast deposit sync"); - None - } - Err(e) => { - warn!( - error = e, - "Remote BN does not support EIP-4881 fast deposit sync" - ); - None - } - } - } else { - None - }; - debug!("Downloading finalized state"); let state = remote .get_debug_beacon_states_ssz::(StateId::Finalized, &spec) @@ -491,119 +441,14 @@ where "Loaded checkpoint block and state" ); - let service = - deposit_snapshot.and_then(|snapshot| match Eth1Service::from_deposit_snapshot( - config.eth1, - spec.clone(), - &snapshot, - ) { - Ok(service) => { - info!( - deposits_loaded = snapshot.deposit_count, - "Loaded deposit tree snapshot" - ); - Some(service) - } - Err(e) => { - warn!(error = ?e, - "Unable to load deposit snapshot" - ); - None - } - }); - - builder - .weak_subjectivity_state(state, block, blobs, genesis_state) - .map(|v| (v, service))? + builder.weak_subjectivity_state(state, block, blobs, genesis_state)? } ClientGenesis::DepositContract => { - info!( - eth1_endpoints = ?config.eth1.endpoint, - contract_deploy_block = config.eth1.deposit_contract_deploy_block, - deposit_contract = &config.eth1.deposit_contract_address, - "Waiting for eth2 genesis from eth1" - ); - - let genesis_service = - Eth1GenesisService::new(config.eth1, context.eth2_config().spec.clone())?; - - // If the HTTP API server is enabled, start an instance of it where it only - // contains a reference to the eth1 service (all non-eth1 endpoints will fail - // gracefully). 
- // - // Later in this function we will shutdown this temporary "waiting for genesis" - // server so the real one can be started later. - let (exit_tx, exit_rx) = oneshot::channel::<()>(); - let http_listen_opt = if self.http_api_config.enabled { - #[allow(clippy::type_complexity)] - let ctx: Arc< - http_api::Context< - Witness, - >, - > = Arc::new(http_api::Context { - config: self.http_api_config.clone(), - chain: None, - network_senders: None, - network_globals: None, - beacon_processor_send: None, - eth1_service: Some(genesis_service.eth1_service.clone()), - sse_logging_components: runtime_context.sse_logging_components.clone(), - }); - - // Discard the error from the oneshot. - let exit_future = async { - let _ = exit_rx.await; - }; - - let (listen_addr, server) = http_api::serve(ctx, exit_future) - .map_err(|e| format!("Unable to start HTTP API server: {:?}", e))?; - - let http_api_task = async move { - server.await; - debug!("HTTP API server task ended"); - }; - - context - .clone() - .executor - .spawn_without_exit(http_api_task, "http-api"); - - Some(listen_addr) - } else { - None - }; - - let genesis_state = genesis_service - .wait_for_genesis_state(Duration::from_millis( - ETH1_GENESIS_UPDATE_INTERVAL_MILLIS, - )) - .await?; - - let _ = exit_tx.send(()); - - if let Some(http_listen) = http_listen_opt { - // This is a bit of a hack to ensure that the HTTP server has indeed shutdown. - // - // We will restart it again after we've finished setting up for genesis. - while TcpListener::bind(http_listen).is_err() { - warn!( - port = %http_listen, - "Waiting for HTTP server port to open" - ); - tokio::time::sleep(Duration::from_secs(1)).await; - } - } - - builder - .genesis_state(genesis_state) - .map(|v| (v, Some(genesis_service.into_core_service())))? 
+ return Err("Loading genesis from deposit contract no longer supported".to_string()) } - ClientGenesis::FromStore => builder.resume_from_db().map(|v| (v, None))?, + ClientGenesis::FromStore => builder.resume_from_db()?, }; - if config.sync_eth1_chain { - self.eth1_service = eth1_service_option; - } self.beacon_chain_builder = Some(beacon_chain_builder); Ok(self) } @@ -753,7 +598,7 @@ where #[allow(clippy::type_complexity)] pub fn build( mut self, - ) -> Result>, String> { + ) -> Result>, String> { let runtime_context = self .runtime_context .as_ref() @@ -773,7 +618,6 @@ where chain: self.beacon_chain.clone(), network_senders: self.network_senders.clone(), network_globals: self.network_globals.clone(), - eth1_service: self.eth1_service.clone(), beacon_processor_send: Some(beacon_processor_channels.beacon_processor_tx.clone()), sse_logging_components: runtime_context.sse_logging_components.clone(), }); @@ -943,11 +787,10 @@ where } } -impl - ClientBuilder> +impl + ClientBuilder> where TSlotClock: SlotClock + Clone + 'static, - TEth1Backend: Eth1ChainBackend + 'static, E: EthSpec + 'static, THotStore: ItemStore + 'static, TColdStore: ItemStore + 'static, @@ -980,11 +823,10 @@ where } } -impl - ClientBuilder, BeaconNodeBackend>> +impl + ClientBuilder, BeaconNodeBackend>> where TSlotClock: SlotClock + 'static, - TEth1Backend: Eth1ChainBackend + 'static, E: EthSpec + 'static, { /// Specifies that the `Client` should use a `HotColdDB` database. 
@@ -1003,9 +845,8 @@ where self.db_path = Some(hot_path.into()); self.freezer_db_path = Some(cold_path.into()); - let schema_upgrade = |db, from, to| { - migrate_schema::>(db, from, to) - }; + let schema_upgrade = + |db, from, to| migrate_schema::>(db, from, to); let store = HotColdDB::open( hot_path, @@ -1021,102 +862,8 @@ where } } -impl - ClientBuilder, E, THotStore, TColdStore>> -where - TSlotClock: SlotClock + 'static, - E: EthSpec + 'static, - THotStore: ItemStore + 'static, - TColdStore: ItemStore + 'static, -{ - /// Specifies that the `BeaconChain` should cache eth1 blocks/logs from a remote eth1 node - /// (e.g., Parity/Geth) and refer to that cache when collecting deposits or eth1 votes during - /// block production. - pub async fn caching_eth1_backend(mut self, config: Eth1Config) -> Result { - let context = self - .runtime_context - .as_ref() - .ok_or("caching_eth1_backend requires a runtime_context")? - .service_context("deposit_contract_rpc".into()); - let beacon_chain_builder = self - .beacon_chain_builder - .ok_or("caching_eth1_backend requires a beacon_chain_builder")?; - let spec = self - .chain_spec - .clone() - .ok_or("caching_eth1_backend requires a chain spec")?; - - let backend = if let Some(eth1_service_from_genesis) = self.eth1_service { - eth1_service_from_genesis.update_config(config)?; - - // This cache is not useful because it's first (earliest) block likely the block that - // triggered genesis. - // - // In order to vote we need to be able to go back at least 2 * `ETH1_FOLLOW_DISTANCE` - // from the genesis-triggering block. Presently the block cache does not support - // importing blocks with decreasing block numbers, it only accepts them in increasing - // order. If this turns out to be a bottleneck we can update the block cache to allow - // adding earlier blocks too. 
- eth1_service_from_genesis.drop_block_cache(); - - CachingEth1Backend::from_service(eth1_service_from_genesis) - } else if config.purge_cache { - CachingEth1Backend::new(config, spec)? - } else { - beacon_chain_builder - .get_persisted_eth1_backend()? - .map(|persisted| { - Eth1Chain::from_ssz_container(&persisted, config.clone(), spec.clone()) - .map(|chain| chain.into_backend()) - }) - .unwrap_or_else(|| CachingEth1Backend::new(config, spec.clone()))? - }; - - self.eth1_service = Some(backend.core.clone()); - - // Starts the service that connects to an eth1 node and periodically updates caches. - backend.start(context.executor); - - self.beacon_chain_builder = Some(beacon_chain_builder.eth1_backend(Some(backend))); - - Ok(self) - } - - /// Do not use any eth1 backend. The client will not be able to produce beacon blocks. - pub fn no_eth1_backend(mut self) -> Result { - let beacon_chain_builder = self - .beacon_chain_builder - .ok_or("caching_eth1_backend requires a beacon_chain_builder")?; - - self.beacon_chain_builder = Some(beacon_chain_builder.no_eth1_backend()); - - Ok(self) - } - - /// Use an eth1 backend that can produce blocks but is not connected to an Eth1 node. - /// - /// This backend will never produce deposits so it's impossible to add validators after - /// genesis. The `Eth1Data` votes will be deterministic junk data. - /// - /// ## Notes - /// - /// The client is given the `CachingEth1Backend` type, but the http backend is never started and the - /// caches are never used. 
- pub fn dummy_eth1_backend(mut self) -> Result { - let beacon_chain_builder = self - .beacon_chain_builder - .ok_or("caching_eth1_backend requires a beacon_chain_builder")?; - - self.beacon_chain_builder = Some(beacon_chain_builder.dummy_eth1_backend()?); - - Ok(self) - } -} - -impl - ClientBuilder> +impl ClientBuilder> where - TEth1Backend: Eth1ChainBackend + 'static, E: EthSpec + 'static, THotStore: ItemStore + 'static, TColdStore: ItemStore + 'static, diff --git a/beacon_node/client/src/config.rs b/beacon_node/client/src/config.rs index becc781ed32..495df7d5f7d 100644 --- a/beacon_node/client/src/config.rs +++ b/beacon_node/client/src/config.rs @@ -59,7 +59,6 @@ pub struct Config { /// Path where the blobs database will be located if blobs should be in a separate database. pub blobs_db_path: Option, pub log_file: PathBuf, - pub sync_eth1_chain: bool, /// Graffiti to be inserted everytime we create a block if the validator doesn't specify. pub beacon_graffiti: GraffitiOrigin, pub validator_monitor: ValidatorMonitorConfig, @@ -70,7 +69,6 @@ pub struct Config { pub store: store::StoreConfig, pub network: network::NetworkConfig, pub chain: beacon_chain::ChainConfig, - pub eth1: eth1::Config, pub execution_layer: Option, pub trusted_setup: TrustedSetup, pub http_api: http_api::Config, @@ -99,8 +97,6 @@ impl Default for Config { store: <_>::default(), network: NetworkConfig::default(), chain: <_>::default(), - sync_eth1_chain: true, - eth1: <_>::default(), execution_layer: None, trusted_setup, beacon_graffiti: GraffitiOrigin::default(), diff --git a/beacon_node/client/src/lib.rs b/beacon_node/client/src/lib.rs index 0b6550c208d..916dae6db06 100644 --- a/beacon_node/client/src/lib.rs +++ b/beacon_node/client/src/lib.rs @@ -10,7 +10,7 @@ use lighthouse_network::{Enr, Multiaddr, NetworkGlobals}; use std::net::SocketAddr; use std::sync::Arc; -pub use beacon_chain::{BeaconChainTypes, Eth1ChainBackend}; +pub use beacon_chain::BeaconChainTypes; pub use 
builder::ClientBuilder; pub use config::{ClientGenesis, Config as ClientConfig}; pub use eth2_config::Eth2Config; diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 53c9c85c001..ea9fbe2894a 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -60,7 +60,6 @@ pub fn spawn_notifier( wait_time = estimated_time_pretty(Some(next_slot.as_secs() as f64)), "Waiting for genesis" ); - eth1_logging(&beacon_chain); bellatrix_readiness_logging(Slot::new(0), &beacon_chain).await; capella_readiness_logging(Slot::new(0), &beacon_chain).await; genesis_execution_payload_logging(&beacon_chain).await; @@ -309,7 +308,6 @@ pub fn spawn_notifier( ); } - eth1_logging(&beacon_chain); bellatrix_readiness_logging(current_slot, &beacon_chain).await; capella_readiness_logging(current_slot, &beacon_chain).await; deneb_readiness_logging(current_slot, &beacon_chain).await; @@ -677,53 +675,6 @@ async fn genesis_execution_payload_logging(beacon_chain: &B } } -fn eth1_logging(beacon_chain: &BeaconChain) { - let current_slot_opt = beacon_chain.slot().ok(); - - // Perform some logging about the eth1 chain - if let Some(eth1_chain) = beacon_chain.eth1_chain.as_ref() { - // No need to do logging if using the dummy backend. 
- if eth1_chain.is_dummy_backend() { - return; - } - - if let Some(status) = eth1_chain.sync_status( - beacon_chain.genesis_time, - current_slot_opt, - &beacon_chain.spec, - ) { - debug!( - eth1_head_block = status.head_block_number, - latest_cached_block_number = status.latest_cached_block_number, - latest_cached_timestamp = status.latest_cached_block_timestamp, - voting_target_timestamp = status.voting_target_timestamp, - ready = status.lighthouse_is_cached_and_ready, - "Eth1 cache sync status" - ); - - if !status.lighthouse_is_cached_and_ready { - let voting_target_timestamp = status.voting_target_timestamp; - - let distance = status - .latest_cached_block_timestamp - .map(|latest| { - voting_target_timestamp.saturating_sub(latest) - / beacon_chain.spec.seconds_per_eth1_block - }) - .map(|distance| distance.to_string()) - .unwrap_or_else(|| "initializing deposits".to_string()); - - warn!( - est_blocks_remaining = distance, - "Syncing deposit contract block cache" - ); - } - } else { - error!("Unable to determine deposit contract sync status"); - } - } -} - /// Returns the peer count, returning something helpful if it's `usize::MAX` (effectively a /// `None` value). 
fn peer_count_pretty(peer_count: usize) -> String { diff --git a/beacon_node/eth1/Cargo.toml b/beacon_node/eth1/Cargo.toml deleted file mode 100644 index f834ad7eef5..00000000000 --- a/beacon_node/eth1/Cargo.toml +++ /dev/null @@ -1,30 +0,0 @@ -[package] -name = "eth1" -version = "0.2.0" -authors = ["Paul Hauner "] -edition = { workspace = true } - -[dependencies] -eth2 = { workspace = true } -ethereum_ssz = { workspace = true } -ethereum_ssz_derive = { workspace = true } -execution_layer = { workspace = true } -futures = { workspace = true } -logging = { workspace = true } -merkle_proof = { workspace = true } -metrics = { workspace = true } -parking_lot = { workspace = true } -sensitive_url = { workspace = true } -serde = { workspace = true } -state_processing = { workspace = true } -superstruct = { workspace = true } -task_executor = { workspace = true } -tokio = { workspace = true } -tracing = { workspace = true } -tree_hash = { workspace = true } -types = { workspace = true } - -[dev-dependencies] -environment = { workspace = true } -eth1_test_rig = { workspace = true } -serde_yaml = { workspace = true } diff --git a/beacon_node/eth1/src/block_cache.rs b/beacon_node/eth1/src/block_cache.rs deleted file mode 100644 index 9c840aea210..00000000000 --- a/beacon_node/eth1/src/block_cache.rs +++ /dev/null @@ -1,303 +0,0 @@ -use ssz_derive::{Decode, Encode}; -use std::collections::HashMap; -use std::ops::RangeInclusive; - -pub use eth2::lighthouse::Eth1Block; -use eth2::types::Hash256; -use std::sync::Arc; - -#[derive(Debug, PartialEq, Clone)] -pub enum Error { - /// The timestamp of each block equal to or later than the block prior to it. - InconsistentTimestamp { parent: u64, child: u64 }, - /// Some `Eth1Block` was provided with the same block number but different data. The source - /// of eth1 data is inconsistent. - Conflicting(u64), - /// The given block was not one block number higher than the highest known block number. 
- NonConsecutive { given: u64, expected: u64 }, - /// Some invariant was violated, there is a likely bug in the code. - Internal(String), -} - -/// Stores block and deposit contract information and provides queries based upon the block -/// timestamp. -#[derive(Debug, PartialEq, Clone, Default, Encode, Decode)] -pub struct BlockCache { - blocks: Vec>, - #[ssz(skip_serializing, skip_deserializing)] - by_hash: HashMap>, -} - -impl BlockCache { - /// Returns the number of blocks stored in `self`. - pub fn len(&self) -> usize { - self.blocks.len() - } - - /// True if the cache does not store any blocks. - pub fn is_empty(&self) -> bool { - self.blocks.is_empty() - } - - /// Returns the earliest (lowest timestamp) block, if any. - pub fn earliest_block(&self) -> Option<&Eth1Block> { - self.blocks.first().map(|ptr| ptr.as_ref()) - } - - /// Returns the latest (highest timestamp) block, if any. - pub fn latest_block(&self) -> Option<&Eth1Block> { - self.blocks.last().map(|ptr| ptr.as_ref()) - } - - /// Returns the timestamp of the earliest block in the cache (if any). - pub fn earliest_block_timestamp(&self) -> Option { - self.blocks.first().map(|block| block.timestamp) - } - - /// Returns the timestamp of the latest block in the cache (if any). - pub fn latest_block_timestamp(&self) -> Option { - self.blocks.last().map(|block| block.timestamp) - } - - /// Returns the lowest block number stored. - pub fn lowest_block_number(&self) -> Option { - self.blocks.first().map(|block| block.number) - } - - /// Returns the highest block number stored. - pub fn highest_block_number(&self) -> Option { - self.blocks.last().map(|block| block.number) - } - - /// Returns an iterator over all blocks. - /// - /// Blocks a guaranteed to be returned with; - /// - /// - Monotonically increasing block numbers. - /// - Non-uniformly increasing block timestamps. 
- pub fn iter(&self) -> impl DoubleEndedIterator + Clone { - self.blocks.iter().map(|ptr| ptr.as_ref()) - } - - /// Shortens the cache, keeping the latest (by block number) `len` blocks while dropping the - /// rest. - /// - /// If `len` is greater than the vector's current length, this has no effect. - pub fn truncate(&mut self, len: usize) { - if len < self.blocks.len() { - let remaining = self.blocks.split_off(self.blocks.len() - len); - for block in &self.blocks { - self.by_hash.remove(&block.hash); - } - self.blocks = remaining; - } - } - - /// Returns the range of block numbers stored in the block cache. All blocks in this range can - /// be accessed. - fn available_block_numbers(&self) -> Option> { - Some(self.blocks.first()?.number..=self.blocks.last()?.number) - } - - /// Returns a block with the corresponding number, if any. - pub fn block_by_number(&self, block_number: u64) -> Option<&Eth1Block> { - self.blocks - .get( - self.blocks - .as_slice() - .binary_search_by(|block| block.number.cmp(&block_number)) - .ok()?, - ) - .map(|ptr| ptr.as_ref()) - } - - /// Returns a block with the corresponding hash, if any. - pub fn block_by_hash(&self, block_hash: &Hash256) -> Option<&Eth1Block> { - self.by_hash.get(block_hash).map(|ptr| ptr.as_ref()) - } - - /// Rebuilds the by_hash map - pub fn rebuild_by_hash_map(&mut self) { - self.by_hash.clear(); - for block in self.blocks.iter() { - self.by_hash.insert(block.hash, block.clone()); - } - } - - /// Insert an `Eth1Snapshot` into `self`, allowing future queries. - /// - /// Allows inserting either: - /// - /// - The root block (i.e., any block if there are no existing blocks), or, - /// - An immediate child of the most recent (highest block number) block. - /// - /// ## Errors - /// - /// - If the cache is not empty and `item.block.block_number - 1` is not already in `self`. - /// - If `item.block.block_number` is in `self`, but is not identical to the supplied - /// `Eth1Snapshot`. 
- /// - If `item.block.timestamp` is prior to the parent. - pub fn insert_root_or_child(&mut self, block: Eth1Block) -> Result<(), Error> { - let expected_block_number = self - .highest_block_number() - .map(|n| n + 1) - .unwrap_or_else(|| block.number); - - // If there are already some cached blocks, check to see if the new block number is one of - // them. - // - // If the block is already known, check to see the given block is identical to it. If not, - // raise an inconsistency error. This is mostly likely caused by some fork on the eth1 - // chain. - if let Some(local) = self.available_block_numbers() { - if local.contains(&block.number) { - let known_block = self.block_by_number(block.number).ok_or_else(|| { - Error::Internal("An expected block was not present".to_string()) - })?; - - if known_block == &block { - return Ok(()); - } else { - return Err(Error::Conflicting(block.number)); - }; - } - } - - // Only permit blocks when it's either: - // - // - The first block inserted. - // - Exactly one block number higher than the highest known block number. - if block.number != expected_block_number { - return Err(Error::NonConsecutive { - given: block.number, - expected: expected_block_number, - }); - } - - // If the block is not the first block inserted, ensure that its timestamp is not higher - // than its parents. 
- if let Some(previous_block) = self.blocks.last() { - if previous_block.timestamp > block.timestamp { - return Err(Error::InconsistentTimestamp { - parent: previous_block.timestamp, - child: block.timestamp, - }); - } - } - - let ptr = Arc::new(block); - self.by_hash.insert(ptr.hash, ptr.clone()); - self.blocks.push(ptr); - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use types::FixedBytesExtended; - - use super::*; - - fn get_block(i: u64, interval_secs: u64) -> Eth1Block { - Eth1Block { - hash: Hash256::from_low_u64_be(i), - timestamp: i * interval_secs, - number: i, - deposit_root: Some(Hash256::from_low_u64_be(i << 32)), - deposit_count: Some(i), - } - } - - fn get_blocks(n: usize, interval_secs: u64) -> Vec { - (0..n as u64).map(|i| get_block(i, interval_secs)).collect() - } - - fn insert(cache: &mut BlockCache, s: Eth1Block) -> Result<(), Error> { - cache.insert_root_or_child(s) - } - - #[test] - fn truncate() { - let n = 16; - let blocks = get_blocks(n, 10); - - let mut cache = BlockCache::default(); - - for block in blocks { - insert(&mut cache, block.clone()).expect("should add consecutive blocks"); - } - - for len in &[0, 1, 2, 3, 4, 8, 15, 16] { - let mut cache = cache.clone(); - - cache.truncate(*len); - - assert_eq!( - cache.blocks.len(), - *len, - "should truncate to length: {}", - *len - ); - } - - let mut cache_2 = cache; - cache_2.truncate(17); - assert_eq!( - cache_2.blocks.len(), - n, - "truncate to larger than n should be a no-op" - ); - } - - #[test] - fn inserts() { - let n = 16; - let blocks = get_blocks(n, 10); - - let mut cache = BlockCache::default(); - - for block in blocks { - insert(&mut cache, block.clone()).expect("should add consecutive blocks"); - } - - // No error for re-adding a block identical to one that exists. - assert!(insert(&mut cache, get_block(n as u64 - 1, 10)).is_ok()); - - // Error for re-adding a block that is different to the one that exists. 
- assert!(insert(&mut cache, get_block(n as u64 - 1, 11)).is_err()); - - // Error for adding non-consecutive blocks. - assert!(insert(&mut cache, get_block(n as u64 + 1, 10)).is_err()); - assert!(insert(&mut cache, get_block(n as u64 + 2, 10)).is_err()); - - // Error for adding timestamp prior to previous. - assert!(insert(&mut cache, get_block(n as u64, 1)).is_err()); - // Double check to make sure previous test was only affected by timestamp. - assert!(insert(&mut cache, get_block(n as u64, 10)).is_ok()); - } - - #[test] - fn duplicate_timestamp() { - let mut blocks = get_blocks(7, 10); - - blocks[0].timestamp = 0; - blocks[1].timestamp = 10; - blocks[2].timestamp = 10; - blocks[3].timestamp = 20; - blocks[4].timestamp = 30; - blocks[5].timestamp = 40; - blocks[6].timestamp = 40; - - let mut cache = BlockCache::default(); - - for block in &blocks { - insert(&mut cache, block.clone()) - .expect("should add consecutive blocks with duplicate timestamps"); - } - - let blocks = blocks.into_iter().map(Arc::new).collect::>(); - - assert_eq!(cache.blocks, blocks, "should have added all blocks"); - } -} diff --git a/beacon_node/eth1/src/deposit_cache.rs b/beacon_node/eth1/src/deposit_cache.rs deleted file mode 100644 index a2d4a1cf06d..00000000000 --- a/beacon_node/eth1/src/deposit_cache.rs +++ /dev/null @@ -1,1090 +0,0 @@ -use crate::{DepositLog, Eth1Block}; -use ssz_derive::{Decode, Encode}; -use state_processing::common::DepositDataTree; -use std::cmp::Ordering; -use superstruct::superstruct; -use tree_hash::TreeHash; -use types::{Deposit, DepositTreeSnapshot, Hash256, DEPOSIT_TREE_DEPTH}; - -#[derive(Debug, PartialEq)] -pub enum Error { - /// A deposit log was added when a prior deposit was not already in the cache. - /// - /// Logs have to be added with monotonically-increasing block numbers. - NonConsecutive { log_index: u64, expected: usize }, - /// The eth1 event log data was unable to be parsed. 
- LogParse(String), - /// There are insufficient deposits in the cache to fulfil the request. - InsufficientDeposits { - known_deposits: usize, - requested: u64, - }, - /// A log with the given index is already present in the cache and it does not match the one - /// provided. - DuplicateDistinctLog(u64), - /// Attempted to insert log with given index after the log had been finalized - FinalizedLogInsert { - log_index: u64, - finalized_index: u64, - }, - /// The deposit count must always be large enough to account for the requested deposit range. - /// - /// E.g., you cannot request deposit 10 when the deposit count is 9. - DepositCountInvalid { deposit_count: u64, range_end: u64 }, - /// You can't request deposits on or before the finalized deposit - DepositRangeInvalid { - range_start: u64, - finalized_count: u64, - }, - /// You can't finalize what's already been finalized and the cache must have the logs - /// that you wish to finalize - InvalidFinalizeIndex { - requested_count: u64, - currently_finalized: u64, - deposit_count: u64, - }, - /// Error with the merkle tree for deposits. - DepositTree(merkle_proof::MerkleTreeError), - /// An unexpected condition was encountered. 
- Internal(String), - /// This is for errors that should never occur - PleaseNotifyTheDevs, -} - -pub type SszDepositCache = SszDepositCacheV13; - -#[superstruct( - variants(V13), - variant_attributes(derive(Encode, Decode, Clone)), - no_enum -)] -pub struct SszDepositCache { - pub logs: Vec, - pub leaves: Vec, - pub deposit_contract_deploy_block: u64, - pub finalized_deposit_count: u64, - pub finalized_block_height: u64, - pub deposit_tree_snapshot: Option, - pub deposit_roots: Vec, -} - -impl SszDepositCache { - pub fn from_deposit_cache(cache: &DepositCache) -> Self { - Self { - logs: cache.logs.clone(), - leaves: cache.leaves.clone(), - deposit_contract_deploy_block: cache.deposit_contract_deploy_block, - finalized_deposit_count: cache.finalized_deposit_count, - finalized_block_height: cache.finalized_block_height, - deposit_tree_snapshot: cache.deposit_tree.get_snapshot(), - deposit_roots: cache.deposit_roots.clone(), - } - } - - pub fn to_deposit_cache(&self) -> Result { - let deposit_tree = self - .deposit_tree_snapshot - .as_ref() - .map(|snapshot| { - let mut tree = DepositDataTree::from_snapshot(snapshot, DEPOSIT_TREE_DEPTH) - .map_err(|e| format!("Invalid SszDepositCache: {:?}", e))?; - for leaf in &self.leaves { - tree.push_leaf(*leaf).map_err(|e| { - format!("Invalid SszDepositCache: unable to push leaf: {:?}", e) - })?; - } - Ok::<_, String>(tree) - }) - .unwrap_or_else(|| { - // deposit_tree_snapshot = None (tree was never finalized) - // Create DepositDataTree from leaves - Ok(DepositDataTree::create( - &self.leaves, - self.leaves.len(), - DEPOSIT_TREE_DEPTH, - )) - })?; - - // Check for invalid SszDepositCache conditions - if self.leaves.len() != self.logs.len() { - return Err("Invalid SszDepositCache: logs and leaves should have equal length".into()); - } - // `deposit_roots` also includes the zero root - if self.leaves.len() + 1 != self.deposit_roots.len() { - return Err( - "Invalid SszDepositCache: deposit_roots length must be only one more than 
leaves" - .into(), - ); - } - Ok(DepositCache { - logs: self.logs.clone(), - leaves: self.leaves.clone(), - deposit_contract_deploy_block: self.deposit_contract_deploy_block, - finalized_deposit_count: self.finalized_deposit_count, - finalized_block_height: self.finalized_block_height, - deposit_tree, - deposit_roots: self.deposit_roots.clone(), - }) - } -} - -/// Mirrors the merkle tree of deposits in the eth1 deposit contract. -/// -/// Provides `Deposit` objects with merkle proofs included. -#[cfg_attr(test, derive(PartialEq))] -pub struct DepositCache { - logs: Vec, - leaves: Vec, - deposit_contract_deploy_block: u64, - finalized_deposit_count: u64, - finalized_block_height: u64, - /// An incremental merkle tree which represents the current state of the - /// deposit contract tree. - deposit_tree: DepositDataTree, - /// Vector of deposit roots. `deposit_roots[i]` denotes `deposit_root` at - /// `deposit_index` `i`. - deposit_roots: Vec, -} - -impl Default for DepositCache { - fn default() -> Self { - let deposit_tree = DepositDataTree::create(&[], 0, DEPOSIT_TREE_DEPTH); - let deposit_roots = vec![deposit_tree.root()]; - DepositCache { - logs: Vec::new(), - leaves: Vec::new(), - deposit_contract_deploy_block: 1, - finalized_deposit_count: 0, - finalized_block_height: 0, - deposit_tree, - deposit_roots, - } - } -} - -#[derive(Debug, PartialEq)] -pub enum DepositCacheInsertOutcome { - Inserted, - Duplicate, -} - -impl DepositCache { - /// Create new `DepositCache` given block number at which deposit - /// contract was deployed. 
- pub fn new(deposit_contract_deploy_block: u64) -> Self { - DepositCache { - deposit_contract_deploy_block, - finalized_block_height: deposit_contract_deploy_block.saturating_sub(1), - ..Self::default() - } - } - - pub fn from_deposit_snapshot( - deposit_contract_deploy_block: u64, - snapshot: &DepositTreeSnapshot, - ) -> Result { - let deposit_tree = DepositDataTree::from_snapshot(snapshot, DEPOSIT_TREE_DEPTH) - .map_err(|e| format!("Invalid DepositSnapshot: {:?}", e))?; - Ok(DepositCache { - logs: Vec::new(), - leaves: Vec::new(), - deposit_contract_deploy_block, - finalized_deposit_count: snapshot.deposit_count, - finalized_block_height: snapshot.execution_block_height, - deposit_tree, - deposit_roots: vec![snapshot.deposit_root], - }) - } - - /// Returns the number of deposits the cache stores - pub fn len(&self) -> usize { - self.finalized_deposit_count as usize + self.logs.len() - } - - /// True if the cache does not store any blocks. - pub fn is_empty(&self) -> bool { - self.finalized_deposit_count != 0 && self.logs.is_empty() - } - - /// Returns the block number for the most recent deposit in the cache. - pub fn latest_block_number(&self) -> u64 { - self.logs - .last() - .map(|log| log.block_number) - .unwrap_or(self.finalized_block_height) - } - - /// Returns an iterator over all the logs in `self` that aren't finalized. - pub fn iter(&self) -> impl Iterator { - self.logs.iter() - } - - /// Returns the deposit log with INDEX i. 
- pub fn get_log(&self, i: usize) -> Option<&DepositLog> { - let finalized_deposit_count = self.finalized_deposit_count as usize; - if i < finalized_deposit_count { - None - } else { - self.logs.get(i - finalized_deposit_count) - } - } - - /// Returns the deposit root with DEPOSIT COUNT (not index) i - pub fn get_root(&self, i: usize) -> Option<&Hash256> { - let finalized_deposit_count = self.finalized_deposit_count as usize; - if i < finalized_deposit_count { - None - } else { - self.deposit_roots.get(i - finalized_deposit_count) - } - } - - /// Returns the finalized deposit count - pub fn finalized_deposit_count(&self) -> u64 { - self.finalized_deposit_count - } - - /// Finalizes the cache up to `eth1_block.deposit_count`. - pub fn finalize(&mut self, eth1_block: Eth1Block) -> Result<(), Error> { - let deposits_to_finalize = eth1_block.deposit_count.ok_or_else(|| { - Error::Internal("Eth1Block did not contain deposit_count".to_string()) - })?; - - let currently_finalized = self.finalized_deposit_count; - if deposits_to_finalize > self.len() as u64 || deposits_to_finalize <= currently_finalized { - Err(Error::InvalidFinalizeIndex { - requested_count: deposits_to_finalize, - currently_finalized, - deposit_count: self.len() as u64, - }) - } else { - let finalized_log = self - .get_log((deposits_to_finalize - 1) as usize) - .cloned() - .ok_or(Error::PleaseNotifyTheDevs)?; - let drop = (deposits_to_finalize - currently_finalized) as usize; - self.deposit_tree - .finalize(eth1_block.into()) - .map_err(Error::DepositTree)?; - self.logs.drain(0..drop); - self.leaves.drain(0..drop); - self.deposit_roots.drain(0..drop); - self.finalized_deposit_count = deposits_to_finalize; - self.finalized_block_height = finalized_log.block_number; - - Ok(()) - } - } - - /// Returns the deposit tree snapshot (if tree is finalized) - pub fn get_deposit_snapshot(&self) -> Option { - self.deposit_tree.get_snapshot() - } - - /// Adds `log` to self. 
- /// - /// This function enforces that `logs` are imported one-by-one with no gaps between - /// `log.index`, starting at `log.index == 0`. - /// - /// ## Errors - /// - /// - If a log with index `log.index - 1` is not already present in `self` (ignored when empty). - /// - If a log with `log.index` is already known, but the given `log` is distinct to it. - pub fn insert_log(&mut self, log: DepositLog) -> Result { - match log.index.cmp(&(self.len() as u64)) { - Ordering::Equal => { - let deposit = log.deposit_data.tree_hash_root(); - // should push to deposit_tree first because it's fallible - self.deposit_tree - .push_leaf(deposit) - .map_err(Error::DepositTree)?; - self.leaves.push(deposit); - self.logs.push(log); - self.deposit_roots.push(self.deposit_tree.root()); - Ok(DepositCacheInsertOutcome::Inserted) - } - Ordering::Less => { - let mut compare_index = log.index as usize; - if log.index < self.finalized_deposit_count { - return Err(Error::FinalizedLogInsert { - log_index: log.index, - finalized_index: self.finalized_deposit_count - 1, - }); - } else { - compare_index -= self.finalized_deposit_count as usize; - } - if self.logs[compare_index] == log { - Ok(DepositCacheInsertOutcome::Duplicate) - } else { - Err(Error::DuplicateDistinctLog(log.index)) - } - } - Ordering::Greater => Err(Error::NonConsecutive { - log_index: log.index, - expected: self.logs.len(), - }), - } - } - - /// Returns a list of `Deposit` objects, within the given deposit index `range`. - /// - /// The `deposit_count` is used to generate the proofs for the `Deposits`. For example, if we - /// have 100 proofs, but the eth2 chain only acknowledges 50 of them, we must produce our - /// proofs with respect to a tree size of 50. - /// - /// - /// ## Errors - /// - /// - If `deposit_count` is less than `end`. - /// - There are not sufficient deposits in the tree to generate the proof. 
- pub fn get_deposits( - &self, - start: u64, - end: u64, - deposit_count: u64, - ) -> Result<(Hash256, Vec), Error> { - if deposit_count < end { - // It's invalid to ask for more deposits than should exist. - Err(Error::DepositCountInvalid { - deposit_count, - range_end: end, - }) - } else if end > self.len() as u64 { - // The range of requested deposits exceeds the deposits stored locally. - Err(Error::InsufficientDeposits { - requested: end, - known_deposits: self.logs.len(), - }) - } else if self.finalized_deposit_count > start { - // Can't ask for deposits before or on the finalized deposit - Err(Error::DepositRangeInvalid { - range_start: start, - finalized_count: self.finalized_deposit_count, - }) - } else { - let (start, end, deposit_count) = ( - start - self.finalized_deposit_count, - end - self.finalized_deposit_count, - deposit_count - self.finalized_deposit_count, - ); - let leaves = self - .leaves - .get(0..deposit_count as usize) - .ok_or_else(|| Error::Internal("Unable to get known leaves".into()))?; - - let tree = self - .deposit_tree - .get_snapshot() - .map(|snapshot| { - // The tree has already been finalized. So we can just start from the snapshot - // and replay the deposits up to `deposit_count` - let mut tree = DepositDataTree::from_snapshot(&snapshot, DEPOSIT_TREE_DEPTH) - .map_err(Error::DepositTree)?; - for leaf in leaves { - tree.push_leaf(*leaf).map_err(Error::DepositTree)?; - } - Ok(tree) - }) - .unwrap_or_else(|| { - // Deposit tree hasn't been finalized yet, will have to re-create the whole tree - Ok(DepositDataTree::create( - leaves, - leaves.len(), - DEPOSIT_TREE_DEPTH, - )) - })?; - - let mut deposits = vec![]; - self.logs - .get(start as usize..end as usize) - .ok_or_else(|| Error::Internal("Unable to get known log".into()))? 
- .iter() - .try_for_each(|deposit_log| { - let (_leaf, proof) = tree - .generate_proof(deposit_log.index as usize) - .map_err(Error::DepositTree)?; - deposits.push(Deposit { - proof: proof.into(), - data: deposit_log.deposit_data.clone(), - }); - Ok(()) - })?; - - Ok((tree.root(), deposits)) - } - } - - /// Returns the number of deposits with valid signatures that have been observed up to and - /// including the block at `block_number`. - /// - /// Returns `None` if the `block_number` is zero or prior to contract deployment. - pub fn get_valid_signature_count(&self, block_number: u64) -> Option { - if block_number == 0 || block_number < self.deposit_contract_deploy_block { - None - } else { - Some( - self.logs - .iter() - .take_while(|deposit| deposit.block_number <= block_number) - .filter(|deposit| deposit.signature_is_valid) - .count(), - ) - } - } - - /// Returns the number of deposits that have been observed up to and - /// including the block at `block_number`. - /// - /// Returns `None` if the `block_number` is zero or prior to contract deployment - /// or prior to last finalized deposit. - pub fn get_deposit_count_from_cache(&self, block_number: u64) -> Option { - if block_number == 0 - || block_number < self.deposit_contract_deploy_block - || block_number < self.finalized_block_height - { - None - } else if block_number == self.finalized_block_height { - Some(self.finalized_deposit_count) - } else { - Some( - self.finalized_deposit_count - + self - .logs - .iter() - .take_while(|deposit| deposit.block_number <= block_number) - .count() as u64, - ) - } - } - - /// Gets the deposit root at block height = block_number. - /// - /// Fetches the `deposit_count` on or just before the queried `block_number` - /// and queries the `deposit_roots` map to get the corresponding `deposit_root`. 
- pub fn get_deposit_root_from_cache(&self, block_number: u64) -> Option { - let count = self.get_deposit_count_from_cache(block_number)?; - self.get_root(count as usize).cloned() - } -} - -#[cfg(test)] -pub mod tests { - use super::*; - use execution_layer::http::deposit_log::Log; - use types::{EthSpec, FixedBytesExtended, MainnetEthSpec}; - - /// The data from a deposit event, using the v0.8.3 version of the deposit contract. - pub const EXAMPLE_LOG: &[u8] = &[ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 48, 167, 108, 6, 69, 88, 17, 3, 51, 6, 4, 158, 232, 82, - 248, 218, 2, 71, 219, 55, 102, 86, 125, 136, 203, 36, 77, 64, 213, 43, 52, 175, 154, 239, - 50, 142, 52, 201, 77, 54, 239, 0, 229, 22, 46, 139, 120, 62, 240, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 64, 89, 115, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 96, 140, 74, 175, 158, 209, 20, 206, - 30, 63, 215, 238, 113, 60, 132, 216, 211, 100, 186, 202, 71, 34, 200, 160, 225, 212, 213, - 119, 88, 51, 80, 101, 74, 2, 45, 78, 153, 12, 192, 
44, 51, 77, 40, 10, 72, 246, 34, 193, - 187, 22, 95, 4, 211, 245, 224, 13, 162, 21, 163, 54, 225, 22, 124, 3, 56, 14, 81, 122, 189, - 149, 250, 251, 159, 22, 77, 94, 157, 197, 196, 253, 110, 201, 88, 193, 246, 136, 226, 221, - 18, 113, 232, 105, 100, 114, 103, 237, 189, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ]; - - fn example_log() -> DepositLog { - let spec = MainnetEthSpec::default_spec(); - - let log = Log { - block_number: 42, - data: EXAMPLE_LOG.to_vec(), - }; - log.to_deposit_log(&spec).expect("should decode log") - } - - fn get_cache_with_deposits(n: u64) -> DepositCache { - let mut deposit_cache = DepositCache::default(); - for i in 0..n { - let mut log = example_log(); - log.index = i; - log.block_number = i; - log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i); - deposit_cache - .insert_log(log) - .expect("should add consecutive logs"); - } - assert_eq!(deposit_cache.len() as u64, n, "should have {} deposits", n); - - deposit_cache - } - - #[test] - fn insert_log_valid() { - let mut deposit_cache = DepositCache::default(); - - for i in 0..16 { - let mut log = example_log(); - log.index = i; - deposit_cache - .insert_log(log) - .expect("should add consecutive logs"); - } - } - - #[test] - fn insert_log_invalid() { - let mut deposit_cache = DepositCache::default(); - - for i in 0..4 { - let mut log = example_log(); - log.index = i; - deposit_cache - .insert_log(log) - .expect("should add consecutive logs"); - } - - // Add duplicate, when given is the same as the one known. - let mut log = example_log(); - log.index = 3; - assert_eq!( - deposit_cache.insert_log(log).unwrap(), - DepositCacheInsertOutcome::Duplicate - ); - - // Add duplicate, when given is different to the one known. 
- let mut log = example_log(); - log.index = 3; - log.block_number = 99; - assert!(deposit_cache.insert_log(log).is_err()); - - // Skip inserting a log. - let mut log = example_log(); - log.index = 5; - assert!(deposit_cache.insert_log(log).is_err()); - } - - #[test] - fn get_deposit_valid() { - let n = 1_024; - let deposit_cache = get_cache_with_deposits(n); - - // Get 0 deposits, with max deposit count. - let (_, deposits) = deposit_cache - .get_deposits(0, 0, n) - .expect("should get the full tree"); - assert_eq!(deposits.len(), 0, "should return no deposits"); - - // Get 0 deposits, with 0 deposit count. - let (_, deposits) = deposit_cache - .get_deposits(0, 0, 0) - .expect("should get the full tree"); - assert_eq!(deposits.len(), 0, "should return no deposits"); - - // Get all deposits, with max deposit count. - let (full_root, deposits) = deposit_cache - .get_deposits(0, n, n) - .expect("should get the full tree"); - assert_eq!(deposits.len(), n as usize, "should return all deposits"); - - // Get 4 deposits, with max deposit count. - let (root, deposits) = deposit_cache - .get_deposits(0, 4, n) - .expect("should get the four from the full tree"); - assert_eq!( - deposits.len(), - 4_usize, - "should get 4 deposits from full tree" - ); - assert_eq!( - root, full_root, - "should still return full root when getting deposit subset" - ); - - // Get half of the deposits, with half deposit count. - let half = n / 2; - let (half_root, deposits) = deposit_cache - .get_deposits(0, half, half) - .expect("should get the half tree"); - assert_eq!(deposits.len(), half as usize, "should return half deposits"); - - // Get 4 deposits, with half deposit count. 
- let (root, deposits) = deposit_cache - .get_deposits(0, 4, n / 2) - .expect("should get the half tree"); - assert_eq!( - deposits.len(), - 4_usize, - "should get 4 deposits from half tree" - ); - assert_eq!( - root, half_root, - "should still return half root when getting deposit subset" - ); - assert_ne!( - full_root, half_root, - "should get different root when pinning deposit count" - ); - } - - #[test] - fn get_deposit_invalid() { - let n = 16; - let mut tree = get_cache_with_deposits(n); - - // Range too high. - assert!(tree.get_deposits(0, n + 1, n).is_err()); - - // Count too high. - assert!(tree.get_deposits(0, n, n + 1).is_err()); - - // Range higher than count. - assert!(tree.get_deposits(0, 4, 2).is_err()); - - let block7 = fake_eth1_block(&tree, 7).expect("should create fake eth1 block"); - tree.finalize(block7).expect("should finalize"); - // Range starts <= finalized deposit - assert!(tree.get_deposits(6, 9, 11).is_err()); - assert!(tree.get_deposits(7, 9, 11).is_err()); - // Range start > finalized deposit should be OK - assert!(tree.get_deposits(8, 9, 11).is_ok()); - } - - // returns an eth1 block that can be used to finalize the cache at `deposit_index` - // this will ensure the `deposit_root` on the `Eth1Block` is correct - fn fake_eth1_block(deposit_cache: &DepositCache, deposit_index: usize) -> Option { - let deposit_log = deposit_cache.get_log(deposit_index)?; - Some(Eth1Block { - hash: Hash256::from_low_u64_be(deposit_log.block_number), - timestamp: 0, - number: deposit_log.block_number, - deposit_root: deposit_cache.get_root(deposit_index + 1).cloned(), - deposit_count: Some(deposit_log.index + 1), - }) - } - - #[test] - fn test_finalization_boundaries() { - let n = 8; - let half = n / 2; - - let mut deposit_cache = get_cache_with_deposits(n as u64); - - let full_root_before_finalization = deposit_cache.deposit_tree.root(); - let half_log_plus1_before_finalization = deposit_cache - .get_log(half + 1) - .expect("log should exist") - 
.clone(); - let half_root_plus1_before_finalization = - *deposit_cache.get_root(half + 1).expect("root should exist"); - - let (root_before_finalization, proof_before_finalization) = deposit_cache - .get_deposits((half + 1) as u64, (half + 2) as u64, (half + 2) as u64) - .expect("should return 1 deposit with proof"); - - // finalize on the tree at half - let half_block = - fake_eth1_block(&deposit_cache, half).expect("fake block should be created"); - assert!( - deposit_cache.get_deposit_snapshot().is_none(), - "snapshot should not exist as tree has not been finalized" - ); - deposit_cache - .finalize(half_block) - .expect("tree should_finalize"); - - // check boundary conditions for get_log - assert!( - deposit_cache.get_log(half).is_none(), - "log at finalized deposit should NOT exist" - ); - assert_eq!( - *deposit_cache.get_log(half + 1).expect("log should exist"), - half_log_plus1_before_finalization, - "log after finalized deposit should match before finalization" - ); - // check boundary conditions for get_root - assert!( - deposit_cache.get_root(half).is_none(), - "root at finalized deposit should NOT exist" - ); - assert_eq!( - *deposit_cache.get_root(half + 1).expect("root should exist"), - half_root_plus1_before_finalization, - "root after finalized deposit should match before finalization" - ); - // full root should match before and after finalization - assert_eq!( - deposit_cache.deposit_tree.root(), - full_root_before_finalization, - "full root should match before and after finalization" - ); - // check boundary conditions for get_deposits (proof) - assert!( - deposit_cache - .get_deposits(half as u64, (half + 1) as u64, (half + 1) as u64) - .is_err(), - "cannot prove the finalized deposit" - ); - let (root_after_finalization, proof_after_finalization) = deposit_cache - .get_deposits((half + 1) as u64, (half + 2) as u64, (half + 2) as u64) - .expect("should return 1 deposit with proof"); - assert_eq!( - root_before_finalization, 
root_after_finalization, - "roots before and after finalization should match" - ); - assert_eq!( - proof_before_finalization, proof_after_finalization, - "proof before and after finalization should match" - ); - - // recover tree from snapshot by replaying deposits - let snapshot = deposit_cache - .get_deposit_snapshot() - .expect("snapshot should exist"); - let mut recovered = DepositCache::from_deposit_snapshot(1, &snapshot) - .expect("should recover finalized tree"); - for i in half + 1..n { - let mut log = example_log(); - log.index = i as u64; - log.block_number = i as u64; - log.deposit_data.withdrawal_credentials = Hash256::from_low_u64_be(i as u64); - recovered - .insert_log(log) - .expect("should add consecutive logs"); - } - - // check the same boundary conditions above for the recovered tree - assert!( - recovered.get_log(half).is_none(), - "log at finalized deposit should NOT exist" - ); - assert_eq!( - *recovered.get_log(half + 1).expect("log should exist"), - half_log_plus1_before_finalization, - "log after finalized deposit should match before finalization in recovered tree" - ); - // check boundary conditions for get_root - assert!( - recovered.get_root(half).is_none(), - "root at finalized deposit should NOT exist" - ); - assert_eq!( - *recovered.get_root(half + 1).expect("root should exist"), - half_root_plus1_before_finalization, - "root after finalized deposit should match before finalization in recovered tree" - ); - // full root should match before and after finalization - assert_eq!( - recovered.deposit_tree.root(), - full_root_before_finalization, - "full root should match before and after finalization" - ); - // check boundary conditions for get_deposits (proof) - assert!( - recovered - .get_deposits(half as u64, (half + 1) as u64, (half + 1) as u64) - .is_err(), - "cannot prove the finalized deposit" - ); - let (recovered_root_after_finalization, recovered_proof_after_finalization) = recovered - .get_deposits((half + 1) as u64, (half + 2) 
as u64, (half + 2) as u64) - .expect("should return 1 deposit with proof"); - assert_eq!( - root_before_finalization, recovered_root_after_finalization, - "recovered roots before and after finalization should match" - ); - assert_eq!( - proof_before_finalization, recovered_proof_after_finalization, - "recovered proof before and after finalization should match" - ); - } - - #[test] - fn test_finalization() { - let n = 1024; - let half = n / 2; - let quarter = half / 2; - let mut deposit_cache = get_cache_with_deposits(n); - - let full_root_before_finalization = deposit_cache.deposit_tree.root(); - let q3_root_before_finalization = deposit_cache - .get_root((half + quarter) as usize) - .cloned() - .expect("root should exist"); - let q3_log_before_finalization = deposit_cache - .get_log((half + quarter) as usize) - .cloned() - .expect("log should exist"); - // get_log(half+quarter) should return log with index `half+quarter` - assert_eq!( - q3_log_before_finalization.index, - half + quarter, - "log index should be {}", - half + quarter, - ); - - // get lower quarter of deposits with max deposit count - let (lower_quarter_root_before_finalization, lower_quarter_deposits_before_finalization) = - deposit_cache - .get_deposits(quarter, half, n) - .expect("should get lower quarter"); - assert_eq!( - lower_quarter_deposits_before_finalization.len(), - quarter as usize, - "should get {} deposits from lower quarter", - quarter, - ); - // since the lower quarter was done with full deposits, root should be the same as full_root_before_finalization - assert_eq!( - lower_quarter_root_before_finalization, full_root_before_finalization, - "should still get full root with deposit subset", - ); - - // get upper quarter of deposits with slightly reduced deposit count - let (upper_quarter_root_before_finalization, upper_quarter_deposits_before_finalization) = - deposit_cache - .get_deposits(half, half + quarter, n - 2) - .expect("should get upper quarter"); - assert_eq!( - 
upper_quarter_deposits_before_finalization.len(), - quarter as usize, - "should get {} deposits from upper quarter", - quarter, - ); - // since upper quarter was with subset of nodes, it should differ from full root - assert_ne!( - full_root_before_finalization, upper_quarter_root_before_finalization, - "subtree root should differ from full root", - ); - - let f0_log = deposit_cache - .get_log((quarter - 1) as usize) - .cloned() - .expect("should return log"); - let f0_block = fake_eth1_block(&deposit_cache, (quarter - 1) as usize) - .expect("fake eth1 block should be created"); - - // finalize first quarter - deposit_cache - .finalize(f0_block) - .expect("should finalize first quarter"); - // finalized count and block number should match log - assert_eq!( - deposit_cache.finalized_deposit_count, - f0_log.index + 1, - "after calling finalize(eth1block) finalized_deposit_count should equal eth1_block.deposit_count", - ); - assert_eq!( - deposit_cache.finalized_block_height, - f0_log.block_number, - "after calling finalize(eth1block) finalized_block_number should equal eth1block.block_number" - ); - // check get_log boundaries - assert!( - deposit_cache.get_log((quarter - 1) as usize).is_none(), - "get_log() should return None for index <= finalized log index", - ); - assert!( - deposit_cache.get_log(quarter as usize).is_some(), - "get_log() should return Some(log) for index >= finalized_deposit_count", - ); - - // full root should remain the same after finalization - assert_eq!( - full_root_before_finalization, - deposit_cache.deposit_tree.root(), - "root should be the same before and after finalization", - ); - // get_root should return the same root before and after finalization - assert_eq!( - q3_root_before_finalization, - deposit_cache - .get_root((half + quarter) as usize) - .cloned() - .expect("root should exist"), - "get_root should return the same root before and after finalization", - ); - // get_log should return the same log before and after finalization 
- assert_eq!( - q3_log_before_finalization, - deposit_cache - .get_log((half + quarter) as usize) - .cloned() - .expect("log should exist"), - "get_log should return the same log before and after finalization", - ); - - // again get lower quarter of deposits with max deposit count after finalization - let (f0_lower_quarter_root, f0_lower_quarter_deposits) = deposit_cache - .get_deposits(quarter, half, n) - .expect("should get lower quarter"); - assert_eq!( - f0_lower_quarter_deposits.len(), - quarter as usize, - "should get {} deposits from lower quarter", - quarter, - ); - // again get upper quarter of deposits with slightly reduced deposit count after finalization - let (f0_upper_quarter_root, f0_upper_quarter_deposits) = deposit_cache - .get_deposits(half, half + quarter, n - 2) - .expect("should get upper quarter"); - assert_eq!( - f0_upper_quarter_deposits.len(), - quarter as usize, - "should get {} deposits from upper quarter", - quarter, - ); - - // lower quarter root and deposits should be the same - assert_eq!( - lower_quarter_root_before_finalization, f0_lower_quarter_root, - "root should be the same before and after finalization", - ); - for i in 0..lower_quarter_deposits_before_finalization.len() { - assert_eq!( - lower_quarter_deposits_before_finalization[i], f0_lower_quarter_deposits[i], - "get_deposits() should be the same before and after finalization", - ); - } - // upper quarter root and deposits should be the same - assert_eq!( - upper_quarter_root_before_finalization, f0_upper_quarter_root, - "subtree root should be the same before and after finalization", - ); - for i in 0..upper_quarter_deposits_before_finalization.len() { - assert_eq!( - upper_quarter_deposits_before_finalization[i], f0_upper_quarter_deposits[i], - "get_deposits() should be the same before and after finalization", - ); - } - - let f1_log = deposit_cache - .get_log((half - 2) as usize) - .cloned() - .expect("should return log"); - // finalize a little less than half to test 
multiple finalization - let f1_block = fake_eth1_block(&deposit_cache, (half - 2) as usize) - .expect("should create fake eth1 block"); - deposit_cache - .finalize(f1_block) - .expect("should finalize a little less than half"); - // finalized count and block number should match f1_log - assert_eq!( - deposit_cache.finalized_deposit_count, - f1_log.index + 1, - "after calling finalize(eth1block) finalized_deposit_count should equal eth1_block.deposit_count", - ); - assert_eq!( - deposit_cache.finalized_block_height, - f1_log.block_number, - "after calling finalize(eth1block) finalized_block_number should equal eth1block.block_number" - ); - // check get_log boundaries - assert!( - deposit_cache.get_log((half - 2) as usize).is_none(), - "get_log() should return None for index <= finalized log index", - ); - assert!( - deposit_cache.get_log((half - 1) as usize).is_some(), - "get_log() should return Some(log) for index >= finalized_deposit_count", - ); - - // full root should still be unchanged - assert_eq!( - full_root_before_finalization, - deposit_cache.deposit_tree.root(), - "root should be the same before and after finalization", - ); - - // again get upper quarter of deposits with slightly reduced deposit count after second finalization - let (f1_upper_quarter_root, f1_upper_quarter_deposits) = deposit_cache - .get_deposits(half, half + quarter, n - 2) - .expect("should get upper quarter"); - - // upper quarter root and deposits should be the same after second finalization - assert_eq!( - f0_upper_quarter_root, f1_upper_quarter_root, - "subtree root should be the same after multiple finalization", - ); - for i in 0..f0_upper_quarter_deposits.len() { - assert_eq!( - f0_upper_quarter_deposits[i], f1_upper_quarter_deposits[i], - "get_deposits() should be the same before and after finalization", - ); - } - } - - fn verify_equality(original: &DepositCache, copy: &DepositCache) { - // verify each field individually so that if one field should - // fail to recover, this 
test will point right to it - assert_eq!(original.deposit_contract_deploy_block, copy.deposit_contract_deploy_block, "DepositCache: deposit_contract_deploy_block should remain the same after encoding and decoding from ssz" ); - assert_eq!( - original.leaves, copy.leaves, - "DepositCache: leaves should remain the same after encoding and decoding from ssz" - ); - assert_eq!( - original.logs, copy.logs, - "DepositCache: logs should remain the same after encoding and decoding from ssz" - ); - assert_eq!(original.finalized_deposit_count, copy.finalized_deposit_count, "DepositCache: finalized_deposit_count should remain the same after encoding and decoding from ssz"); - assert_eq!(original.finalized_block_height, copy.finalized_block_height, "DepositCache: finalized_block_height should remain the same after encoding and decoding from ssz"); - assert_eq!(original.deposit_roots, copy.deposit_roots, "DepositCache: deposit_roots should remain the same before and after encoding and decoding from ssz"); - assert!(original.deposit_tree == copy.deposit_tree, "DepositCache: deposit_tree should remain the same before and after encoding and decoding from ssz"); - // verify all together for good measure - assert!( - original == copy, - "Deposit cache should remain the same after encoding and decoding from ssz" - ); - } - - fn ssz_round_trip(original: &DepositCache) -> DepositCache { - use ssz::{Decode, Encode}; - let bytes = SszDepositCache::from_deposit_cache(original).as_ssz_bytes(); - let ssz_cache = - SszDepositCache::from_ssz_bytes(&bytes).expect("should decode from ssz bytes"); - - SszDepositCache::to_deposit_cache(&ssz_cache).expect("should recover cache") - } - - #[test] - fn ssz_encode_decode() { - let deposit_cache = get_cache_with_deposits(512); - let recovered_cache = ssz_round_trip(&deposit_cache); - - verify_equality(&deposit_cache, &recovered_cache); - } - - #[test] - fn ssz_encode_decode_with_finalization() { - let mut deposit_cache = get_cache_with_deposits(512); - 
let block383 = fake_eth1_block(&deposit_cache, 383).expect("should create fake eth1 block"); - deposit_cache.finalize(block383).expect("should finalize"); - let mut first_recovery = ssz_round_trip(&deposit_cache); - - verify_equality(&deposit_cache, &first_recovery); - // finalize again to verify equality after multiple finalizations - let block447 = fake_eth1_block(&deposit_cache, 447).expect("should create fake eth1 block"); - first_recovery.finalize(block447).expect("should finalize"); - - let mut second_recovery = ssz_round_trip(&first_recovery); - verify_equality(&first_recovery, &second_recovery); - - // verify equality of a tree that finalized block383, block447, block479 - // with a tree that finalized block383, block479 - let block479 = fake_eth1_block(&deposit_cache, 479).expect("should create fake eth1 block"); - second_recovery - .finalize(block479.clone()) - .expect("should finalize"); - let third_recovery = ssz_round_trip(&second_recovery); - deposit_cache.finalize(block479).expect("should finalize"); - - verify_equality(&deposit_cache, &third_recovery); - } -} diff --git a/beacon_node/eth1/src/inner.rs b/beacon_node/eth1/src/inner.rs deleted file mode 100644 index 1f45346256b..00000000000 --- a/beacon_node/eth1/src/inner.rs +++ /dev/null @@ -1,130 +0,0 @@ -use crate::service::endpoint_from_config; -use crate::Config; -use crate::{ - block_cache::{BlockCache, Eth1Block}, - deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV13}, -}; -use execution_layer::HttpJsonRpc; -use parking_lot::RwLock; -use ssz::four_byte_option_impl; -use ssz::{Decode, Encode}; -use ssz_derive::{Decode, Encode}; -use std::sync::Arc; -use superstruct::superstruct; -use types::{ChainSpec, DepositTreeSnapshot, Eth1Data}; - -// Define "legacy" implementations of `Option` which use four bytes for encoding the union -// selector. 
-four_byte_option_impl!(four_byte_option_u64, u64); - -#[derive(Default)] -pub struct DepositUpdater { - pub cache: DepositCache, - pub last_processed_block: Option, -} - -impl DepositUpdater { - pub fn new(deposit_contract_deploy_block: u64) -> Self { - let cache = DepositCache::new(deposit_contract_deploy_block); - DepositUpdater { - cache, - last_processed_block: None, - } - } - - pub fn from_snapshot( - deposit_contract_deploy_block: u64, - snapshot: &DepositTreeSnapshot, - ) -> Result { - let last_processed_block = Some(snapshot.execution_block_height); - Ok(Self { - cache: DepositCache::from_deposit_snapshot(deposit_contract_deploy_block, snapshot)?, - last_processed_block, - }) - } -} - -pub struct Inner { - pub block_cache: RwLock, - pub deposit_cache: RwLock, - pub endpoint: HttpJsonRpc, - // this gets set to Some(Eth1Data) when the deposit finalization conditions are met - pub to_finalize: RwLock>, - pub config: RwLock, - pub remote_head_block: RwLock>, - pub spec: Arc, -} - -impl Inner { - /// Prunes the block cache to `self.target_block_cache_len`. - /// - /// Is a no-op if `self.target_block_cache_len` is `None`. - pub fn prune_blocks(&self) { - if let Some(block_cache_truncation) = self.config.read().block_cache_truncation { - self.block_cache.write().truncate(block_cache_truncation); - } - } - - /// Encode the eth1 block and deposit cache as bytes. - pub fn as_bytes(&self) -> Vec { - let ssz_eth1_cache = SszEth1Cache::from_inner(self); - ssz_eth1_cache.as_ssz_bytes() - } - - /// Recover `Inner` given byte representation of eth1 deposit and block caches. - pub fn from_bytes(bytes: &[u8], config: Config, spec: Arc) -> Result { - SszEth1Cache::from_ssz_bytes(bytes) - .map_err(|e| format!("Ssz decoding error: {:?}", e))? - .to_inner(config, spec) - .inspect(|inner| inner.block_cache.write().rebuild_by_hash_map()) - } - - /// Returns a reference to the specification. 
- pub fn spec(&self) -> &ChainSpec { - &self.spec - } -} - -pub type SszEth1Cache = SszEth1CacheV13; - -#[superstruct( - variants(V13), - variant_attributes(derive(Encode, Decode, Clone)), - no_enum -)] -pub struct SszEth1Cache { - pub block_cache: BlockCache, - pub deposit_cache: SszDepositCacheV13, - #[ssz(with = "four_byte_option_u64")] - pub last_processed_block: Option, -} - -impl SszEth1Cache { - pub fn from_inner(inner: &Inner) -> Self { - let deposit_updater = inner.deposit_cache.read(); - let block_cache = inner.block_cache.read(); - Self { - block_cache: (*block_cache).clone(), - deposit_cache: SszDepositCache::from_deposit_cache(&deposit_updater.cache), - last_processed_block: deposit_updater.last_processed_block, - } - } - - pub fn to_inner(&self, config: Config, spec: Arc) -> Result { - Ok(Inner { - block_cache: RwLock::new(self.block_cache.clone()), - deposit_cache: RwLock::new(DepositUpdater { - cache: self.deposit_cache.to_deposit_cache()?, - last_processed_block: self.last_processed_block, - }), - endpoint: endpoint_from_config(&config) - .map_err(|e| format!("Failed to create endpoint: {:?}", e))?, - to_finalize: RwLock::new(None), - // Set the remote head_block zero when creating a new instance. We only care about - // present and future eth1 nodes. 
- remote_head_block: RwLock::new(None), - config: RwLock::new(config), - spec, - }) - } -} diff --git a/beacon_node/eth1/src/lib.rs b/beacon_node/eth1/src/lib.rs deleted file mode 100644 index 9c4f9a1d8d5..00000000000 --- a/beacon_node/eth1/src/lib.rs +++ /dev/null @@ -1,14 +0,0 @@ -mod block_cache; -mod deposit_cache; -mod inner; -mod metrics; -mod service; - -pub use block_cache::{BlockCache, Eth1Block}; -pub use deposit_cache::{DepositCache, SszDepositCache, SszDepositCacheV13}; -pub use execution_layer::http::deposit_log::DepositLog; -pub use inner::{SszEth1Cache, SszEth1CacheV13}; -pub use service::{ - BlockCacheUpdateOutcome, Config, DepositCacheUpdateOutcome, Error, Eth1Endpoint, Service, - DEFAULT_CHAIN_ID, -}; diff --git a/beacon_node/eth1/src/metrics.rs b/beacon_node/eth1/src/metrics.rs deleted file mode 100644 index 1df4ba0df9a..00000000000 --- a/beacon_node/eth1/src/metrics.rs +++ /dev/null @@ -1,41 +0,0 @@ -pub use metrics::*; -use std::sync::LazyLock; - -/* - * Eth1 blocks - */ -pub static BLOCK_CACHE_LEN: LazyLock> = - LazyLock::new(|| try_create_int_gauge("eth1_block_cache_len", "Count of eth1 blocks in cache")); -pub static LATEST_CACHED_BLOCK_TIMESTAMP: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "eth1_latest_cached_block_timestamp", - "Timestamp of latest block in eth1 cache", - ) -}); - -/* - * Eth1 deposits - */ -pub static DEPOSIT_CACHE_LEN: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "eth1_deposit_cache_len", - "Number of deposits in the eth1 cache", - ) -}); -pub static HIGHEST_PROCESSED_DEPOSIT_BLOCK: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "eth1_highest_processed_deposit_block", - "Number of the last block checked for deposits", - ) -}); - -/* - * Eth1 rpc connection - */ - -pub static ETH1_CONNECTED: LazyLock> = LazyLock::new(|| { - try_create_int_gauge( - "sync_eth1_connected", - "Set to 1 if connected to an eth1 node, otherwise set to 0", - ) -}); diff --git a/beacon_node/eth1/src/service.rs 
b/beacon_node/eth1/src/service.rs deleted file mode 100644 index 6b10bd2215c..00000000000 --- a/beacon_node/eth1/src/service.rs +++ /dev/null @@ -1,1243 +0,0 @@ -use crate::metrics; -use crate::{ - block_cache::{BlockCache, Error as BlockCacheError, Eth1Block}, - deposit_cache::{DepositCacheInsertOutcome, Error as DepositCacheError}, - inner::{DepositUpdater, Inner}, -}; -use execution_layer::auth::Auth; -use execution_layer::http::{ - deposit_methods::{BlockQuery, Eth1Id}, - HttpJsonRpc, -}; -use futures::future::TryFutureExt; -use parking_lot::{RwLock, RwLockReadGuard}; -use sensitive_url::SensitiveUrl; -use serde::{Deserialize, Serialize}; -use std::fmt::Debug; -use std::ops::{Range, RangeInclusive}; -use std::path::PathBuf; -use std::sync::Arc; -use std::time::{SystemTime, UNIX_EPOCH}; -use tokio::time::{interval_at, Duration, Instant}; -use tracing::{debug, error, info, trace, warn}; -use types::{ChainSpec, DepositTreeSnapshot, Eth1Data, EthSpec, Unsigned}; - -/// Indicates the default eth1 chain id we use for the deposit contract. -pub const DEFAULT_CHAIN_ID: Eth1Id = Eth1Id::Mainnet; -/// Indicates the default eth1 endpoint. -pub const DEFAULT_ETH1_ENDPOINT: &str = "http://localhost:8545"; - -const STANDARD_TIMEOUT_MILLIS: u64 = 15_000; - -/// Timeout when doing a eth_blockNumber call. -const BLOCK_NUMBER_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS; -/// Timeout when doing an eth_getBlockByNumber call. -const GET_BLOCK_TIMEOUT_MILLIS: u64 = STANDARD_TIMEOUT_MILLIS; -/// Timeout when doing an eth_getLogs to read the deposit contract logs. -const GET_DEPOSIT_LOG_TIMEOUT_MILLIS: u64 = 60_000; - -/// Number of blocks to download if the node detects it is lagging behind due to an inaccurate -/// relationship between block-number-based follow distance and time-based follow distance. -const CATCHUP_BATCH_SIZE: u64 = 128; - -/// The absolute minimum follow distance to enforce when downloading catchup batches. 
-const CATCHUP_MIN_FOLLOW_DISTANCE: u64 = 64; - -/// To account for fast PoW blocks requiring more blocks in the cache than the block-based follow -/// distance would imply, we store `CACHE_FACTOR` more blocks in our cache. -const CACHE_FACTOR: u64 = 2; - -#[derive(Debug, PartialEq, Clone)] -pub enum EndpointError { - RequestFailed(String), - WrongChainId, - FarBehind, -} - -type EndpointState = Result<(), EndpointError>; - -/// Returns `Ok` if the endpoint is usable, i.e. is reachable and has a correct network id and -/// chain id. Otherwise it returns `Err`. -async fn endpoint_state(endpoint: &HttpJsonRpc, config_chain_id: &Eth1Id) -> EndpointState { - let error_connecting = |e: String| { - debug!( - %endpoint, - error = &e, - "eth1 endpoint error" - ); - warn!( - %endpoint, - "Error connecting to eth1 node endpoint" - ); - EndpointError::RequestFailed(e) - }; - - let chain_id = endpoint - .get_chain_id(Duration::from_millis(STANDARD_TIMEOUT_MILLIS)) - .await - .map_err(error_connecting)?; - // Eth1 nodes return chain_id = 0 if the node is not synced - // Handle the special case - if chain_id == Eth1Id::Custom(0) { - warn!( - %endpoint, - "Remote execution node is not synced" - ); - return Err(EndpointError::FarBehind); - } - if &chain_id != config_chain_id { - warn!( - %endpoint, - expected = ?config_chain_id, - received = ?chain_id, - "Invalid execution chain ID. Please switch to correct chain ID on endpoint" - ); - Err(EndpointError::WrongChainId) - } else { - Ok(()) - } -} - -/// Enum for the two internal (maybe different) cached heads for cached deposits and for the block -/// cache. -pub enum HeadType { - Deposit, - BlockCache, -} - -/// Returns the head block and the new block ranges relevant for deposits and the block cache -/// from the given endpoint. 
-async fn get_remote_head_and_new_block_ranges( - endpoint: &HttpJsonRpc, - service: &Service, - node_far_behind_seconds: u64, -) -> Result< - ( - Eth1Block, - Option>, - Option>, - ), - Error, -> { - let remote_head_block = download_eth1_block(endpoint, service.inner.clone(), None).await?; - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map(|d| d.as_secs()) - .unwrap_or(u64::MAX); - if remote_head_block.timestamp + node_far_behind_seconds < now { - warn!( - %endpoint, - last_seen_block_unix_timestamp = remote_head_block.timestamp, - "Execution endpoint is not synced" - ); - return Err(Error::EndpointError(EndpointError::FarBehind)); - } - - let handle_remote_not_synced = |e| { - if let Error::RemoteNotSynced { .. } = e { - warn!( - %endpoint, - "Execution endpoint is not synced" - ); - } - e - }; - let new_deposit_block_numbers = service - .relevant_new_block_numbers( - remote_head_block.number, - Some(remote_head_block.timestamp), - HeadType::Deposit, - ) - .map_err(handle_remote_not_synced)?; - let new_block_cache_numbers = service - .relevant_new_block_numbers( - remote_head_block.number, - Some(remote_head_block.timestamp), - HeadType::BlockCache, - ) - .map_err(handle_remote_not_synced)?; - Ok(( - remote_head_block, - new_deposit_block_numbers, - new_block_cache_numbers, - )) -} - -/// Returns the range of new block numbers to be considered for the given head type from the given -/// endpoint. -async fn relevant_new_block_numbers_from_endpoint( - endpoint: &HttpJsonRpc, - service: &Service, - head_type: HeadType, -) -> Result>, Error> { - let remote_highest_block = endpoint - .get_block_number(Duration::from_millis(BLOCK_NUMBER_TIMEOUT_MILLIS)) - .map_err(Error::GetBlockNumberFailed) - .await?; - service.relevant_new_block_numbers(remote_highest_block, None, head_type) -} - -#[derive(Debug, PartialEq)] -pub enum Error { - /// There was an inconsistency when adding a block to the cache. 
- FailedToInsertEth1Block(BlockCacheError), - /// There was an inconsistency when adding a deposit to the cache. - FailedToInsertDeposit(DepositCacheError), - /// A log downloaded from the eth1 contract was not well formed. - FailedToParseDepositLog { - block_range: Range, - error: String, - }, - /// Endpoint is currently not functional. - EndpointError(EndpointError), - /// The remote node is less synced that we expect, it is not useful until has done more - /// syncing. - RemoteNotSynced { - next_required_block: u64, - remote_highest_block: u64, - cache_follow_distance: u64, - }, - /// Failed to download a block from the eth1 node. - BlockDownloadFailed(String), - /// Failed to get the current block number from the eth1 node. - GetBlockNumberFailed(String), - /// Failed to read the deposit contract root from the eth1 node. - GetDepositRootFailed(String), - /// Failed to read the deposit contract deposit count from the eth1 node. - GetDepositCountFailed(String), - /// Failed to read the deposit contract root from the eth1 node. - GetDepositLogsFailed(String), - /// There was an unexpected internal error. - Internal(String), - /// Error finalizing deposit - FailedToFinalizeDeposit(String), - /// There was a problem Initializing from deposit snapshot - FailedToInitializeFromSnapshot(String), -} - -/// The success message for an Eth1Data cache update. -#[derive(Debug, PartialEq, Clone)] -pub struct BlockCacheUpdateOutcome { - pub blocks_imported: usize, - pub head_block_number: Option, -} - -/// The success message for an Eth1 deposit cache update. -#[derive(Debug, PartialEq, Clone)] -pub struct DepositCacheUpdateOutcome { - pub logs_imported: usize, -} - -/// Supports either one authenticated jwt JSON-RPC endpoint **or** -/// multiple non-authenticated endpoints with fallback. 
-#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] -pub enum Eth1Endpoint { - Auth { - endpoint: SensitiveUrl, - jwt_path: PathBuf, - jwt_id: Option, - jwt_version: Option, - }, - NoAuth(SensitiveUrl), -} - -impl Eth1Endpoint { - pub fn get_endpoint(&self) -> SensitiveUrl { - match &self { - Self::Auth { endpoint, .. } => endpoint.clone(), - Self::NoAuth(endpoint) => endpoint.clone(), - } - } -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Config { - /// An Eth1 node (e.g., Geth) running a HTTP JSON-RPC endpoint. - pub endpoint: Eth1Endpoint, - /// The address the `BlockCache` and `DepositCache` should assume is the canonical deposit contract. - pub deposit_contract_address: String, - /// The eth1 chain id where the deposit contract is deployed (Holesky/Mainnet). - pub chain_id: Eth1Id, - /// Defines the first block that the `DepositCache` will start searching for deposit logs. - /// - /// Setting too high can result in missed logs. Setting too low will result in unnecessary - /// calls to the Eth1 node's HTTP JSON RPC. - pub deposit_contract_deploy_block: u64, - /// Defines the lowest block number that should be downloaded and added to the `BlockCache`. - pub lowest_cached_block_number: u64, - /// Defines how far behind the Eth1 node's head we should follow. - /// - /// Note: this should be less than or equal to the specification's `ETH1_FOLLOW_DISTANCE`. - pub follow_distance: u64, - /// The follow distance to use for blocks in our cache. - /// - /// This can be set lower than the true follow distance in order to correct for poor timing - /// of eth1 blocks. - pub cache_follow_distance: Option, - /// Specifies the seconds when we consider the head of a node far behind. - /// This should be less than `ETH1_FOLLOW_DISTANCE * SECONDS_PER_ETH1_BLOCK`. - pub node_far_behind_seconds: u64, - /// Defines the number of blocks that should be retained each time the `BlockCache` calls truncate on - /// itself. 
- pub block_cache_truncation: Option, - /// The interval between updates when using the `auto_update` function. - pub auto_update_interval_millis: u64, - /// The span of blocks we should query for logs, per request. - pub blocks_per_log_query: usize, - /// The maximum number of log requests per update. - pub max_log_requests_per_update: Option, - /// The maximum number of log requests per update. - pub max_blocks_per_update: Option, - /// If set to true, the eth1 caches are wiped clean when the eth1 service starts. - pub purge_cache: bool, - pub execution_timeout_multiplier: u32, -} - -impl Config { - /// Sets the block cache to a length that is suitable for the given `EthSpec` and `ChainSpec`. - pub fn set_block_cache_truncation(&mut self, spec: &ChainSpec) { - // Compute the number of eth1 blocks in an eth1 voting period. - let seconds_per_voting_period = - E::SlotsPerEth1VotingPeriod::to_u64() * spec.seconds_per_slot; - let eth1_blocks_per_voting_period = seconds_per_voting_period / spec.seconds_per_eth1_block; - - // Ensure we can store two full windows of voting blocks. - let voting_windows = eth1_blocks_per_voting_period * 2; - - // Extend the cache to account for the cache follow distance. - let extra_follow_distance_blocks = self - .follow_distance - .saturating_sub(self.cache_follow_distance()); - - let length = voting_windows + extra_follow_distance_blocks; - - // Allow for more blocks to account for blocks being generated faster than expected. - // The cache expiry should really be timestamp based, but that would require a more - // extensive refactor. - let cache_size = CACHE_FACTOR * length; - - self.block_cache_truncation = Some(cache_size as usize); - } - - /// The distance at which the cache should follow the head. - /// - /// Defaults to 3/4 of `follow_distance` unless set manually. 
- pub fn cache_follow_distance(&self) -> u64 { - self.cache_follow_distance - .unwrap_or(3 * self.follow_distance / 4) - } -} - -impl Default for Config { - fn default() -> Self { - Self { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(DEFAULT_ETH1_ENDPOINT) - .expect("The default Eth1 endpoint must always be a valid URL."), - ), - deposit_contract_address: "0x0000000000000000000000000000000000000000".into(), - chain_id: DEFAULT_CHAIN_ID, - deposit_contract_deploy_block: 1, - lowest_cached_block_number: 1, - follow_distance: 128, - cache_follow_distance: None, - node_far_behind_seconds: 128 * 14, - block_cache_truncation: Some(4_096), - auto_update_interval_millis: 60_000, - blocks_per_log_query: 1_000, - max_log_requests_per_update: Some(5_000), - max_blocks_per_update: Some(8_192), - purge_cache: false, - execution_timeout_multiplier: 1, - } - } -} - -pub fn endpoint_from_config(config: &Config) -> Result { - match config.endpoint.clone() { - Eth1Endpoint::Auth { - endpoint, - jwt_path, - jwt_id, - jwt_version, - } => { - let auth = Auth::new_with_path(jwt_path, jwt_id, jwt_version) - .map_err(|e| format!("Failed to initialize jwt auth: {:?}", e))?; - HttpJsonRpc::new_with_auth(endpoint, auth, Some(config.execution_timeout_multiplier)) - .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) - } - Eth1Endpoint::NoAuth(endpoint) => { - HttpJsonRpc::new(endpoint, Some(config.execution_timeout_multiplier)) - .map_err(|e| format!("Failed to create eth1 json rpc client: {:?}", e)) - } - } -} - -/// Provides a set of Eth1 caches and async functions to update them. -/// -/// Stores the following caches: -/// -/// - Deposit cache: stores all deposit logs from the deposit contract. -/// - Block cache: stores some number of eth1 blocks. -#[derive(Clone)] -pub struct Service { - inner: Arc, -} - -impl Service { - /// Creates a new service. Does not attempt to connect to the eth1 node. 
- pub fn new(config: Config, spec: Arc) -> Result { - Ok(Self { - inner: Arc::new(Inner { - block_cache: <_>::default(), - deposit_cache: RwLock::new(DepositUpdater::new( - config.deposit_contract_deploy_block, - )), - endpoint: endpoint_from_config(&config)?, - to_finalize: RwLock::new(None), - remote_head_block: RwLock::new(None), - config: RwLock::new(config), - spec, - }), - }) - } - - pub fn chain_spec(&self) -> &Arc { - &self.inner.spec - } - - pub fn client(&self) -> &HttpJsonRpc { - &self.inner.endpoint - } - - /// Creates a new service, initializing the deposit tree from a snapshot. - pub fn from_deposit_snapshot( - config: Config, - spec: Arc, - deposit_snapshot: &DepositTreeSnapshot, - ) -> Result { - let deposit_cache = - DepositUpdater::from_snapshot(config.deposit_contract_deploy_block, deposit_snapshot) - .map_err(Error::FailedToInitializeFromSnapshot)?; - - Ok(Self { - inner: Arc::new(Inner { - block_cache: <_>::default(), - deposit_cache: RwLock::new(deposit_cache), - endpoint: endpoint_from_config(&config) - .map_err(Error::FailedToInitializeFromSnapshot)?, - to_finalize: RwLock::new(None), - remote_head_block: RwLock::new(None), - config: RwLock::new(config), - spec, - }), - }) - } - - pub fn set_to_finalize(&self, eth1_data: Option) { - *(self.inner.to_finalize.write()) = eth1_data; - } - - /// Returns the follow distance that has been shortened to accommodate for differences in the - /// spacing between blocks. - pub fn cache_follow_distance(&self) -> u64 { - self.config().cache_follow_distance() - } - - /// Return byte representation of deposit and block caches. - pub fn as_bytes(&self) -> Vec { - self.inner.as_bytes() - } - - /// Recover the deposit and block caches from encoded bytes. - pub fn from_bytes(bytes: &[u8], config: Config, spec: Arc) -> Result { - let inner = Inner::from_bytes(bytes, config, spec)?; - Ok(Self { - inner: Arc::new(inner), - }) - } - - /// Provides access to the block cache. 
- pub fn blocks(&self) -> &RwLock { - &self.inner.block_cache - } - - /// Provides access to the deposit cache. - pub fn deposits(&self) -> &RwLock { - &self.inner.deposit_cache - } - - /// Removes all blocks from the cache, except for the latest block. - /// - /// We don't remove the latest blocks so we don't lose track of the latest block. - pub fn clear_block_cache(&self) { - self.inner.block_cache.write().truncate(1) - } - - /// Drop the block cache, replacing it with an empty one. - pub fn drop_block_cache(&self) { - *(self.inner.block_cache.write()) = BlockCache::default(); - } - - /// Returns the timestamp of the earliest block in the cache (if any). - pub fn earliest_block_timestamp(&self) -> Option { - self.inner.block_cache.read().earliest_block_timestamp() - } - - /// Returns the timestamp of the latest block in the cache (if any). - pub fn latest_block_timestamp(&self) -> Option { - self.inner.block_cache.read().latest_block_timestamp() - } - - /// Returns the latest head block returned from an Eth1 node. - /// - /// ## Note - /// - /// This is the simply the head of the Eth1 chain, with no regard to follow distance or the - /// voting period start. - pub fn head_block(&self) -> Option { - self.inner.remote_head_block.read().as_ref().cloned() - } - - /// Returns the latest cached block. - pub fn latest_cached_block(&self) -> Option { - self.inner.block_cache.read().latest_block().cloned() - } - - /// Returns the lowest block number stored. - pub fn lowest_block_number(&self) -> Option { - self.inner.block_cache.read().lowest_block_number() - } - - /// Returns the highest block that is present in both the deposit and block caches. - pub fn highest_safe_block(&self) -> Option { - let block_cache = self.blocks().read().highest_block_number()?; - let deposit_cache = self.deposits().read().last_processed_block?; - - Some(std::cmp::min(block_cache, deposit_cache)) - } - - /// Returns the number of currently cached blocks. 
- pub fn block_cache_len(&self) -> usize { - self.blocks().read().len() - } - - /// Returns the number deposits available in the deposit cache. - pub fn deposit_cache_len(&self) -> usize { - self.deposits().read().cache.len() - } - - /// Returns the number of deposits with valid signatures that have been observed. - pub fn get_valid_signature_count(&self) -> Option { - let highest_safe_block = self.highest_safe_block()?; - self.deposits() - .read() - .cache - .get_valid_signature_count(highest_safe_block) - } - - /// Returns the number of deposits with valid signatures that have been observed, without - /// respecting the `highest_safe_block`. - pub fn get_raw_valid_signature_count(&self) -> Option { - let deposits = self.deposits().read(); - deposits - .cache - .get_valid_signature_count(deposits.cache.latest_block_number()) - } - - /// Returns the number of deposits with valid signatures that have been observed up to and - /// including the block at `block_number`. - /// - /// Returns `None` if the `block_number` is zero or prior to contract deployment. - pub fn get_valid_signature_count_at_block(&self, block_number: u64) -> Option { - self.deposits() - .read() - .cache - .get_valid_signature_count(block_number) - } - - /// Read the service's configuration. - pub fn config(&self) -> RwLockReadGuard { - self.inner.config.read() - } - - /// Updates the configuration in `self to be `new_config`. - /// - /// Will truncate the block cache if the new configure specifies truncation. - pub fn update_config(&self, new_config: Config) -> Result<(), String> { - let mut old_config = self.inner.config.write(); - - if new_config.deposit_contract_deploy_block != old_config.deposit_contract_deploy_block { - // This may be possible, I just haven't looked into the details to ensure it's safe. - Err("Updating deposit_contract_deploy_block is not supported".to_string()) - } else { - *old_config = new_config; - - // Prevents a locking condition when calling prune_blocks. 
- drop(old_config); - - self.inner.prune_blocks(); - - Ok(()) - } - } - - /// Set the lowest block that the block cache will store. - /// - /// Note: this block may not always be present if truncating is enabled. - pub fn set_lowest_cached_block(&self, block_number: u64) { - self.inner.config.write().lowest_cached_block_number = block_number; - } - - /// Update the deposit and block cache, returning an error if either fail. - /// - /// ## Returns - /// - /// - Ok(_) if the update was successful (the cache may or may not have been modified). - /// - Err(_) if there is an error. - /// - /// Emits logs for debugging and errors. - pub async fn update( - &self, - ) -> Result<(DepositCacheUpdateOutcome, BlockCacheUpdateOutcome), String> { - let client = self.client(); - let chain_id = self.config().chain_id.clone(); - let node_far_behind_seconds = self.inner.config.read().node_far_behind_seconds; - - match endpoint_state(client, &chain_id).await { - Ok(()) => crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 1), - Err(e) => { - crate::metrics::set_gauge(&metrics::ETH1_CONNECTED, 0); - return Err(format!("Invalid endpoint state: {:?}", e)); - } - } - let (remote_head_block, new_block_numbers_deposit, new_block_numbers_block_cache) = - get_remote_head_and_new_block_ranges(client, self, node_far_behind_seconds) - .await - .map_err(|e| format!("Failed to get remote head and new block ranges: {:?}", e))?; - - *self.inner.remote_head_block.write() = Some(remote_head_block); - - let update_deposit_cache = async { - let outcome_result = self - .update_deposit_cache(Some(new_block_numbers_deposit)) - .await; - - // Reset the `last_procesed block` to the last valid deposit's block number. - // This will ensure that the next batch of blocks fetched is immediately after - // the last cached valid deposit allowing us to recover from scenarios where - // the deposit cache gets corrupted due to invalid responses from eth1 nodes. 
- if let Err(Error::FailedToInsertDeposit(DepositCacheError::NonConsecutive { - log_index: _, - expected: _, - })) = &outcome_result - { - let mut deposit_cache = self.inner.deposit_cache.write(); - debug!( - old_block_number = deposit_cache.last_processed_block, - new_block_number = deposit_cache.cache.latest_block_number(), - "Resetting last processed block" - ); - deposit_cache.last_processed_block = - Some(deposit_cache.cache.latest_block_number()); - } - - let outcome = - outcome_result.map_err(|e| format!("Failed to update deposit cache: {:?}", e))?; - - trace!( - cached_deposits = self.inner.deposit_cache.read().cache.len(), - logs_imported = outcome.logs_imported, - last_processed_execution_block = - self.inner.deposit_cache.read().last_processed_block, - "Updated deposit cache" - ); - Ok::<_, String>(outcome) - }; - - let update_block_cache = async { - let outcome = self - .update_block_cache(Some(new_block_numbers_block_cache)) - .await - .map_err(|e| format!("Failed to update deposit contract block cache: {:?}", e))?; - - trace!( - cached_blocks = self.inner.block_cache.read().len(), - blocks_imported = outcome.blocks_imported, - head_block = outcome.head_block_number, - "Updated deposit contract block cache" - ); - Ok::<_, String>(outcome) - }; - - let (deposit_outcome, block_outcome) = - futures::try_join!(update_deposit_cache, update_block_cache)?; - - Ok((deposit_outcome, block_outcome)) - } - - /// A looping future that updates the cache, then waits `config.auto_update_interval` before - /// updating it again. - /// - /// ## Returns - /// - /// - Ok(_) if the update was successful (the cache may or may not have been modified). - /// - Err(_) if there is an error. - /// - /// Emits logs for debugging and errors. 
- pub fn auto_update(self, handle: task_executor::TaskExecutor) { - let update_interval = Duration::from_millis(self.config().auto_update_interval_millis); - - let mut interval = interval_at(Instant::now(), update_interval); - - let update_future = async move { - loop { - interval.tick().await; - self.do_update(update_interval).await.ok(); - } - }; - - handle.spawn(update_future, "eth1"); - } - - async fn do_update(&self, update_interval: Duration) -> Result<(), ()> { - let update_result = self.update().await; - match update_result { - Err(e) => error!( - retry_millis = update_interval.as_millis(), - error = e, - "Error updating deposit contract cache" - ), - Ok((deposit, block)) => debug!( - retry_millis = update_interval.as_millis(), - ?block, - ?deposit, - "Updated deposit contract cache" - ), - }; - let optional_eth1data = self.inner.to_finalize.write().take(); - if let Some(eth1data_to_finalize) = optional_eth1data { - let already_finalized = self - .inner - .deposit_cache - .read() - .cache - .finalized_deposit_count(); - let deposit_count_to_finalize = eth1data_to_finalize.deposit_count; - if deposit_count_to_finalize > already_finalized { - match self.finalize_deposits(eth1data_to_finalize) { - Err(e) => warn!( - error = ?e, - info = "this should resolve on its own", - "Failed to finalize deposit cache" - ), - Ok(()) => info!( - finalized_deposit_count = deposit_count_to_finalize, - "Successfully finalized deposit tree" - ), - } - } else { - debug!( - %already_finalized, - %deposit_count_to_finalize, - "Deposits tree already finalized" - ); - } - } - Ok(()) - } - - /// Returns the range of new block numbers to be considered for the given head type. 
- fn relevant_new_block_numbers( - &self, - remote_highest_block_number: u64, - remote_highest_block_timestamp: Option, - head_type: HeadType, - ) -> Result>, Error> { - let follow_distance = self.cache_follow_distance(); - let latest_cached_block = self.latest_cached_block(); - let next_required_block = match head_type { - HeadType::Deposit => self - .deposits() - .read() - .last_processed_block - .map(|n| n + 1) - .unwrap_or_else(|| self.config().deposit_contract_deploy_block), - HeadType::BlockCache => latest_cached_block - .as_ref() - .map(|block| block.number + 1) - .unwrap_or_else(|| self.config().lowest_cached_block_number), - }; - - relevant_block_range( - remote_highest_block_number, - remote_highest_block_timestamp, - next_required_block, - follow_distance, - latest_cached_block.as_ref(), - &self.inner.spec, - ) - } - - pub fn finalize_deposits(&self, eth1_data: Eth1Data) -> Result<(), Error> { - let eth1_block = self - .inner - .block_cache - .read() - .block_by_hash(ð1_data.block_hash) - .cloned() - .ok_or_else(|| { - Error::FailedToFinalizeDeposit(format!( - "Finalized block not found in block cache: {:?}", - eth1_data.block_hash - )) - })?; - self.inner - .deposit_cache - .write() - .cache - .finalize(eth1_block) - .map_err(|e| Error::FailedToFinalizeDeposit(format!("{:?}", e))) - } - - pub fn get_deposit_snapshot(&self) -> Option { - self.inner.deposit_cache.read().cache.get_deposit_snapshot() - } - - /// Contacts the remote eth1 node and attempts to import deposit logs up to the configured - /// follow-distance block. - /// - /// Will process no more than `BLOCKS_PER_LOG_QUERY * MAX_LOG_REQUESTS_PER_UPDATE` blocks in a - /// single update. - /// - /// If `remote_highest_block_opt` is `Some`, use that value instead of querying `self.endpoint` - /// for the head of the eth1 chain. - /// - /// ## Resolves with - /// - /// - Ok(_) if the update was successful (the cache may or may not have been modified). - /// - Err(_) if there is an error. 
- /// - /// Emits logs for debugging and errors. - pub async fn update_deposit_cache( - &self, - new_block_numbers: Option>>, - ) -> Result { - let client = self.client(); - let deposit_contract_address = self.config().deposit_contract_address.clone(); - - let blocks_per_log_query = self.config().blocks_per_log_query; - let max_log_requests_per_update = self - .config() - .max_log_requests_per_update - .unwrap_or(usize::MAX); - - let range = { - match new_block_numbers { - Some(range) => range, - None => { - relevant_new_block_numbers_from_endpoint(client, self, HeadType::Deposit) - .await? - } - } - }; - - let block_number_chunks = if let Some(range) = range { - range - .collect::>() - .chunks(blocks_per_log_query) - .take(max_log_requests_per_update) - .map(|vec| { - let first = vec.first().cloned().unwrap_or(0); - let last = vec.last().map(|n| n + 1).unwrap_or(0); - first..last - }) - .collect::>>() - } else { - Vec::new() - }; - - let mut logs_imported: usize = 0; - let deposit_contract_address_ref: &str = &deposit_contract_address; - for block_range in block_number_chunks.into_iter() { - if block_range.is_empty() { - debug!("No new blocks to scan for logs"); - continue; - } - - /* - * Step 1. Download logs. - */ - let block_range_ref = &block_range; - let logs = client - .get_deposit_logs_in_range( - deposit_contract_address_ref, - block_range_ref.clone(), - Duration::from_millis(GET_DEPOSIT_LOG_TIMEOUT_MILLIS), - ) - .await - .map_err(Error::GetDepositLogsFailed)?; - - /* - * Step 2. Import logs to cache. - */ - let mut cache = self.deposits().write(); - logs.iter() - .map(|raw_log| { - raw_log.to_deposit_log(self.inner.spec()).map_err(|error| { - Error::FailedToParseDepositLog { - block_range: block_range.clone(), - error, - } - }) - }) - // Return early if any of the logs cannot be parsed. - // - // This costs an additional `collect`, however it enforces that no logs are - // imported if any one of them cannot be parsed. - .collect::, _>>()? 
- .into_iter() - // Returns if a deposit is unable to be added to the cache. - // - // If this error occurs, the cache will no longer be guaranteed to hold either - // none or all of the logs for each block (i.e., they may exist _some_ logs for - // a block, but not _all_ logs for that block). This scenario can cause the - // node to choose an invalid genesis state or propose an invalid block. - .try_for_each(|deposit_log| { - if let DepositCacheInsertOutcome::Inserted = cache - .cache - .insert_log(deposit_log) - .map_err(Error::FailedToInsertDeposit)? - { - logs_imported += 1; - } - - Ok::<_, Error>(()) - })?; - - debug!(logs = logs.len(), "Imported deposit logs chunk"); - - cache.last_processed_block = Some(block_range.end.saturating_sub(1)); - - metrics::set_gauge(&metrics::DEPOSIT_CACHE_LEN, cache.cache.len() as i64); - metrics::set_gauge( - &metrics::HIGHEST_PROCESSED_DEPOSIT_BLOCK, - cache.last_processed_block.unwrap_or(0) as i64, - ); - } - - if logs_imported > 0 { - info!( - latest_block = self.inner.deposit_cache.read().cache.latest_block_number(), - total = self.deposit_cache_len(), - new = logs_imported, - "Imported deposit log(s)" - ); - } else { - debug!( - latest_block = self.inner.deposit_cache.read().cache.latest_block_number(), - total_deposits = self.deposit_cache_len(), - "No new deposits found" - ); - } - - Ok(DepositCacheUpdateOutcome { logs_imported }) - } - - /// Contacts the remote eth1 node and attempts to import all blocks up to the configured - /// follow-distance block. - /// - /// If configured, prunes the block cache after importing new blocks. - /// - /// If `remote_highest_block_opt` is `Some`, use that value instead of querying `self.endpoint` - /// for the head of the eth1 chain. - /// - /// ## Resolves with - /// - /// - Ok(_) if the update was successful (the cache may or may not have been modified). - /// - Err(_) if there is an error. - /// - /// Emits logs for debugging and errors. 
- pub async fn update_block_cache( - &self, - new_block_numbers: Option>>, - ) -> Result { - let client = self.client(); - let block_cache_truncation = self.config().block_cache_truncation; - let max_blocks_per_update = self.config().max_blocks_per_update.unwrap_or(usize::MAX); - - let range = { - match new_block_numbers { - Some(range) => range, - None => { - relevant_new_block_numbers_from_endpoint(client, self, HeadType::BlockCache) - .await? - } - } - }; - - // Map the range of required blocks into a Vec. - // - // If the required range is larger than the size of the cache, drop the exiting cache - // because it's exipred and just download enough blocks to fill the cache. - let required_block_numbers = if let Some(range) = range { - if range.start() > range.end() { - // Note: this check is not strictly necessary, however it remains to safe - // guard against any regression which may cause an underflow in a following - // subtraction operation. - return Err(Error::Internal("Range was not increasing".into())); - } else { - let range_size = range.end() - range.start(); - let max_size = block_cache_truncation - .map(|n| n as u64) - .unwrap_or_else(|| u64::MAX); - if range_size > max_size { - // If the range of required blocks is larger than `max_size`, drop all - // existing blocks and download `max_size` count of blocks. - let first_block = range.end() - max_size; - (*self.inner.block_cache.write()) = BlockCache::default(); - (first_block..=*range.end()).collect::>() - } else { - range.collect::>() - } - } - } else { - Vec::new() - }; - - // This value is used to prevent the block cache from importing a block that is not yet in - // the deposit cache. 
- let latest_in_cache = self - .inner - .deposit_cache - .read() - .last_processed_block - .unwrap_or(0); - - let required_block_numbers = required_block_numbers - .into_iter() - .filter(|x| *x <= latest_in_cache) - .take(max_blocks_per_update) - .collect::>(); - - debug!( - first = ?required_block_numbers.first(), - last = ?required_block_numbers.last(), - "Downloading execution blocks" - ); - - // Produce a stream from the list of required block numbers and return a future that - // consumes the it. - - let mut blocks_imported = 0; - for block_number in required_block_numbers { - let eth1_block = - download_eth1_block(client, self.inner.clone(), Some(block_number)).await?; - - self.inner - .block_cache - .write() - .insert_root_or_child(eth1_block) - .map_err(Error::FailedToInsertEth1Block)?; - - metrics::set_gauge( - &metrics::BLOCK_CACHE_LEN, - self.inner.block_cache.read().len() as i64, - ); - metrics::set_gauge( - &metrics::LATEST_CACHED_BLOCK_TIMESTAMP, - self.inner - .block_cache - .read() - .latest_block_timestamp() - .unwrap_or(0) as i64, - ); - - blocks_imported += 1; - } - - // Prune the block cache, preventing it from growing too large. 
- self.inner.prune_blocks(); - - metrics::set_gauge( - &metrics::BLOCK_CACHE_LEN, - self.inner.block_cache.read().len() as i64, - ); - - let block_cache = self.inner.block_cache.read(); - let latest_block_mins = block_cache - .latest_block_timestamp() - .and_then(|timestamp| { - SystemTime::now() - .duration_since(UNIX_EPOCH) - .ok() - .and_then(|now| now.checked_sub(Duration::from_secs(timestamp))) - }) - .map(|duration| format!("{} mins", duration.as_secs() / 60)) - .unwrap_or_else(|| "n/a".into()); - - if blocks_imported > 0 { - debug!( - latest_block_age = latest_block_mins, - latest_block = block_cache.highest_block_number(), - total_cached_blocks = block_cache.len(), - new = %blocks_imported, - "Imported execution block(s)" - ); - } else { - debug!( - latest_block = block_cache.highest_block_number(), - cached_blocks = block_cache.len(), - "No new execution blocks imported" - ); - } - - Ok(BlockCacheUpdateOutcome { - blocks_imported, - head_block_number: block_cache.highest_block_number(), - }) - } -} - -/// Returns the range of blocks starting from `next_required_block` that are at least -/// `follow_distance` many blocks before `remote_highest_block`. -/// Returns an error if `next_required_block > remote_highest_block + 1` which means the remote went -/// backwards. -fn relevant_block_range( - remote_highest_block_number: u64, - remote_highest_block_timestamp: Option, - next_required_block: u64, - cache_follow_distance: u64, - latest_cached_block: Option<&Eth1Block>, - spec: &ChainSpec, -) -> Result>, Error> { - // If the latest cached block is lagging the head block by more than `cache_follow_distance` - // times the expected block time then the eth1 block time is likely quite different from what we - // assumed. - // - // In order to catch up, load batches of `CATCHUP_BATCH_SIZE` until the situation rights itself. 
- // Note that we need to check this condition before the regular follow distance condition - // or we will keep downloading small numbers of blocks. - if let (Some(remote_highest_block_timestamp), Some(latest_cached_block)) = - (remote_highest_block_timestamp, latest_cached_block) - { - let lagging = latest_cached_block.timestamp - + cache_follow_distance * spec.seconds_per_eth1_block - < remote_highest_block_timestamp; - let end_block = std::cmp::max( - std::cmp::min( - remote_highest_block_number.saturating_sub(CATCHUP_MIN_FOLLOW_DISTANCE), - next_required_block + CATCHUP_BATCH_SIZE, - ), - remote_highest_block_number.saturating_sub(cache_follow_distance), - ); - if lagging && next_required_block <= end_block { - return Ok(Some(next_required_block..=end_block)); - } - } - - let remote_follow_block = remote_highest_block_number.saturating_sub(cache_follow_distance); - if next_required_block <= remote_follow_block { - Ok(Some(next_required_block..=remote_follow_block)) - } else if next_required_block > remote_highest_block_number + 1 { - // If this is the case, the node must have gone "backwards" in terms of it's sync - // (i.e., it's head block is lower than it was before). - // - // We assume that the `cache_follow_distance` should be sufficient to ensure this never - // happens, otherwise it is an error. - Err(Error::RemoteNotSynced { - next_required_block, - remote_highest_block: remote_highest_block_number, - cache_follow_distance, - }) - } else { - // Return an empty range. - Ok(None) - } -} - -/// Downloads the `(block, deposit_root, deposit_count)` tuple from an eth1 node for the given -/// `block_number`. -/// -/// Set `block_number_opt = None` to get the "latest" eth1 block (i.e., the head). -/// -/// Performs three async calls to an Eth1 HTTP JSON RPC endpoint. 
-async fn download_eth1_block( - endpoint: &HttpJsonRpc, - cache: Arc, - block_number_opt: Option, -) -> Result { - let deposit_root = block_number_opt.and_then(|block_number| { - cache - .deposit_cache - .read() - .cache - .get_deposit_root_from_cache(block_number) - }); - - let deposit_count = block_number_opt.and_then(|block_number| { - cache - .deposit_cache - .read() - .cache - .get_deposit_count_from_cache(block_number) - }); - - // Performs a `get_blockByNumber` call to an eth1 node. - let http_block = endpoint - .get_block( - block_number_opt - .map(BlockQuery::Number) - .unwrap_or_else(|| BlockQuery::Latest), - Duration::from_millis(GET_BLOCK_TIMEOUT_MILLIS), - ) - .map_err(Error::BlockDownloadFailed) - .await?; - - Ok(Eth1Block { - hash: http_block.hash, - number: http_block.number, - timestamp: http_block.timestamp, - deposit_root, - deposit_count, - }) -} - -#[cfg(test)] -mod tests { - use super::*; - use types::MainnetEthSpec; - - #[test] - // Ensures the default config does not panic. 
- fn default_config() { - Config::default(); - } - - #[test] - fn serde_serialize() { - let serialized = - serde_yaml::to_string(&Config::default()).expect("Should serde encode default config"); - serde_yaml::from_str::(&serialized).expect("Should serde decode default config"); - } - - #[test] - fn block_cache_size() { - let mut config = Config::default(); - - let spec = MainnetEthSpec::default_spec(); - - config.set_block_cache_truncation::(&spec); - - let len = config.block_cache_truncation.unwrap(); - - let seconds_per_voting_period = - ::SlotsPerEth1VotingPeriod::to_u64() * spec.seconds_per_slot; - let eth1_blocks_per_voting_period = seconds_per_voting_period / spec.seconds_per_eth1_block; - let cache_follow_distance_blocks = config.follow_distance - config.cache_follow_distance(); - - let minimum_len = eth1_blocks_per_voting_period * 2 + cache_follow_distance_blocks; - - assert!(len > minimum_len as usize); - } -} diff --git a/beacon_node/eth1/tests/test.rs b/beacon_node/eth1/tests/test.rs deleted file mode 100644 index 48ed1892598..00000000000 --- a/beacon_node/eth1/tests/test.rs +++ /dev/null @@ -1,836 +0,0 @@ -#![cfg(test)] -use environment::{Environment, EnvironmentBuilder}; -use eth1::{Config, Eth1Endpoint, Service}; -use eth1::{DepositCache, DEFAULT_CHAIN_ID}; -use eth1_test_rig::{AnvilEth1Instance, Http, Middleware, Provider}; -use execution_layer::http::{deposit_methods::*, HttpJsonRpc, Log}; -use logging::create_test_tracing_subscriber; -use merkle_proof::verify_merkle_proof; -use sensitive_url::SensitiveUrl; -use std::ops::Range; -use std::sync::Arc; -use std::time::Duration; -use tree_hash::TreeHash; -use types::{ - DepositData, EthSpec, FixedBytesExtended, Hash256, Keypair, MainnetEthSpec, MinimalEthSpec, - Signature, -}; - -const DEPOSIT_CONTRACT_TREE_DEPTH: usize = 32; - -pub fn new_env() -> Environment { - create_test_tracing_subscriber(); - EnvironmentBuilder::minimal() - .multi_threaded_tokio_runtime() - .expect("should start tokio runtime") - 
.build() - .expect("should build env") -} - -fn timeout() -> Duration { - Duration::from_secs(2) -} - -fn random_deposit_data() -> DepositData { - let keypair = Keypair::random(); - - let mut deposit = DepositData { - pubkey: keypair.pk.into(), - withdrawal_credentials: Hash256::zero(), - amount: 32_000_000_000, - signature: Signature::empty().into(), - }; - - deposit.signature = deposit.create_signature(&keypair.sk, &MainnetEthSpec::default_spec()); - - deposit -} - -/// Blocking operation to get the deposit logs from the `deposit_contract`. -async fn blocking_deposit_logs( - client: &HttpJsonRpc, - eth1: &AnvilEth1Instance, - range: Range, -) -> Vec { - client - .get_deposit_logs_in_range(ð1.deposit_contract.address(), range, timeout()) - .await - .expect("should get logs") -} - -/// Blocking operation to get the deposit root from the `deposit_contract`. -async fn blocking_deposit_root( - client: &HttpJsonRpc, - eth1: &AnvilEth1Instance, - block_number: u64, -) -> Option { - client - .get_deposit_root(ð1.deposit_contract.address(), block_number, timeout()) - .await - .expect("should get deposit root") -} - -/// Blocking operation to get the deposit count from the `deposit_contract`. 
-async fn blocking_deposit_count( - client: &HttpJsonRpc, - eth1: &AnvilEth1Instance, - block_number: u64, -) -> Option { - client - .get_deposit_count(ð1.deposit_contract.address(), block_number, timeout()) - .await - .expect("should get deposit count") -} - -async fn get_block_number(client: &Provider) -> u64 { - client - .get_block_number() - .await - .map(|v| v.as_u64()) - .expect("should get block number") -} - -async fn new_anvil_instance() -> Result { - AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()).await -} - -mod eth1_cache { - use super::*; - - #[tokio::test] - async fn simple_scenario() { - create_test_tracing_subscriber(); - async { - for follow_distance in 0..3 { - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let initial_block_number = get_block_number(&anvil_client).await; - - let config = Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: initial_block_number, - follow_distance, - ..Config::default() - }; - let cache_follow_distance = config.cache_follow_distance(); - - let service = - Service::new(config, Arc::new(MainnetEthSpec::default_spec())).unwrap(); - - // Create some blocks and then consume them, performing the test `rounds` times. 
- for round in 0..2 { - let blocks = 4; - - let initial = if round == 0 { - initial_block_number - } else { - service - .blocks() - .read() - .highest_block_number() - .map(|n| n + cache_follow_distance) - .expect("should have a latest block after the first round") - }; - - for _ in 0..blocks { - eth1.anvil.evm_mine().await.expect("should mine block"); - } - - service - .update_deposit_cache(None) - .await - .expect("should update deposit cache"); - service - .update_block_cache(None) - .await - .expect("should update block cache"); - - service - .update_block_cache(None) - .await - .expect("should update cache when nothing has changed"); - - assert_eq!( - service - .blocks() - .read() - .highest_block_number() - .map(|n| n + cache_follow_distance), - Some(initial + blocks), - "should update {} blocks in round {} (follow {} i.e. {})", - blocks, - round, - follow_distance, - cache_follow_distance - ); - } - } - } - .await; - } - - /// Tests the case where we attempt to download more blocks than will fit in the cache. 
- - #[tokio::test] - async fn big_skip() { - create_test_tracing_subscriber(); - async { - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let cache_len = 4; - - let service = Service::new( - Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&anvil_client).await, - follow_distance: 0, - block_cache_truncation: Some(cache_len), - ..Config::default() - }, - Arc::new(MainnetEthSpec::default_spec()), - ) - .unwrap(); - - let blocks = cache_len * 2; - - for _ in 0..blocks { - eth1.anvil.evm_mine().await.expect("should mine block") - } - - service - .update_deposit_cache(None) - .await - .expect("should update deposit cache"); - service - .update_block_cache(None) - .await - .expect("should update block cache"); - - assert_eq!( - service.block_cache_len(), - cache_len, - "should not grow cache beyond target" - ); - } - .await; - } - - /// Tests to ensure that the cache gets pruned when doing multiple downloads smaller than the - /// cache size. 
- #[tokio::test] - async fn pruning() { - create_test_tracing_subscriber(); - async { - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let cache_len = 4; - - let service = Service::new( - Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&anvil_client).await, - follow_distance: 0, - block_cache_truncation: Some(cache_len), - ..Config::default() - }, - Arc::new(MainnetEthSpec::default_spec()), - ) - .unwrap(); - - for _ in 0..4u8 { - for _ in 0..cache_len / 2 { - eth1.anvil.evm_mine().await.expect("should mine block") - } - service - .update_deposit_cache(None) - .await - .expect("should update deposit cache"); - service - .update_block_cache(None) - .await - .expect("should update block cache"); - } - - assert_eq!( - service.block_cache_len(), - cache_len, - "should not grow cache beyond target" - ); - } - .await; - } - - #[tokio::test] - async fn double_update() { - create_test_tracing_subscriber(); - async { - let n = 16; - - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let service = Service::new( - Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - lowest_cached_block_number: get_block_number(&anvil_client).await, - follow_distance: 0, - ..Config::default() - }, - Arc::new(MainnetEthSpec::default_spec()), - ) - .unwrap(); - - for _ in 0..n { - eth1.anvil.evm_mine().await.expect("should mine block") - } - - futures::try_join!( - service.update_deposit_cache(None), - service.update_deposit_cache(None) - ) - .expect("should perform two 
simultaneous updates of deposit cache"); - futures::try_join!( - service.update_block_cache(None), - service.update_block_cache(None) - ) - .expect("should perform two simultaneous updates of block cache"); - - assert!(service.block_cache_len() >= n, "should grow the cache"); - } - .await; - } -} - -mod deposit_tree { - - use super::*; - - #[tokio::test] - async fn updating() { - create_test_tracing_subscriber(); - async { - let n = 4; - - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let start_block = get_block_number(&anvil_client).await; - - let service = Service::new( - Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: start_block, - follow_distance: 0, - ..Config::default() - }, - Arc::new(MainnetEthSpec::default_spec()), - ) - .unwrap(); - - for round in 0..3 { - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - - for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) - .await - .expect("should perform a deposit"); - } - - service - .update_deposit_cache(None) - .await - .expect("should perform update"); - - service - .update_deposit_cache(None) - .await - .expect("should perform update when nothing has changed"); - - let first = n * round; - let last = n * (round + 1); - - let (_root, local_deposits) = service - .deposits() - .read() - .cache - .get_deposits(first, last, last) - .unwrap_or_else(|_| panic!("should get deposits in round {}", round)); - - assert_eq!( - local_deposits.len(), - n as usize, - "should get the right number of deposits in round {}", - round - ); - - assert_eq!( - local_deposits - .iter() - .map(|d| d.data.clone()) - .collect::>(), - deposits.to_vec(), - "obtained deposits should match those submitted in round {}", - 
round - ); - } - } - .await; - } - - #[tokio::test] - async fn double_update() { - create_test_tracing_subscriber(); - async { - let n = 8; - - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let start_block = get_block_number(&anvil_client).await; - - let service = Service::new( - Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: start_block, - lowest_cached_block_number: start_block, - follow_distance: 0, - ..Config::default() - }, - Arc::new(MainnetEthSpec::default_spec()), - ) - .unwrap(); - - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - - for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) - .await - .expect("should perform a deposit"); - } - - futures::try_join!( - service.update_deposit_cache(None), - service.update_deposit_cache(None) - ) - .expect("should perform two updates concurrently"); - - assert_eq!(service.deposit_cache_len(), n); - } - .await; - } - - #[tokio::test] - async fn cache_consistency() { - async { - let n = 8; - - let spec = &MainnetEthSpec::default_spec(); - - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let mut deposit_roots = vec![]; - let mut deposit_counts = vec![]; - - let client = - HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); - - // Perform deposits to the smart contract, recording it's state along the way. 
- for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) - .await - .expect("should perform a deposit"); - let block_number = get_block_number(&anvil_client).await; - deposit_roots.push( - blocking_deposit_root(&client, ð1, block_number) - .await - .expect("should get root if contract exists"), - ); - deposit_counts.push( - blocking_deposit_count(&client, ð1, block_number) - .await - .expect("should get count if contract exists"), - ); - } - - let mut tree = DepositCache::default(); - - // Pull all the deposit logs from the contract. - let block_number = get_block_number(&anvil_client).await; - let logs: Vec<_> = blocking_deposit_logs(&client, ð1, 0..block_number) - .await - .iter() - .map(|raw| raw.to_deposit_log(spec).expect("should parse deposit log")) - .inspect(|log| { - tree.insert_log(log.clone()) - .expect("should add consecutive logs"); - }) - .collect(); - - // Check the logs for invariants. - for i in 0..logs.len() { - let log = &logs[i]; - assert_eq!( - log.deposit_data, deposits[i], - "log {} should have correct deposit data", - i - ); - assert_eq!(log.index, i as u64, "log {} should have correct index", i); - } - - // For each deposit test some more invariants - for i in 0..n { - // Ensure the deposit count from the smart contract was as expected. - assert_eq!( - deposit_counts[i], - i as u64 + 1, - "deposit count should be accurate" - ); - - // Ensure that the root from the deposit tree matches what the contract reported. - let (root, deposits) = tree - .get_deposits(0, i as u64, deposit_counts[i]) - .expect("should get deposits"); - assert_eq!( - root, deposit_roots[i], - "tree deposit root {} should match the contract", - i - ); - - // Ensure that the deposits all prove into the root from the smart contract. 
- let deposit_root = deposit_roots[i]; - for (j, deposit) in deposits.iter().enumerate() { - assert!( - verify_merkle_proof( - deposit.data.tree_hash_root(), - &deposit.proof, - DEPOSIT_CONTRACT_TREE_DEPTH + 1, - j, - deposit_root - ), - "deposit merkle proof should prove into deposit contract root" - ) - } - } - } - .await; - } -} - -/// Tests for the base HTTP requests and response handlers. -mod http { - use super::*; - - async fn get_block(client: &HttpJsonRpc, block_number: u64) -> Block { - client - .get_block(BlockQuery::Number(block_number), timeout()) - .await - .expect("should get block number") - } - - #[tokio::test] - async fn incrementing_deposits() { - async { - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - let client = - HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); - - let block_number = get_block_number(&anvil_client).await; - let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; - assert_eq!(logs.len(), 0); - - let mut old_root = blocking_deposit_root(&client, ð1, block_number).await; - let mut old_block = get_block(&client, block_number).await; - let mut old_block_number = block_number; - - assert_eq!( - blocking_deposit_count(&client, ð1, block_number).await, - Some(0), - "should have deposit count zero" - ); - - for i in 1..=8 { - eth1.anvil - .increase_time(1) - .await - .expect("should be able to increase time on anvil"); - - deposit_contract - .deposit(random_deposit_data()) - .await - .expect("should perform a deposit"); - - // Check the logs. - let block_number = get_block_number(&anvil_client).await; - let logs = blocking_deposit_logs(&client, ð1, 0..block_number).await; - assert_eq!(logs.len(), i, "the number of logs should be as expected"); - - // Check the deposit count. 
- assert_eq!( - blocking_deposit_count(&client, ð1, block_number).await, - Some(i as u64), - "should have a correct deposit count" - ); - - // Check the deposit root. - let new_root = blocking_deposit_root(&client, ð1, block_number).await; - assert_ne!( - new_root, old_root, - "deposit root should change with each deposit" - ); - old_root = new_root; - - // Check the block hash. - let new_block = get_block(&client, block_number).await; - assert_ne!( - new_block.hash, old_block.hash, - "block hash should change with each deposit" - ); - - // Check to ensure the timestamp is increasing - assert!( - old_block.timestamp <= new_block.timestamp, - "block timestamp should increase" - ); - - old_block = new_block.clone(); - - // Check the block number. - assert!( - block_number > old_block_number, - "block number should increase" - ); - old_block_number = block_number; - - // Check to ensure the block root is changing - assert_ne!( - new_root, - Some(new_block.hash), - "the deposit root should be different to the block hash" - ); - } - } - .await; - } -} - -mod fast { - use super::*; - - // Adds deposits into deposit cache and matches deposit_count and deposit_root - // with the deposit count and root computed from the deposit cache. 
- #[tokio::test] - async fn deposit_cache_query() { - create_test_tracing_subscriber(); - async { - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let now = get_block_number(&anvil_client).await; - let spec = Arc::new(MainnetEthSpec::default_spec()); - let service = Service::new( - Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: now, - lowest_cached_block_number: now, - follow_distance: 0, - block_cache_truncation: None, - ..Config::default() - }, - spec.clone(), - ) - .unwrap(); - let client = - HttpJsonRpc::new(SensitiveUrl::parse(ð1.endpoint()).unwrap(), None).unwrap(); - let n = 10; - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) - .await - .expect("should perform a deposit"); - // Mine an extra block between deposits to test for corner cases - eth1.anvil.evm_mine().await.expect("should mine block"); - } - - service - .update_deposit_cache(None) - .await - .expect("should perform update"); - - assert!( - service.deposit_cache_len() >= n, - "should have imported n deposits" - ); - - for block_num in 0..=get_block_number(&anvil_client).await { - let expected_deposit_count = - blocking_deposit_count(&client, ð1, block_num).await; - let expected_deposit_root = blocking_deposit_root(&client, ð1, block_num).await; - - let deposit_count = service - .deposits() - .read() - .cache - .get_deposit_count_from_cache(block_num); - let deposit_root = service - .deposits() - .read() - .cache - .get_deposit_root_from_cache(block_num); - assert_eq!( - expected_deposit_count, deposit_count, - "deposit count from cache should match queried" - ); - assert_eq!( - expected_deposit_root, deposit_root, - 
"deposit root from cache should match queried" - ); - } - } - .await; - } -} - -mod persist { - use super::*; - #[tokio::test] - async fn test_persist_caches() { - create_test_tracing_subscriber(); - async { - let eth1 = new_anvil_instance() - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let anvil_client = eth1.json_rpc_client(); - - let now = get_block_number(&anvil_client).await; - let config = Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: now, - lowest_cached_block_number: now, - follow_distance: 0, - block_cache_truncation: None, - ..Config::default() - }; - let service = - Service::new(config.clone(), Arc::new(MainnetEthSpec::default_spec())).unwrap(); - let n = 10; - let deposits: Vec<_> = (0..n).map(|_| random_deposit_data()).collect(); - for deposit in &deposits { - deposit_contract - .deposit(deposit.clone()) - .await - .expect("should perform a deposit"); - } - - service - .update_deposit_cache(None) - .await - .expect("should perform update"); - - assert!( - service.deposit_cache_len() >= n, - "should have imported n deposits" - ); - - let deposit_count = service.deposit_cache_len(); - - service - .update_block_cache(None) - .await - .expect("should perform update"); - - assert!( - service.block_cache_len() >= n, - "should have imported n eth1 blocks" - ); - - let block_count = service.block_cache_len(); - - let eth1_bytes = service.as_bytes(); - - // Drop service and recover from bytes - drop(service); - - let recovered_service = Service::from_bytes( - ð1_bytes, - config, - Arc::new(MainnetEthSpec::default_spec()), - ) - .unwrap(); - assert_eq!( - recovered_service.block_cache_len(), - block_count, - "Should have equal cached blocks as before recovery" - ); - assert_eq!( - recovered_service.deposit_cache_len(), - deposit_count, - "Should have equal 
cached deposits as before recovery" - ); - } - .await; - } -} diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index f752b888a77..8f6f3516fc5 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -5,21 +5,12 @@ authors = ["Paul Hauner "] edition = { workspace = true } [dependencies] -environment = { workspace = true } -eth1 = { workspace = true } ethereum_hashing = { workspace = true } ethereum_ssz = { workspace = true } -futures = { workspace = true } int_to_bytes = { workspace = true } merkle_proof = { workspace = true } rayon = { workspace = true } state_processing = { workspace = true } -tokio = { workspace = true } tracing = { workspace = true } tree_hash = { workspace = true } types = { workspace = true } - -[dev-dependencies] -eth1_test_rig = { workspace = true } -logging = { workspace = true } -sensitive_url = { workspace = true } diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs deleted file mode 100644 index dede96512c0..00000000000 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ /dev/null @@ -1,461 +0,0 @@ -pub use crate::common::genesis_deposits; -pub use eth1::Config as Eth1Config; - -use eth1::{DepositLog, Eth1Block, Service as Eth1Service}; -use state_processing::{ - eth2_genesis_time, initialize_beacon_state_from_eth1, is_valid_genesis_state, - per_block_processing::process_operations::apply_deposit, process_activations, -}; -use std::sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, - Arc, -}; -use std::time::Duration; -use tokio::time::sleep; -use tracing::{debug, error, info, trace}; -use types::{BeaconState, ChainSpec, Deposit, Eth1Data, EthSpec, FixedBytesExtended, Hash256}; - -/// The number of blocks that are pulled per request whilst waiting for genesis. -const BLOCKS_PER_GENESIS_POLL: usize = 99; - -/// Stats about the eth1 genesis process. 
-pub struct Statistics { - highest_processed_block: AtomicU64, - active_validator_count: AtomicUsize, - total_deposit_count: AtomicUsize, - latest_timestamp: AtomicU64, -} - -/// Provides a service that connects to some Eth1 HTTP JSON-RPC endpoint and maintains a cache of -/// eth1 blocks and deposits, listening for the eth1 block that triggers eth2 genesis and returning -/// the genesis `BeaconState`. -/// -/// Is a wrapper around the `Service` struct of the `eth1` crate. -#[derive(Clone)] -pub struct Eth1GenesisService { - /// The underlying service. Access to this object is only required for testing and diagnosis. - pub eth1_service: Eth1Service, - /// Statistics about genesis progress. - stats: Arc, -} - -impl Eth1GenesisService { - /// Creates a new service. Does not attempt to connect to the Eth1 node. - /// - /// Modifies the given `config` to make it more suitable to the task of listening to genesis. - pub fn new(config: Eth1Config, spec: Arc) -> Result { - let config = Eth1Config { - // Truncating the block cache makes searching for genesis more - // complicated. - block_cache_truncation: None, - // Scan large ranges of blocks when awaiting genesis. - blocks_per_log_query: 1_000, - // Only perform a few log requests each time the eth1 node is polled. - // - // For small testnets this makes finding genesis much faster, - // as it usually happens within 1,000 blocks. - max_log_requests_per_update: Some(5), - // Only perform a few logs requests each time the eth1 node is polled. - // - // For small testnets, this is much faster as they do not have - // a `MIN_GENESIS_SECONDS`, so after `MIN_GENESIS_VALIDATOR_COUNT` - // has been reached only a single block needs to be read. 
- max_blocks_per_update: Some(BLOCKS_PER_GENESIS_POLL), - ..config - }; - - Ok(Self { - eth1_service: Eth1Service::new(config, spec) - .map_err(|e| format!("Failed to create eth1 service: {:?}", e))?, - stats: Arc::new(Statistics { - highest_processed_block: AtomicU64::new(0), - active_validator_count: AtomicUsize::new(0), - total_deposit_count: AtomicUsize::new(0), - latest_timestamp: AtomicU64::new(0), - }), - }) - } - - /// Returns the first eth1 block that has enough deposits that it's a (potentially invalid) - /// candidate for genesis. - fn first_candidate_eth1_block(&self, min_genesis_active_validator_count: usize) -> Option { - if self.eth1_service.deposit_cache_len() < min_genesis_active_validator_count { - None - } else { - self.eth1_service - .deposits() - .read() - .cache - .get_log(min_genesis_active_validator_count.saturating_sub(1)) - .map(|log| log.block_number) - } - } - - /// Scans the Eth1 chain, returning a genesis state once it has been discovered. - /// - /// ## Returns - /// - /// - `Ok(state)` once the canonical eth2 genesis state has been discovered. - /// - `Err(e)` if there is some internal error during updates. 
- pub async fn wait_for_genesis_state( - &self, - update_interval: Duration, - ) -> Result, String> { - let eth1_service = &self.eth1_service; - let spec = eth1_service.chain_spec(); - - let mut sync_blocks = false; - let mut highest_processed_block = None; - - info!("Importing eth1 deposit logs"); - - loop { - let update_result = eth1_service - .update_deposit_cache(None) - .await - .map_err(|e| format!("{:?}", e)); - - if let Err(e) = update_result { - error!(error = e, "Failed to update eth1 deposit cache") - } - - self.stats - .total_deposit_count - .store(eth1_service.deposit_cache_len(), Ordering::Relaxed); - - if !sync_blocks { - if let Some(viable_eth1_block) = self - .first_candidate_eth1_block(spec.min_genesis_active_validator_count as usize) - { - info!("Importing eth1 blocks"); - self.eth1_service.set_lowest_cached_block(viable_eth1_block); - sync_blocks = true - } else { - info!( - min_genesis_active_validators = spec.min_genesis_active_validator_count, - total_deposits = eth1_service.deposit_cache_len(), - valid_deposits = eth1_service.get_raw_valid_signature_count(), - "Waiting for more deposits" - ); - - sleep(update_interval).await; - - continue; - } - } - - // Download new eth1 blocks into the cache. - let blocks_imported = match eth1_service.update_block_cache(None).await { - Ok(outcome) => { - debug!( - latest_block_timestamp = eth1_service.latest_block_timestamp(), - cache_head = eth1_service.highest_safe_block(), - count = outcome.blocks_imported, - "Imported eth1 blocks" - ); - outcome.blocks_imported - } - Err(e) => { - error!( - error = ?e, - "Failed to update eth1 block cache" - ); - 0 - } - }; - - // Scan the new eth1 blocks, searching for genesis. - if let Some(genesis_state) = - self.scan_new_blocks::(&mut highest_processed_block, spec)? - { - info!( - genesis_validators = genesis_state - .get_active_validator_indices(E::genesis_epoch(), spec) - .map_err(|e| format!("Genesis validators error: {:?}", e))? 
- .len(), - genesis_time = genesis_state.genesis_time(), - "Genesis ceremony complete" - ); - break Ok(genesis_state); - } - - // Drop all the scanned blocks as they are no longer required. - eth1_service.clear_block_cache(); - - // Load some statistics from the atomics. - let active_validator_count = self.stats.active_validator_count.load(Ordering::Relaxed); - let total_deposit_count = self.stats.total_deposit_count.load(Ordering::Relaxed); - let latest_timestamp = self.stats.latest_timestamp.load(Ordering::Relaxed); - - // Perform some logging. - if timestamp_can_trigger_genesis(latest_timestamp, spec)? { - // Indicate that we are awaiting adequate active validators. - if (active_validator_count as u64) < spec.min_genesis_active_validator_count { - info!( - min_genesis_active_validators = spec.min_genesis_active_validator_count, - active_validators = active_validator_count, - total_deposits = total_deposit_count, - valid_deposits = eth1_service.get_valid_signature_count().unwrap_or(0), - "Waiting for more validators" - ); - } - } else { - info!( - genesis_delay = spec.genesis_delay, - genesis_time = spec.min_genesis_time, - latest_eth1_timestamp = latest_timestamp, - "Waiting for adequate eth1 timestamp" - ); - } - - // If we imported the full number of blocks, poll again in a short amount of time. - // - // We assume that if we imported a large chunk of blocks then we're some distance from - // the head and we should sync faster. - if blocks_imported >= BLOCKS_PER_GENESIS_POLL { - sleep(Duration::from_millis(50)).await; - } else { - sleep(update_interval).await; - } - } - } - - /// Processes any new blocks that have appeared since this function was last run. - /// - /// Blocks are always tested in increasing order, starting with the lowest unknown block - /// number in the cache. - /// - /// ## Returns - /// - /// - `Ok(Some(eth1_block))` if a previously-unprocessed block would trigger Eth2 genesis. 
- /// - `Ok(None)` if none of the new blocks would trigger genesis, or there were no new blocks. - /// - `Err(_)` if there was some internal error. - fn scan_new_blocks( - &self, - highest_processed_block: &mut Option, - spec: &ChainSpec, - ) -> Result>, String> { - let eth1_service = &self.eth1_service; - - for block in eth1_service.blocks().read().iter() { - // It's possible that the block and deposit caches aren't synced. Ignore any blocks - // which are not safe for both caches. - // - // Don't update the highest processed block since we want to come back and process this - // again later. - if eth1_service - .highest_safe_block() - .is_none_or(|n| block.number > n) - { - continue; - } - - // Ignore any block that has already been processed or update the highest processed - // block. - if highest_processed_block.is_some_and(|highest| highest >= block.number) { - continue; - } else { - self.stats - .highest_processed_block - .store(block.number, Ordering::Relaxed); - self.stats - .latest_timestamp - .store(block.timestamp, Ordering::Relaxed); - - *highest_processed_block = Some(block.number) - } - - // Ignore any block with an insufficient timestamp. - if !timestamp_can_trigger_genesis(block.timestamp, spec)? { - trace!( - genesis_delay = spec.genesis_delay, - min_genesis_time = spec.min_genesis_time, - eth1_block_timestamp = block.timestamp, - eth1_block_number = block.number, - "Insufficient block timestamp" - ); - continue; - } - - let valid_signature_count = eth1_service - .get_valid_signature_count_at_block(block.number) - .unwrap_or(0); - if (valid_signature_count as u64) < spec.min_genesis_active_validator_count { - trace!( - genesis_delay = spec.genesis_delay, - valid_signature_count = valid_signature_count, - min_validator_count = spec.min_genesis_active_validator_count, - eth1_block_number = block.number, - "Insufficient valid signatures" - ); - continue; - } - - // Generate a potential beacon state for this eth1 block. 
- // - // Note: this state is fully valid, some fields have been bypassed to make verification - // faster. - let state = self.cheap_state_at_eth1_block::(block, spec)?; - let active_validator_count = state - .get_active_validator_indices(E::genesis_epoch(), spec) - .map_err(|e| format!("Genesis validators error: {:?}", e))? - .len(); - - self.stats - .active_validator_count - .store(active_validator_count, Ordering::Relaxed); - - if is_valid_genesis_state(&state, spec) { - let genesis_state = self - .genesis_from_eth1_block(block.clone(), spec) - .map_err(|e| format!("Failed to generate valid genesis state : {}", e))?; - - return Ok(Some(genesis_state)); - } else { - trace!( - min_genesis_active_validator_count = - format!("{}", spec.min_genesis_active_validator_count), - active_validators = active_validator_count, - eth1_block_number = block.number, - "Insufficient active validators" - ); - } - } - - Ok(None) - } - - /// Produces an eth2 genesis `BeaconState` from the given `eth1_block`. The caller should have - /// verified that `eth1_block` produces a valid genesis state. - /// - /// ## Returns - /// - /// - `Ok(genesis_state)`: if all went well. - /// - `Err(e)`: if the given `eth1_block` was not a viable block to trigger genesis or there was - /// an internal error. 
- fn genesis_from_eth1_block( - &self, - eth1_block: Eth1Block, - spec: &ChainSpec, - ) -> Result, String> { - let deposit_logs = self - .eth1_service - .deposits() - .read() - .cache - .iter() - .take_while(|log| log.block_number <= eth1_block.number) - .map(|log| log.deposit_data.clone()) - .collect::>(); - - let genesis_state = initialize_beacon_state_from_eth1( - eth1_block.hash, - eth1_block.timestamp, - genesis_deposits(deposit_logs, spec)?, - None, - spec, - ) - .map_err(|e| format!("Unable to initialize genesis state: {:?}", e))?; - - if is_valid_genesis_state(&genesis_state, spec) { - Ok(genesis_state) - } else { - Err("Generated state was not valid.".to_string()) - } - } - - /// Generates an incomplete `BeaconState` for some `eth1_block` that can be used for checking - /// to see if that `eth1_block` triggers eth2 genesis. - /// - /// ## Notes - /// - /// The returned `BeaconState` should **not** be used as the genesis state, it is - /// incomplete. - fn cheap_state_at_eth1_block( - &self, - eth1_block: &Eth1Block, - spec: &ChainSpec, - ) -> Result, String> { - let genesis_time = eth2_genesis_time(eth1_block.timestamp, spec) - .map_err(|e| format!("Unable to set genesis time: {:?}", e))?; - - let mut state: BeaconState = BeaconState::new( - genesis_time, - Eth1Data { - block_hash: Hash256::zero(), - deposit_root: Hash256::zero(), - deposit_count: 0, - }, - spec, - ); - - self.deposit_logs_at_block(eth1_block.number) - .iter() - .map(|deposit_log| Deposit { - // Generate a bogus proof. - // - // The deposits are coming directly from our own deposit tree to there's no need to - // make proofs about their inclusion in it. - proof: vec![Hash256::zero(); spec.deposit_contract_tree_depth as usize].into(), - data: deposit_log.deposit_data.clone(), - }) - .try_for_each(|deposit| { - // Skip proof verification (see comment about bogus proof generation). 
- const PROOF_VERIFICATION: bool = false; - - // Note: presently all the signatures are verified each time this function is - // run. - // - // It would be more efficient to pre-verify signatures, filter out the invalid - // ones and disable verification for `process_deposit`. - // - // Such an optimization would only be useful in a scenario where `MIN_GENESIS_TIME` - // is reached _prior_ to `MIN_ACTIVE_VALIDATOR_COUNT`. I suspect this won't be the - // case for mainnet, so we defer this optimization. - let Deposit { proof, data } = deposit; - let proof = if PROOF_VERIFICATION { - Some(proof) - } else { - None - }; - - apply_deposit(&mut state, data, proof, true, spec) - .map_err(|e| format!("Error whilst processing deposit: {:?}", e)) - })?; - - process_activations(&mut state, spec) - .map_err(|e| format!("Error whilst processing activations: {:?}", e))?; - - Ok(state) - } - - /// Returns all deposit logs included in `block_number` and all prior blocks. - fn deposit_logs_at_block(&self, block_number: u64) -> Vec { - self.eth1_service - .deposits() - .read() - .cache - .iter() - .take_while(|log| log.block_number <= block_number) - .cloned() - .collect() - } - - /// Returns statistics about eth1 genesis. - pub fn statistics(&self) -> &Statistics { - &self.stats - } - - /// Returns the `Service` contained in `self`. - pub fn into_core_service(self) -> Eth1Service { - self.eth1_service - } -} - -/// Returns `false` for a timestamp that would result in a genesis time that is earlier than -/// `MIN_GENESIS_TIME`. 
-fn timestamp_can_trigger_genesis(timestamp: u64, spec: &ChainSpec) -> Result { - eth2_genesis_time(timestamp, spec) - .map(|t| t >= spec.min_genesis_time) - .map_err(|e| format!("Arith error when during genesis calculation: {:?}", e)) -} diff --git a/beacon_node/genesis/src/lib.rs b/beacon_node/genesis/src/lib.rs index 1fba64aafb3..35f0b0e3801 100644 --- a/beacon_node/genesis/src/lib.rs +++ b/beacon_node/genesis/src/lib.rs @@ -1,10 +1,6 @@ mod common; -mod eth1_genesis_service; mod interop; -pub use eth1::Config as Eth1Config; -pub use eth1::Eth1Endpoint; -pub use eth1_genesis_service::{Eth1GenesisService, Statistics}; pub use interop::{ bls_withdrawal_credentials, interop_genesis_state, interop_genesis_state_with_eth1, InteropGenesisBuilder, DEFAULT_ETH1_BLOCK_HASH, diff --git a/beacon_node/genesis/tests/tests.rs b/beacon_node/genesis/tests/tests.rs deleted file mode 100644 index b5710e50fd4..00000000000 --- a/beacon_node/genesis/tests/tests.rs +++ /dev/null @@ -1,107 +0,0 @@ -#![cfg(test)] -use environment::{Environment, EnvironmentBuilder}; -use eth1::{Eth1Endpoint, DEFAULT_CHAIN_ID}; -use eth1_test_rig::{AnvilEth1Instance, DelayThenDeposit, Middleware}; -use genesis::{Eth1Config, Eth1GenesisService}; -use logging::create_test_tracing_subscriber; -use sensitive_url::SensitiveUrl; -use state_processing::is_valid_genesis_state; -use std::sync::Arc; -use std::time::Duration; -use types::{ - test_utils::generate_deterministic_keypair, FixedBytesExtended, Hash256, MinimalEthSpec, -}; - -pub fn new_env() -> Environment { - create_test_tracing_subscriber(); - EnvironmentBuilder::minimal() - .multi_threaded_tokio_runtime() - .expect("should start tokio runtime") - .build() - .expect("should build env") -} - -#[test] -fn basic() { - let env = new_env(); - let mut spec = (*env.eth2_config().spec).clone(); - spec.min_genesis_time = 0; - spec.min_genesis_active_validator_count = 8; - let spec = Arc::new(spec); - - env.runtime().block_on(async { - let eth1 = 
AnvilEth1Instance::new(DEFAULT_CHAIN_ID.into()) - .await - .expect("should start eth1 environment"); - let deposit_contract = ð1.deposit_contract; - let client = eth1.json_rpc_client(); - - let now = client - .get_block_number() - .await - .map(|v| v.as_u64()) - .expect("should get block number"); - - let service = Eth1GenesisService::new( - Eth1Config { - endpoint: Eth1Endpoint::NoAuth( - SensitiveUrl::parse(eth1.endpoint().as_str()).unwrap(), - ), - deposit_contract_address: deposit_contract.address(), - deposit_contract_deploy_block: now, - lowest_cached_block_number: now, - follow_distance: 0, - block_cache_truncation: None, - ..Eth1Config::default() - }, - spec.clone(), - ) - .unwrap(); - - // NOTE: this test is sensitive to the response speed of the external web3 server. If - // you're experiencing failures, try increasing the update_interval. - let update_interval = Duration::from_millis(500); - - let deposits = (0..spec.min_genesis_active_validator_count + 2) - .map(|i| { - deposit_contract.deposit_helper::( - generate_deterministic_keypair(i as usize), - Hash256::from_low_u64_le(i), - 32_000_000_000, - ) - }) - .map(|deposit| DelayThenDeposit { - delay: Duration::from_secs(0), - deposit, - }) - .collect::>(); - - let deposit_future = deposit_contract.deposit_multiple(deposits); - - let wait_future = service.wait_for_genesis_state::(update_interval); - - let state = futures::try_join!(deposit_future, wait_future) - .map(|(_, state)| state) - .expect("should finish waiting for genesis"); - - // Note: using anvil these deposits are 1-per-block, therefore we know there should only be - // the minimum number of validators. 
- assert_eq!( - state.validators().len(), - spec.min_genesis_active_validator_count as usize, - "should have expected validator count" - ); - - assert!(state.genesis_time() > 0, "should have some genesis time"); - - assert!( - is_valid_genesis_state(&state, &spec), - "should be valid genesis state" - ); - - assert!( - is_valid_genesis_state(&state, &spec), - "should be valid genesis state" - ); - }); -} diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index afc68ad96d4..781a4cfa44e 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -12,7 +12,6 @@ bs58 = "0.4.0" bytes = { workspace = true } directory = { workspace = true } either = { workspace = true } -eth1 = { workspace = true } eth2 = { workspace = true } ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 73b20197c13..a3e214de862 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -130,7 +130,6 @@ pub struct Context { pub network_senders: Option>, pub network_globals: Option>>, pub beacon_processor_send: Option>, - pub eth1_service: Option, pub sse_logging_components: Option, } @@ -214,7 +213,6 @@ pub fn prometheus_metrics() -> warp::filters::log::Log( } }); - // Create a `warp` filter that provides access to the Eth1 service. - let inner_ctx = ctx.clone(); - let eth1_service_filter = warp::any() - .map(move || inner_ctx.eth1_service.clone()) - .and_then(|eth1_service| async move { - match eth1_service { - Some(eth1_service) => Ok(eth1_service), - None => Err(warp_utils::reject::custom_not_found( - "The Eth1 service is not started. Use --eth1 on the CLI.".to_string(), - )), - } - }); - // Create a `warp` filter that rejects requests whilst the node is syncing. 
let not_while_syncing_filter = warp::any() @@ -2396,56 +2381,6 @@ pub fn serve( }, ); - // GET beacon/deposit_snapshot - let get_beacon_deposit_snapshot = eth_v1 - .and(warp::path("beacon")) - .and(warp::path("deposit_snapshot")) - .and(warp::path::end()) - .and(warp::header::optional::("accept")) - .and(task_spawner_filter.clone()) - .and(eth1_service_filter.clone()) - .then( - |accept_header: Option, - task_spawner: TaskSpawner, - eth1_service: eth1::Service| { - task_spawner.blocking_response_task(Priority::P1, move || match accept_header { - Some(api_types::Accept::Ssz) => eth1_service - .get_deposit_snapshot() - .map(|snapshot| { - Response::builder() - .status(200) - .body(snapshot.as_ssz_bytes().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }) - }) - .unwrap_or_else(|| { - Response::builder() - .status(503) - .body(Vec::new().into()) - .map(|res: Response| add_ssz_content_type_header(res)) - .map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "failed to create response: {}", - e - )) - }) - }), - _ => { - let snapshot = eth1_service.get_deposit_snapshot(); - Ok( - warp::reply::json(&api_types::GenericResponse::from(snapshot)) - .into_response(), - ) - } - }) - }, - ); - let beacon_rewards_path = eth_v1 .and(warp::path("beacon")) .and(warp::path("rewards")) @@ -4536,105 +4471,17 @@ pub fn serve( }, ); - // GET lighthouse/eth1/syncing - let get_lighthouse_eth1_syncing = warp::path("lighthouse") - .and(warp::path("eth1")) - .and(warp::path("syncing")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - let current_slot_opt = chain.slot().ok(); - - chain - .eth1_chain - .as_ref() - .ok_or_else(|| { - warp_utils::reject::custom_not_found( - "Eth1 sync is disabled. 
See the --eth1 CLI flag.".to_string(), - ) - }) - .and_then(|eth1| { - eth1.sync_status(chain.genesis_time, current_slot_opt, &chain.spec) - .ok_or_else(|| { - warp_utils::reject::custom_server_error( - "Unable to determine Eth1 sync status".to_string(), - ) - }) - }) - .map(api_types::GenericResponse::from) - }) - }, - ); - - // GET lighthouse/eth1/block_cache - let get_lighthouse_eth1_block_cache = warp::path("lighthouse") - .and(warp::path("eth1")) - .and(warp::path("block_cache")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(eth1_service_filter.clone()) - .then( - |task_spawner: TaskSpawner, eth1_service: eth1::Service| { - task_spawner.blocking_json_task(Priority::P1, move || { - Ok(api_types::GenericResponse::from( - eth1_service - .blocks() - .read() - .iter() - .cloned() - .collect::>(), - )) - }) - }, - ); - - // GET lighthouse/eth1/deposit_cache - let get_lighthouse_eth1_deposit_cache = warp::path("lighthouse") - .and(warp::path("eth1")) - .and(warp::path("deposit_cache")) - .and(warp::path::end()) - .and(task_spawner_filter.clone()) - .and(eth1_service_filter) - .then( - |task_spawner: TaskSpawner, eth1_service: eth1::Service| { - task_spawner.blocking_json_task(Priority::P1, move || { - Ok(api_types::GenericResponse::from( - eth1_service - .deposits() - .read() - .cache - .iter() - .cloned() - .collect::>(), - )) - }) - }, - ); - // GET lighthouse/staking let get_lighthouse_staking = warp::path("lighthouse") .and(warp::path("staking")) .and(warp::path::end()) .and(task_spawner_filter.clone()) - .and(chain_filter.clone()) - .then( - |task_spawner: TaskSpawner, chain: Arc>| { - task_spawner.blocking_json_task(Priority::P1, move || { - if chain.eth1_chain.is_some() { - Ok(()) - } else { - Err(warp_utils::reject::custom_not_found( - "staking is not enabled, \ - see the --staking CLI flag" - .to_string(), - )) - } - }) - }, - ); + .then(|task_spawner: TaskSpawner| { + // This API is fairly useless since we abolished the distinction 
between staking and + // non-staking nodes. We keep it for backwards-compatibility with LH v7.0.0, and in case + // we want to reintroduce the distinction in future. + task_spawner.blocking_json_task(Priority::P1, move || Ok(())) + }); let database_path = warp::path("lighthouse").and(warp::path("database")); @@ -4936,7 +4783,6 @@ pub fn serve( .uor(get_beacon_pool_proposer_slashings) .uor(get_beacon_pool_voluntary_exits) .uor(get_beacon_pool_bls_to_execution_changes) - .uor(get_beacon_deposit_snapshot) .uor(get_beacon_rewards_blocks) .uor(get_config_fork_schedule) .uor(get_config_spec) @@ -4968,9 +4814,6 @@ pub fn serve( .uor(get_lighthouse_proto_array) .uor(get_lighthouse_validator_inclusion_global) .uor(get_lighthouse_validator_inclusion) - .uor(get_lighthouse_eth1_syncing) - .uor(get_lighthouse_eth1_block_cache) - .uor(get_lighthouse_eth1_deposit_cache) .uor(get_lighthouse_staking) .uor(get_lighthouse_database_info) .uor(get_lighthouse_block_rewards) diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs index 9c285f4039f..0ea85881254 100644 --- a/beacon_node/http_api/src/test_utils.rs +++ b/beacon_node/http_api/src/test_utils.rs @@ -188,8 +188,6 @@ pub async fn create_api_server_with_config( })); *network_globals.sync_state.write() = SyncState::Synced; - let eth1_service = eth1::Service::new(eth1::Config::default(), chain.spec.clone()).unwrap(); - let beacon_processor_config = BeaconProcessorConfig { // The number of workers must be greater than one. 
Tests which use the // builder workflow sometimes require an internal HTTP request in order @@ -236,7 +234,6 @@ pub async fn create_api_server_with_config( network_senders: Some(network_senders), network_globals: Some(network_globals), beacon_processor_send: Some(beacon_processor_send), - eth1_service: Some(eth1_service), sse_logging_components: None, }); diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 843242c22f7..28b81c2bdaa 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -317,7 +317,7 @@ pub async fn consensus_gossip() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0x253405be9aa159bce7b276b8e1d3849c743e673118dfafe8c7d07c203ae0d80d }".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective, but nonetheless equivocates, is accepted when using `broadcast_validation=consensus`. 
@@ -604,7 +604,7 @@ pub async fn equivocation_gossip() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0x253405be9aa159bce7b276b8e1d3849c743e673118dfafe8c7d07c203ae0d80d }".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective but @@ -1002,7 +1002,7 @@ pub async fn blinded_consensus_gossip() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0x253405be9aa159bce7b276b8e1d3849c743e673118dfafe8c7d07c203ae0d80d }".to_string()); } /// This test checks that a block that is valid from both a gossip and consensus perspective is accepted when using `broadcast_validation=consensus`. 
@@ -1212,7 +1212,7 @@ pub async fn blinded_equivocation_gossip() { /* mandated by Beacon API spec */ assert_eq!(error_response.status(), Some(StatusCode::BAD_REQUEST)); - assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0xfc675d642ff7a06458eb33c7d7b62a5813e34d1b2bb1aee3e395100b579da026 }".to_string()); + assert_server_message_error(error_response, "BAD_REQUEST: Invalid block: StateRootMismatch { block: 0x0000000000000000000000000000000000000000000000000000000000000000, local: 0x253405be9aa159bce7b276b8e1d3849c743e673118dfafe8c7d07c203ae0d80d }".to_string()); } /// This test checks that a block that is valid from both a gossip and diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index c23ab924159..fe3e3747274 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -5955,40 +5955,6 @@ impl ApiTester { self } - pub async fn test_get_lighthouse_eth1_syncing(self) -> Self { - self.client.get_lighthouse_eth1_syncing().await.unwrap(); - - self - } - - pub async fn test_get_lighthouse_eth1_block_cache(self) -> Self { - let blocks = self.client.get_lighthouse_eth1_block_cache().await.unwrap(); - - assert!(blocks.data.is_empty()); - - self - } - - pub async fn test_get_lighthouse_eth1_deposit_cache(self) -> Self { - let deposits = self - .client - .get_lighthouse_eth1_deposit_cache() - .await - .unwrap(); - - assert!(deposits.data.is_empty()); - - self - } - - pub async fn test_get_lighthouse_staking(self) -> Self { - let result = self.client.get_lighthouse_staking().await.unwrap(); - - assert_eq!(result, self.chain.eth1_chain.is_some()); - - self - } - pub async fn test_post_lighthouse_database_reconstruct(self) -> Self { let response = self .client @@ -7700,14 +7666,6 @@ async fn lighthouse_endpoints() { .await .test_get_lighthouse_validator_inclusion_global() .await - 
.test_get_lighthouse_eth1_syncing() - .await - .test_get_lighthouse_eth1_block_cache() - .await - .test_get_lighthouse_eth1_deposit_cache() - .await - .test_get_lighthouse_staking() - .await .test_post_lighthouse_database_reconstruct() .await .test_post_lighthouse_liveness() diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index 637792ab37f..f7c3a1bf8db 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -1096,16 +1096,13 @@ impl NetworkBeaconProcessor { #[cfg(test)] use { - beacon_chain::{builder::Witness, eth1_chain::CachingEth1Backend}, - beacon_processor::BeaconProcessorChannels, - slot_clock::ManualSlotClock, - store::MemoryStore, - tokio::sync::mpsc::UnboundedSender, + beacon_chain::builder::Witness, beacon_processor::BeaconProcessorChannels, + slot_clock::ManualSlotClock, store::MemoryStore, tokio::sync::mpsc::UnboundedSender, }; #[cfg(test)] pub(crate) type TestBeaconChainType = - Witness, E, MemoryStore, MemoryStore>; + Witness, MemoryStore>; #[cfg(test)] impl NetworkBeaconProcessor> { diff --git a/beacon_node/network/src/subnet_service/tests/mod.rs b/beacon_node/network/src/subnet_service/tests/mod.rs index 7fdf9047fc3..86d1be08ece 100644 --- a/beacon_node/network/src/subnet_service/tests/mod.rs +++ b/beacon_node/network/src/subnet_service/tests/mod.rs @@ -1,7 +1,6 @@ use super::*; use beacon_chain::{ builder::{BeaconChainBuilder, Witness}, - eth1_chain::CachingEth1Backend, test_utils::get_kzg, BeaconChain, }; @@ -27,7 +26,6 @@ const TEST_LOG_LEVEL: Option<&str> = None; type TestBeaconChainType = Witness< SystemTimeSlotClock, - CachingEth1Backend, MainnetEthSpec, MemoryStore, MemoryStore, @@ -70,8 +68,6 @@ impl TestBeaconChain { .expect("should generate interop state"), ) .expect("should build state using recent genesis") - .dummy_eth1_backend() - .expect("should build dummy backend") 
.slot_clock(SystemTimeSlotClock::new( Slot::new(0), Duration::from_secs(recent_genesis_time()), diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index 3dca4571086..1cc11e01525 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -3,7 +3,6 @@ use crate::sync::range_sync::RangeSyncType; use crate::sync::SyncMessage; use crate::NetworkMessage; use beacon_chain::builder::Witness; -use beacon_chain::eth1_chain::CachingEth1Backend; use beacon_chain::test_utils::{BeaconChainHarness, EphemeralHarnessType}; use beacon_processor::WorkEvent; use lighthouse_network::NetworkGlobals; @@ -22,7 +21,7 @@ use types::{ChainSpec, ForkName, MinimalEthSpec as E}; mod lookups; mod range; -type T = Witness, E, MemoryStore, MemoryStore>; +type T = Witness, MemoryStore>; /// This test utility enables integration testing of Lighthouse sync components. /// diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 4ffaec8b03b..f3f9aa97a20 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -702,54 +702,33 @@ pub fn cli_app() -> Command { /* * Eth1 Integration */ - .arg( - Arg::new("eth1") - .long("eth1") - .help("DEPRECATED") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .display_order(0) - .hide(true) - ) - .arg( - Arg::new("dummy-eth1") - .long("dummy-eth1") - .help("DEPRECATED") - .action(ArgAction::SetTrue) - .help_heading(FLAG_HEADER) - .conflicts_with("eth1") - .display_order(0) - .hide(true) - ) .arg( Arg::new("eth1-purge-cache") .long("eth1-purge-cache") .value_name("PURGE-CACHE") - .help("Purges the eth1 block and deposit caches") + .help("DEPRECATED") .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) .display_order(0) + .hide(true) ) .arg( Arg::new("eth1-blocks-per-log-query") .long("eth1-blocks-per-log-query") .value_name("BLOCKS") - .help("Specifies the number of blocks that a deposit log query should span. 
\ - This will reduce the size of responses from the Eth1 endpoint.") - .default_value("1000") + .help("DEPRECATED") .action(ArgAction::Set) .display_order(0) + .hide(true) ) .arg( Arg::new("eth1-cache-follow-distance") .long("eth1-cache-follow-distance") .value_name("BLOCKS") - .help("Specifies the distance between the Eth1 chain head and the last block which \ - should be imported into the cache. Setting this value lower can help \ - compensate for irregular Proof-of-Work block times, but setting it too low \ - can make the node vulnerable to re-orgs.") + .help("DEPRECATED") .action(ArgAction::Set) .display_order(0) + .hide(true) ) .arg( Arg::new("slots-per-restore-point") @@ -1513,13 +1492,12 @@ pub fn cli_app() -> Command { .arg( Arg::new("disable-deposit-contract-sync") .long("disable-deposit-contract-sync") - .help("Explicitly disables syncing of deposit logs from the execution node. \ - This overrides any previous option that depends on it. \ - Useful if you intend to run a non-validating beacon node.") + .help("DEPRECATED") .action(ArgAction::SetTrue) .help_heading(FLAG_HEADER) .conflicts_with("staking") .display_order(0) + .hide(true) ) .arg( Arg::new("disable-optimistic-finalized-sync") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 9bf6811496b..3c6339c03e3 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -13,12 +13,10 @@ use client::{ClientConfig, ClientGenesis}; use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR}; use environment::RuntimeContext; use execution_layer::DEFAULT_JWT_FILE; -use genesis::Eth1Endpoint; use http_api::TlsConfig; use lighthouse_network::ListenAddress; use lighthouse_network::{multiaddr::Protocol, Enr, Multiaddr, NetworkConfig, PeerIdSerialized}; use sensitive_url::SensitiveUrl; -use std::cmp::max; use std::collections::HashSet; use std::fmt::Debug; use std::fs; @@ -266,31 +264,21 @@ pub fn get_config( } /* - * Eth1 + * Deprecated Eth1 flags (can be 
removed in the next minor release after v7.1.0) */ - - if cli_args.get_flag("dummy-eth1") { - warn!("The --dummy-eth1 flag is deprecated"); - } - - if cli_args.get_flag("eth1") { - warn!("The --eth1 flag is deprecated"); - } - - if let Some(val) = cli_args.get_one::("eth1-blocks-per-log-query") { - client_config.eth1.blocks_per_log_query = val - .parse() - .map_err(|_| "eth1-blocks-per-log-query is not a valid integer".to_string())?; + if cli_args + .get_one::("eth1-blocks-per-log-query") + .is_some() + { + warn!("The eth1-blocks-per-log-query flag is deprecated"); } if cli_args.get_flag("eth1-purge-cache") { - client_config.eth1.purge_cache = true; + warn!("The eth1-purge-cache flag is deprecated"); } - if let Some(follow_distance) = - clap_utils::parse_optional(cli_args, "eth1-cache-follow-distance")? - { - client_config.eth1.cache_follow_distance = Some(follow_distance); + if clap_utils::parse_optional::(cli_args, "eth1-cache-follow-distance")?.is_some() { + warn!("The eth1-cache-follow-distance flag is deprecated"); } // `--execution-endpoint` is required now. @@ -358,13 +346,6 @@ pub fn get_config( clap_utils::parse_required(cli_args, "execution-timeout-multiplier")?; el_config.execution_timeout_multiplier = Some(execution_timeout_multiplier); - client_config.eth1.endpoint = Eth1Endpoint::Auth { - endpoint: execution_endpoint, - jwt_path: secret_file, - jwt_id: el_config.jwt_id.clone(), - jwt_version: el_config.jwt_version.clone(), - }; - // Store the EL config in the client config.
client_config.execution_layer = Some(el_config); @@ -506,20 +487,9 @@ pub fn get_config( .as_ref() .ok_or("Context is missing eth2 network config")?; - client_config.eth1.deposit_contract_address = format!("{:?}", spec.deposit_contract_address); - client_config.eth1.deposit_contract_deploy_block = - eth2_network_config.deposit_contract_deploy_block; - client_config.eth1.lowest_cached_block_number = - client_config.eth1.deposit_contract_deploy_block; - client_config.eth1.follow_distance = spec.eth1_follow_distance; - client_config.eth1.node_far_behind_seconds = - max(5, spec.eth1_follow_distance / 2) * spec.seconds_per_eth1_block; - client_config.eth1.chain_id = spec.deposit_chain_id.into(); - client_config.eth1.set_block_cache_truncation::(spec); - info!( - deploy_block = client_config.eth1.deposit_contract_deploy_block, - address = &client_config.eth1.deposit_contract_address, + deploy_block = eth2_network_config.deposit_contract_deploy_block, + address = ?spec.deposit_contract_address, "Deposit contract" ); @@ -815,9 +785,8 @@ pub fn get_config( } } - // Note: This overrides any previous flags that enable this option. 
if cli_args.get_flag("disable-deposit-contract-sync") { - client_config.sync_eth1_chain = false; + warn!("The disable-deposit-contract-sync flag is deprecated"); } client_config.chain.prepare_payload_lookahead = diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index a7f92434ce3..96abae735b2 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -2,9 +2,7 @@ mod cli; mod config; pub use beacon_chain; -use beacon_chain::{ - builder::Witness, eth1_chain::CachingEth1Backend, slot_clock::SystemTimeSlotClock, -}; +use beacon_chain::{builder::Witness, slot_clock::SystemTimeSlotClock}; use clap::ArgMatches; pub use cli::cli_app; pub use client::{Client, ClientBuilder, ClientConfig, ClientGenesis}; @@ -19,15 +17,8 @@ use tracing::{info, warn}; use types::{ChainSpec, Epoch, EthSpec, ForkName}; /// A type-alias to the tighten the definition of a production-intended `Client`. -pub type ProductionClient = Client< - Witness< - SystemTimeSlotClock, - CachingEth1Backend, - E, - BeaconNodeBackend, - BeaconNodeBackend, - >, ->; +pub type ProductionClient = + Client, BeaconNodeBackend>>; /// The beacon node `Client` that will be used in production. /// @@ -132,22 +123,7 @@ impl ProductionBeaconNode { let builder = builder .beacon_chain_builder(client_genesis, client_config.clone()) .await?; - let builder = if client_config.sync_eth1_chain { - info!( - endpoint = ?client_config.eth1.endpoint, - method = "json rpc via http", - "Block production enabled" - ); - builder - .caching_eth1_backend(client_config.eth1.clone()) - .await? - } else { - info!( - reason = "no eth1 backend configured", - "Block production disabled" - ); - builder.no_eth1_backend()? 
- }; + info!("Block production enabled"); let builder = builder.system_time_slot_clock()?; diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index d60708faca8..ede4b4435e3 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -316,6 +316,7 @@ pub enum DBColumn { BeaconChain, #[strum(serialize = "opo")] OpPool, + /// DEPRECATED. #[strum(serialize = "etc")] Eth1Cache, #[strum(serialize = "frk")] diff --git a/book/src/api_lighthouse.md b/book/src/api_lighthouse.md index b65bef47628..2eee8356b19 100644 --- a/book/src/api_lighthouse.md +++ b/book/src/api_lighthouse.md @@ -353,126 +353,6 @@ See [Validator Inclusion APIs](./api_validator_inclusion.md). See [Validator Inclusion APIs](./api_validator_inclusion.md). -## `/lighthouse/eth1/syncing` - -Returns information regarding execution layer, as it is required for use in -consensus layer - -### Fields - -- `head_block_number`, `head_block_timestamp`: the block number and timestamp -from the very head of the execution chain. Useful for understanding the immediate -health of the execution node that the beacon node is connected to. -- `latest_cached_block_number` & `latest_cached_block_timestamp`: the block -number and timestamp of the latest block we have in our block cache. - - For correct execution client voting this timestamp should be later than the -`voting_target_timestamp`. - -- `voting_target_timestamp`: The latest timestamp allowed for an execution layer block in this voting period. -- `eth1_node_sync_status_percentage` (float): An estimate of how far the head of the - execution node is from the head of the execution chain. - - `100.0` indicates a fully synced execution node. - - `0.0` indicates an execution node that has not verified any blocks past the - genesis block. -- `lighthouse_is_cached_and_ready`: Is set to `true` if the caches in the - beacon node are ready for block production. 
- - This value might be set to - `false` whilst `eth1_node_sync_status_percentage == 100.0` if the beacon - node is still building its internal cache. - - This value might be set to `true` whilst - `eth1_node_sync_status_percentage < 100.0` since the cache only cares - about blocks a certain distance behind the head. - -### Example - -```bash -curl -X GET "http://localhost:5052/lighthouse/eth1/syncing" -H "accept: application/json" | jq -``` - -```json -{ - "data": { - "head_block_number": 3611806, - "head_block_timestamp": 1603249317, - "latest_cached_block_number": 3610758, - "latest_cached_block_timestamp": 1603233597, - "voting_target_timestamp": 1603228632, - "eth1_node_sync_status_percentage": 100, - "lighthouse_is_cached_and_ready": true - } -} -``` - -## `/lighthouse/eth1/block_cache` - -Returns a list of all the execution layer blocks in the execution client voting cache. - -### Example - -```bash -curl -X GET "http://localhost:5052/lighthouse/eth1/block_cache" -H "accept: application/json" | jq -``` - -```json -{ - "data": [ - { - "hash": "0x3a17f4b7ae4ee57ef793c49ebc9c06ff85207a5e15a1d0bd37b68c5ef5710d7f", - "timestamp": 1603173338, - "number": 3606741, - "deposit_root": "0xd24920d936e8fb9b67e93fd126ce1d9e14058b6d82dcf7d35aea46879fae6dee", - "deposit_count": 88911 - }, - { - "hash": "0x78852954ea4904e5f81038f175b2adefbede74fbb2338212964405443431c1e7", - "timestamp": 1603173353, - "number": 3606742, - "deposit_root": "0xd24920d936e8fb9b67e93fd126ce1d9e14058b6d82dcf7d35aea46879fae6dee", - "deposit_count": 88911 - } - ] -} -``` - -## `/lighthouse/eth1/deposit_cache` - -Returns a list of all cached logs from the deposit contract. 
- -### Example - -```bash -curl -X GET "http://localhost:5052/lighthouse/eth1/deposit_cache" -H "accept: application/json" | jq -``` - -```json -{ - "data": [ - { - "deposit_data": { - "pubkey": "0xae9e6a550ac71490cdf134533b1688fcbdb16f113d7190eacf4f2e9ca6e013d5bd08c37cb2bde9bbdec8ffb8edbd495b", - "withdrawal_credentials": "0x0062a90ebe71c4c01c4e057d7d13b944d9705f524ebfa24290c22477ab0517e4", - "amount": "32000000000", - "signature": "0xa87a4874d276982c471e981a113f8af74a31ffa7d18898a02df2419de2a7f02084065784aa2f743d9ddf80952986ea0b012190cd866f1f2d9c633a7a33c2725d0b181906d413c82e2c18323154a2f7c7ae6f72686782ed9e423070daa00db05b" - }, - "block_number": 3086571, - "index": 0, - "signature_is_valid": false - }, - { - "deposit_data": { - "pubkey": "0xb1d0ec8f907e023ea7b8cb1236be8a74d02ba3f13aba162da4a68e9ffa2e395134658d150ef884bcfaeecdf35c286496", - "withdrawal_credentials": "0x00a6aa2a632a6c4847cf87ef96d789058eb65bfaa4cc4e0ebc39237421c22e54", - "amount": "32000000000", - "signature": "0x8d0f8ec11935010202d6dde9ab437f8d835b9cfd5052c001be5af9304f650ada90c5363022e1f9ef2392dd222cfe55b40dfd52578468d2b2092588d4ad3745775ea4d8199216f3f90e57c9435c501946c030f7bfc8dbd715a55effa6674fd5a4" - }, - "block_number": 3086579, - "index": 1, - "signature_is_valid": false - } - ] -} -``` - ## `/lighthouse/liveness` POST request that checks if any of the given validators have attested in the given epoch. Returns a list diff --git a/book/src/help_bn.md b/book/src/help_bn.md index bd425805187..b2d2af6cec9 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -122,15 +122,6 @@ Options: The number of epochs to wait between running the migration of data from the hot DB to the cold DB. Less frequent runs can be useful for minimizing disk writes [default: 1] - --eth1-blocks-per-log-query - Specifies the number of blocks that a deposit log query should span. - This will reduce the size of responses from the Eth1 endpoint. 
- [default: 1000] - --eth1-cache-follow-distance - Specifies the distance between the Eth1 chain head and the last block - which should be imported into the cache. Setting this value lower can - help compensate for irregular Proof-of-Work block times, but setting - it too low can make the node vulnerable to re-orgs. --execution-endpoint Server endpoint for an execution layer JWT-authenticated HTTP JSON-RPC connection. Uses the same endpoint to populate the deposit cache. @@ -454,10 +445,6 @@ Flags: resource contention which degrades staking performance. Stakers should generally choose to avoid this flag since backfill sync is not required for staking. - --disable-deposit-contract-sync - Explicitly disables syncing of deposit logs from the execution node. - This overrides any previous option that depends on it. Useful if you - intend to run a non-validating beacon node. --disable-enr-auto-update Discovery automatically updates the nodes local ENR with an external IP address and port as seen by other peers on the network. This @@ -499,8 +486,6 @@ Flags: --enable-private-discovery Lighthouse by default does not discover private IP addresses. Set this flag to enable connection attempts to local addresses. - --eth1-purge-cache - Purges the eth1 block and deposit caches --genesis-backfill Attempts to download blocks all the way back to genesis when checkpoint syncing. diff --git a/common/eth2/src/lib.rs b/common/eth2/src/lib.rs index 1dd2970c10e..b3f8f0becd6 100644 --- a/common/eth2/src/lib.rs +++ b/common/eth2/src/lib.rs @@ -1682,18 +1682,6 @@ impl BeaconNodeHttpClient { Ok(()) } - /// `GET beacon/deposit_snapshot` - pub async fn get_deposit_snapshot(&self) -> Result, Error> { - let mut path = self.eth_path(V1)?; - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
- .push("beacon") - .push("deposit_snapshot"); - self.get_opt_with_timeout::, _>(path, self.timeouts.get_deposit_snapshot) - .await - .map(|opt| opt.map(|r| r.data)) - } - /// `POST beacon/rewards/sync_committee` pub async fn post_beacon_rewards_sync_committee( &self, diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 9a5d9100cf5..24fb110a358 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -7,11 +7,8 @@ pub mod sync_state; use crate::{ lighthouse::sync_state::SyncState, - types::{ - AdminPeer, DepositTreeSnapshot, Epoch, FinalizedExecutionBlock, GenericResponse, - ValidatorId, - }, - BeaconNodeHttpClient, DepositData, Error, Eth1Data, Hash256, Slot, + types::{AdminPeer, Epoch, GenericResponse, ValidatorId}, + BeaconNodeHttpClient, DepositData, Error, Hash256, Slot, }; use proto_array::core::ProtoArray; use serde::{Deserialize, Serialize}; @@ -159,18 +156,6 @@ pub struct ProcessHealth { pub pid_process_seconds_total: u64, } -/// Indicates how up-to-date the Eth1 caches are. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -pub struct Eth1SyncStatusData { - pub head_block_number: Option, - pub head_block_timestamp: Option, - pub latest_cached_block_number: Option, - pub latest_cached_block_timestamp: Option, - pub voting_target_timestamp: u64, - pub eth1_node_sync_status_percentage: f64, - pub lighthouse_is_cached_and_ready: bool, -} - /// A fully parsed eth1 deposit contract log. #[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode)] pub struct DepositLog { @@ -183,41 +168,6 @@ pub struct DepositLog { pub signature_is_valid: bool, } -/// A block of the eth1 chain. 
-#[derive(Debug, PartialEq, Clone, Serialize, Deserialize, Encode, Decode)] -pub struct Eth1Block { - pub hash: Hash256, - pub timestamp: u64, - pub number: u64, - #[ssz(with = "four_byte_option_hash256")] - pub deposit_root: Option, - #[ssz(with = "four_byte_option_u64")] - pub deposit_count: Option, -} - -impl Eth1Block { - pub fn eth1_data(self) -> Option { - Some(Eth1Data { - deposit_root: self.deposit_root?, - deposit_count: self.deposit_count?, - block_hash: self.hash, - }) - } -} - -impl From for FinalizedExecutionBlock { - fn from(eth1_block: Eth1Block) -> Self { - Self { - deposit_count: eth1_block.deposit_count.unwrap_or(0), - deposit_root: eth1_block - .deposit_root - .unwrap_or_else(|| DepositTreeSnapshot::default().deposit_root), - block_hash: eth1_block.hash, - block_height: eth1_block.number, - } - } -} - impl BeaconNodeHttpClient { /// `GET lighthouse/health` pub async fn get_lighthouse_health(&self) -> Result, Error> { @@ -298,63 +248,6 @@ impl BeaconNodeHttpClient { self.get(path).await } - /// `GET lighthouse/eth1/syncing` - pub async fn get_lighthouse_eth1_syncing( - &self, - ) -> Result, Error> { - let mut path = self.server.full.clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("lighthouse") - .push("eth1") - .push("syncing"); - - self.get(path).await - } - - /// `GET lighthouse/eth1/block_cache` - pub async fn get_lighthouse_eth1_block_cache( - &self, - ) -> Result>, Error> { - let mut path = self.server.full.clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("lighthouse") - .push("eth1") - .push("block_cache"); - - self.get(path).await - } - - /// `GET lighthouse/eth1/deposit_cache` - pub async fn get_lighthouse_eth1_deposit_cache( - &self, - ) -> Result>, Error> { - let mut path = self.server.full.clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? 
- .push("lighthouse") - .push("eth1") - .push("deposit_cache"); - - self.get(path).await - } - - /// `GET lighthouse/staking` - pub async fn get_lighthouse_staking(&self) -> Result { - let mut path = self.server.full.clone(); - - path.path_segments_mut() - .map_err(|()| Error::InvalidUrl(self.server.clone()))? - .push("lighthouse") - .push("staking"); - - self.get_opt::<(), _>(path).await.map(|opt| opt.is_some()) - } - /// `POST lighthouse/database/reconstruct` pub async fn post_lighthouse_database_reconstruct(&self) -> Result { let mut path = self.server.full.clone(); diff --git a/database_manager/src/lib.rs b/database_manager/src/lib.rs index d15a8419dfc..f45e4146b77 100644 --- a/database_manager/src/lib.rs +++ b/database_manager/src/lib.rs @@ -3,8 +3,7 @@ use crate::cli::DatabaseManager; use crate::cli::Migrate; use crate::cli::PruneStates; use beacon_chain::{ - builder::Witness, eth1_chain::CachingEth1Backend, schema_change::migrate_schema, - slot_clock::SystemTimeSlotClock, + builder::Witness, schema_change::migrate_schema, slot_clock::SystemTimeSlotClock, }; use beacon_node::{get_data_dir, ClientConfig}; use clap::ArgMatches; @@ -328,7 +327,7 @@ pub fn migrate_db( "Migrating database schema" ); - migrate_schema::, _, _, _>>(db, from, to) + migrate_schema::>(db, from, to) } pub fn prune_payloads( diff --git a/lighthouse/Cargo.toml b/lighthouse/Cargo.toml index 3ca93aedf7a..fdda1696b1f 100644 --- a/lighthouse/Cargo.toml +++ b/lighthouse/Cargo.toml @@ -80,7 +80,6 @@ malloc_utils = { workspace = true } [dev-dependencies] beacon_node_fallback = { workspace = true } beacon_processor = { workspace = true } -eth1 = { workspace = true } eth2 = { workspace = true } initialized_validators = { workspace = true } lighthouse_network = { workspace = true } diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index b59244df191..26b6c8ff0ed 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -8,7 +8,6 @@ use 
beacon_node::{ beacon_chain::store::config::DatabaseBackend as BeaconNodeBackend, ClientConfig as Config, }; use beacon_processor::BeaconProcessorConfig; -use eth1::Eth1Endpoint; use lighthouse_network::PeerId; use std::fs::File; use std::io::{Read, Write}; @@ -115,11 +114,6 @@ fn staking_flag() { .run_with_zero_port() .with_config(|config| { assert!(config.http_api.enabled); - assert!(config.sync_eth1_chain); - assert_eq!( - config.eth1.endpoint.get_endpoint().to_string(), - DEFAULT_EXECUTION_ENDPOINT - ); }); } @@ -398,51 +392,24 @@ fn genesis_backfill_with_historic_flag() { // Tests for Eth1 flags. // DEPRECATED but should not crash #[test] -fn dummy_eth1_flag() { - CommandLineTest::new() - .flag("dummy-eth1", None) - .run_with_zero_port(); -} -// DEPRECATED but should not crash -#[test] -fn eth1_flag() { - CommandLineTest::new() - .flag("eth1", None) - .run_with_zero_port() - .with_config(|config| assert!(config.sync_eth1_chain)); -} -#[test] fn eth1_blocks_per_log_query_flag() { CommandLineTest::new() .flag("eth1-blocks-per-log-query", Some("500")) - .run_with_zero_port() - .with_config(|config| assert_eq!(config.eth1.blocks_per_log_query, 500)); + .run_with_zero_port(); } +// DEPRECATED but should not crash #[test] fn eth1_purge_cache_flag() { CommandLineTest::new() .flag("eth1-purge-cache", None) - .run_with_zero_port() - .with_config(|config| assert!(config.eth1.purge_cache)); -} -#[test] -fn eth1_cache_follow_distance_default() { - CommandLineTest::new() - .run_with_zero_port() - .with_config(|config| { - assert_eq!(config.eth1.cache_follow_distance, None); - assert_eq!(config.eth1.cache_follow_distance(), 3 * 2048 / 4); - }); + .run_with_zero_port(); } +// DEPRECATED but should not crash #[test] fn eth1_cache_follow_distance_manual() { CommandLineTest::new() .flag("eth1-cache-follow-distance", Some("128")) - .run_with_zero_port() - .with_config(|config| { - assert_eq!(config.eth1.cache_follow_distance, Some(128)); - 
assert_eq!(config.eth1.cache_follow_distance(), 128); - }); + .run_with_zero_port(); } // Tests for Bellatrix flags. @@ -755,8 +722,6 @@ fn test_builder_disable_ssz_flag() { } fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) { - use sensitive_url::SensitiveUrl; - let dir = TempDir::new().expect("Unable to create temporary directory"); let execution_endpoint = "http://meow.cats"; let jwt_file = "jwt-file"; @@ -772,15 +737,6 @@ fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_fl let el_config = config.execution_layer.as_ref().unwrap(); assert_eq!(el_config.jwt_id, Some(id.to_string())); assert_eq!(el_config.jwt_version, Some(version.to_string())); - assert_eq!( - config.eth1.endpoint, - Eth1Endpoint::Auth { - endpoint: SensitiveUrl::parse(execution_endpoint).unwrap(), - jwt_path: dir.path().join(jwt_file), - jwt_id: Some(id.to_string()), - jwt_version: Some(version.to_string()), - } - ); }); } #[test] @@ -2520,26 +2476,8 @@ fn logfile_format_flag() { ) }); } -#[test] -fn sync_eth1_chain_default() { - CommandLineTest::new() - .run_with_zero_port() - .with_config(|config| assert!(config.sync_eth1_chain)); -} - -#[test] -fn sync_eth1_chain_execution_endpoints_flag() { - let dir = TempDir::new().expect("Unable to create temporary directory"); - CommandLineTest::new_with_no_execution_endpoint() - .flag("execution-endpoints", Some("http://localhost:8551/")) - .flag( - "execution-jwt", - dir.path().join("jwt-file").as_os_str().to_str(), - ) - .run_with_zero_port() - .with_config(|config| assert!(config.sync_eth1_chain)); -} +// DEPRECATED but should not crash. 
#[test] fn sync_eth1_chain_disable_deposit_contract_sync_flag() { let dir = TempDir::new().expect("Unable to create temporary directory"); @@ -2550,8 +2488,7 @@ fn sync_eth1_chain_disable_deposit_contract_sync_flag() { "execution-jwt", dir.path().join("jwt-file").as_os_str().to_str(), ) - .run_with_zero_port() - .with_config(|config| assert!(!config.sync_eth1_chain)); + .run_with_zero_port(); } #[test] diff --git a/testing/eth1_test_rig/.gitignore b/testing/eth1_test_rig/.gitignore deleted file mode 100644 index 81b46ff033e..00000000000 --- a/testing/eth1_test_rig/.gitignore +++ /dev/null @@ -1 +0,0 @@ -contract/ diff --git a/testing/eth1_test_rig/Cargo.toml b/testing/eth1_test_rig/Cargo.toml deleted file mode 100644 index 9b0ac5ec9b3..00000000000 --- a/testing/eth1_test_rig/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "eth1_test_rig" -version = "0.2.0" -authors = ["Paul Hauner "] -edition = { workspace = true } - -[dependencies] -deposit_contract = { workspace = true } -ethers-contract = "1.0.2" -ethers-core = { workspace = true } -ethers-providers = { workspace = true } -hex = { workspace = true } -serde_json = { workspace = true } -tokio = { workspace = true } -types = { workspace = true } -unused_port = { workspace = true } diff --git a/testing/eth1_test_rig/src/anvil.rs b/testing/eth1_test_rig/src/anvil.rs deleted file mode 100644 index c6c37ae4a7f..00000000000 --- a/testing/eth1_test_rig/src/anvil.rs +++ /dev/null @@ -1,100 +0,0 @@ -use ethers_core::utils::{Anvil, AnvilInstance}; -use ethers_providers::{Http, Middleware, Provider}; -use serde_json::json; -use unused_port::unused_tcp4_port; - -/// Provides a dedicated `anvil` instance. -/// -/// Requires that `anvil` is installed and available on `PATH`. 
-pub struct AnvilCliInstance { - pub port: u16, - pub anvil: AnvilInstance, - pub client: Provider, - chain_id: u64, -} - -impl AnvilCliInstance { - fn new_from_child(anvil_instance: Anvil, chain_id: u64, port: u16) -> Result { - let client = Provider::::try_from(&endpoint(port)) - .map_err(|e| format!("Failed to start HTTP transport connected to anvil: {:?}", e))?; - Ok(Self { - port, - anvil: anvil_instance.spawn(), - client, - chain_id, - }) - } - pub fn new(chain_id: u64) -> Result { - let port = unused_tcp4_port()?; - - let anvil = Anvil::new() - .port(port) - .mnemonic("vast thought differ pull jewel broom cook wrist tribe word before omit") - .arg("--balance") - .arg("1000000000") - .arg("--gas-limit") - .arg("1000000000") - .arg("--accounts") - .arg("10") - .arg("--chain-id") - .arg(format!("{}", chain_id)); - - Self::new_from_child(anvil, chain_id, port) - } - - pub fn fork(&self) -> Result { - let port = unused_tcp4_port()?; - - let anvil = Anvil::new() - .port(port) - .arg("--chain-id") - .arg(format!("{}", self.chain_id())) - .fork(self.endpoint()); - - Self::new_from_child(anvil, self.chain_id, port) - } - - /// Returns the endpoint that this instance is listening on. - pub fn endpoint(&self) -> String { - endpoint(self.port) - } - - /// Returns the chain id of the anvil instance - pub fn chain_id(&self) -> u64 { - self.chain_id - } - - /// Increase the timestamp on future blocks by `increase_by` seconds. - pub async fn increase_time(&self, increase_by: u64) -> Result<(), String> { - self.client - .request("evm_increaseTime", vec![json!(increase_by)]) - .await - .map(|_json_value: u64| ()) - .map_err(|e| format!("Failed to increase time on EVM (is this anvil?): {:?}", e)) - } - - /// Returns the current block number, as u64 - pub async fn block_number(&self) -> Result { - self.client - .get_block_number() - .await - .map(|v| v.as_u64()) - .map_err(|e| format!("Failed to get block number: {:?}", e)) - } - - /// Mines a single block. 
- pub async fn evm_mine(&self) -> Result<(), String> { - self.client - .request("evm_mine", ()) - .await - .map(|_: String| ()) - .map_err(|_| { - "utils should mine new block with evm_mine (only works with anvil/ganache!)" - .to_string() - }) - } -} - -fn endpoint(port: u16) -> String { - format!("http://127.0.0.1:{}", port) -} diff --git a/testing/eth1_test_rig/src/lib.rs b/testing/eth1_test_rig/src/lib.rs deleted file mode 100644 index 3cba908261a..00000000000 --- a/testing/eth1_test_rig/src/lib.rs +++ /dev/null @@ -1,301 +0,0 @@ -//! Provides utilities for deploying and manipulating the eth2 deposit contract on the eth1 chain. -//! -//! Presently used with [`anvil`](https://github.com/foundry-rs/foundry/tree/master/crates/anvil) to simulate -//! the deposit contract for testing beacon node eth1 integration. -//! -//! Not tested to work with actual clients (e.g., geth). It should work fine, however there may be -//! some initial issues. -mod anvil; - -use anvil::AnvilCliInstance; -use deposit_contract::{ - encode_eth1_tx_data, testnet, ABI, BYTECODE, CONTRACT_DEPLOY_GAS, DEPOSIT_GAS, -}; -use ethers_contract::Contract; -use ethers_core::{ - abi::Abi, - types::{transaction::eip2718::TypedTransaction, Address, Bytes, TransactionRequest, U256}, -}; -pub use ethers_providers::{Http, Middleware, Provider}; -use std::time::Duration; -use tokio::time::sleep; -use types::{test_utils::generate_deterministic_keypair, EthSpec, Hash256, Keypair, Signature}; -use types::{DepositData, FixedBytesExtended}; - -pub const DEPLOYER_ACCOUNTS_INDEX: usize = 0; -pub const DEPOSIT_ACCOUNTS_INDEX: usize = 0; - -/// Provides a dedicated anvil instance with the deposit contract already deployed. 
-pub struct AnvilEth1Instance { - pub anvil: AnvilCliInstance, - pub deposit_contract: DepositContract, -} - -impl AnvilEth1Instance { - pub async fn new(chain_id: u64) -> Result { - let anvil = AnvilCliInstance::new(chain_id)?; - DepositContract::deploy(anvil.client.clone(), 0, None) - .await - .map(|deposit_contract| Self { - anvil, - deposit_contract, - }) - } - - pub fn endpoint(&self) -> String { - self.anvil.endpoint() - } - - pub fn json_rpc_client(&self) -> Provider { - self.anvil.client.clone() - } -} - -/// Deploys and provides functions for the eth2 deposit contract, deployed on the eth1 chain. -#[derive(Clone, Debug)] -pub struct DepositContract { - client: Provider, - contract: Contract>, -} - -impl DepositContract { - pub async fn deploy( - client: Provider, - confirmations: usize, - password: Option, - ) -> Result { - Self::deploy_bytecode(client, confirmations, BYTECODE, ABI, password).await - } - - pub async fn deploy_testnet( - client: Provider, - confirmations: usize, - password: Option, - ) -> Result { - Self::deploy_bytecode( - client, - confirmations, - testnet::BYTECODE, - testnet::ABI, - password, - ) - .await - } - - async fn deploy_bytecode( - client: Provider, - confirmations: usize, - bytecode: &[u8], - abi: &[u8], - password: Option, - ) -> Result { - let abi = Abi::load(abi).map_err(|e| format!("Invalid deposit contract abi: {:?}", e))?; - let address = - deploy_deposit_contract(client.clone(), confirmations, bytecode.to_vec(), password) - .await - .map_err(|e| { - format!( - "Failed to deploy contract: {}. Is the RPC server running?.", - e - ) - })?; - - let contract = Contract::new(address, abi, client.clone()); - Ok(Self { client, contract }) - } - - /// The deposit contract's address in `0x00ab...` format. - pub fn address(&self) -> String { - format!("0x{:x}", self.contract.address()) - } - - /// A helper to return a fully-formed `DepositData`. Does not submit the deposit data to the - /// smart contact. 
- pub fn deposit_helper( - &self, - keypair: Keypair, - withdrawal_credentials: Hash256, - amount: u64, - ) -> DepositData { - let mut deposit = DepositData { - pubkey: keypair.pk.into(), - withdrawal_credentials, - amount, - signature: Signature::empty().into(), - }; - - deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); - - deposit - } - - /// Creates a random, valid deposit and submits it to the deposit contract. - /// - /// The keypairs are created randomly and destroyed. - pub async fn deposit_random(&self) -> Result<(), String> { - let keypair = Keypair::random(); - - let mut deposit = DepositData { - pubkey: keypair.pk.into(), - withdrawal_credentials: Hash256::zero(), - amount: 32_000_000_000, - signature: Signature::empty().into(), - }; - - deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); - - self.deposit(deposit).await - } - - /// Perfoms a blocking deposit. - pub async fn deposit(&self, deposit_data: DepositData) -> Result<(), String> { - self.deposit_async(deposit_data) - .await - .map_err(|e| format!("Deposit failed: {:?}", e)) - } - - pub async fn deposit_deterministic_async( - &self, - keypair_index: usize, - amount: u64, - ) -> Result<(), String> { - let keypair = generate_deterministic_keypair(keypair_index); - - let mut deposit = DepositData { - pubkey: keypair.pk.into(), - withdrawal_credentials: Hash256::zero(), - amount, - signature: Signature::empty().into(), - }; - - deposit.signature = deposit.create_signature(&keypair.sk, &E::default_spec()); - - self.deposit_async(deposit).await - } - - /// Performs a non-blocking deposit. 
- pub async fn deposit_async(&self, deposit_data: DepositData) -> Result<(), String> { - let from = self - .client - .get_accounts() - .await - .map_err(|e| format!("Failed to get accounts: {:?}", e)) - .and_then(|accounts| { - accounts - .get(DEPOSIT_ACCOUNTS_INDEX) - .cloned() - .ok_or_else(|| "Insufficient accounts for deposit".to_string()) - })?; - // Note: the reason we use this `TransactionRequest` instead of just using the - // function in `self.contract` is so that the `eth1_tx_data` function gets used - // during testing. - // - // It's important that `eth1_tx_data` stays correct and does not suffer from - // code-rot. - let tx_request = TransactionRequest::new() - .from(from) - .to(self.contract.address()) - .gas(DEPOSIT_GAS) - .value(from_gwei(deposit_data.amount)) - .data(Bytes::from(encode_eth1_tx_data(&deposit_data).map_err( - |e| format!("Failed to encode deposit data: {:?}", e), - )?)); - - let pending_tx = self - .client - .send_transaction(tx_request, None) - .await - .map_err(|e| format!("Failed to call deposit fn: {:?}", e))?; - - pending_tx - .interval(Duration::from_millis(10)) - .confirmations(0) - .await - .map_err(|e| format!("Transaction failed to resolve: {:?}", e))? - .ok_or_else(|| "Transaction dropped from mempool".to_string())?; - Ok(()) - } - - /// Peforms many deposits, each preceded by a delay. - pub async fn deposit_multiple(&self, deposits: Vec) -> Result<(), String> { - for deposit in deposits.into_iter() { - sleep(deposit.delay).await; - self.deposit_async(deposit.deposit).await?; - } - Ok(()) - } -} - -/// Describes a deposit and a delay that should should precede it's submission to the deposit -/// contract. -#[derive(Clone)] -pub struct DelayThenDeposit { - /// Wait this duration ... - pub delay: Duration, - /// ... then submit this deposit. 
- pub deposit: DepositData, -} - -fn from_gwei(gwei: u64) -> U256 { - U256::from(gwei) * U256::exp10(9) -} - -/// Deploys the deposit contract to the given web3 instance using the account with index -/// `DEPLOYER_ACCOUNTS_INDEX`. -async fn deploy_deposit_contract( - client: Provider, - confirmations: usize, - bytecode: Vec, - password_opt: Option, -) -> Result { - let from_address = client - .get_accounts() - .await - .map_err(|e| format!("Failed to get accounts: {:?}", e)) - .and_then(|accounts| { - accounts - .get(DEPLOYER_ACCOUNTS_INDEX) - .cloned() - .ok_or_else(|| "Insufficient accounts for deployer".to_string()) - })?; - - let deploy_address = if let Some(password) = password_opt { - let result = client - .request( - "personal_unlockAccount", - vec![from_address.to_string(), password], - ) - .await; - - match result { - Ok(true) => from_address, - Ok(false) => return Err("Eth1 node refused to unlock account".to_string()), - Err(e) => return Err(format!("Eth1 unlock request failed: {:?}", e)), - } - } else { - from_address - }; - - let mut bytecode = String::from_utf8(bytecode).unwrap(); - bytecode.retain(|c| c.is_ascii_hexdigit()); - let bytecode = hex::decode(&bytecode[1..]).unwrap(); - - let deploy_tx: TypedTransaction = TransactionRequest::new() - .from(deploy_address) - .data(Bytes::from(bytecode)) - .gas(CONTRACT_DEPLOY_GAS) - .into(); - - let pending_tx = client - .send_transaction(deploy_tx, None) - .await - .map_err(|e| format!("Failed to send tx: {:?}", e))?; - - let tx = pending_tx - .interval(Duration::from_millis(500)) - .confirmations(confirmations) - .await - .map_err(|e| format!("Failed to fetch tx receipt: {:?}", e))?; - tx.and_then(|tx| tx.contract_address) - .ok_or_else(|| "Deposit contract not deployed successfully".to_string()) -} diff --git a/validator_client/src/lib.rs b/validator_client/src/lib.rs index 6692fe3a7b6..a96780b3356 100644 --- a/validator_client/src/lib.rs +++ b/validator_client/src/lib.rs @@ -86,7 +86,6 @@ pub struct 
ProductionValidatorClient { slot_clock: SystemTimeSlotClock, http_api_listen_addr: Option, config: Config, - beacon_nodes: Arc>, genesis_time: u64, } @@ -516,7 +515,6 @@ impl ProductionValidatorClient { slot_clock, http_api_listen_addr: None, genesis_time, - beacon_nodes, }) } @@ -562,7 +560,7 @@ impl ProductionValidatorClient { }; // Wait until genesis has occurred. - wait_for_genesis(&self.beacon_nodes, self.genesis_time).await?; + wait_for_genesis(self.genesis_time).await?; duties_service::start_update_service(self.duties_service.clone(), block_service_tx); @@ -703,10 +701,7 @@ async fn init_from_beacon_node( Ok((genesis.genesis_time, genesis.genesis_validators_root)) } -async fn wait_for_genesis( - beacon_nodes: &BeaconNodeFallback, - genesis_time: u64, -) -> Result<(), String> { +async fn wait_for_genesis(genesis_time: u64) -> Result<(), String> { let now = SystemTime::now() .duration_since(UNIX_EPOCH) .map_err(|e| format!("Unable to read system time: {:?}", e))?; @@ -726,7 +721,7 @@ async fn wait_for_genesis( // Start polling the node for pre-genesis information, cancelling the polling as soon as the // timer runs out. tokio::select! { - result = poll_whilst_waiting_for_genesis(beacon_nodes, genesis_time) => result?, + result = poll_whilst_waiting_for_genesis(genesis_time) => result?, () = sleep(genesis_time - now) => () }; @@ -746,46 +741,20 @@ async fn wait_for_genesis( /// Request the version from the node, looping back and trying again on failure. Exit once the node /// has been contacted. 
-async fn poll_whilst_waiting_for_genesis( - beacon_nodes: &BeaconNodeFallback, - genesis_time: Duration, -) -> Result<(), String> { +async fn poll_whilst_waiting_for_genesis(genesis_time: Duration) -> Result<(), String> { loop { - match beacon_nodes - .first_success(|beacon_node| async move { beacon_node.get_lighthouse_staking().await }) - .await - { - Ok(is_staking) => { - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .map_err(|e| format!("Unable to read system time: {:?}", e))?; - - if !is_staking { - error!( - msg = "this will caused missed duties", - info = "see the --staking CLI flag on the beacon node", - "Staking is disabled for beacon node" - ); - } + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .map_err(|e| format!("Unable to read system time: {:?}", e))?; - if now < genesis_time { - info!( - bn_staking_enabled = is_staking, - seconds_to_wait = (genesis_time - now).as_secs(), - "Waiting for genesis" - ); - } else { - break Ok(()); - } - } - Err(e) => { - error!( - error = %e, - "Error polling beacon node" - ); - } + if now < genesis_time { + info!( + seconds_to_wait = (genesis_time - now).as_secs(), + "Waiting for genesis" + ); + } else { + break Ok(()); } - sleep(WAITING_FOR_GENESIS_POLL_TIME).await; } }