From c16ff6986c0a40ca4332247b49e126ef87ed8ca6 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Thu, 12 Jun 2025 12:28:58 +0200 Subject: [PATCH 01/14] omg so broken --- beacon_node/http_api/src/light_client.rs | 10 +- .../lighthouse_network/src/rpc/codec.rs | 62 +------- .../lighthouse_network/src/rpc/methods.rs | 17 +++ beacon_node/network/src/service.rs | 7 + consensus/types/src/chain_spec.rs | 138 +++++++++++++----- consensus/types/src/fork_context.rs | 64 ++++---- lcli/src/generate_bootnode_enr.rs | 3 +- 7 files changed, 172 insertions(+), 129 deletions(-) diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index 24b1338a724..0aec5d2942a 100644 --- a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -150,13 +150,11 @@ fn map_light_client_update_to_ssz_chunk( chain: &BeaconChain, light_client_update: &LightClientUpdate, ) -> LightClientUpdateResponseChunk { - let fork_name = chain - .spec - .fork_name_at_slot::(light_client_update.attested_header_slot()); - - let fork_digest = ChainSpec::compute_fork_digest( - chain.spec.fork_version_for_name(fork_name), + let fork_digest = chain.spec.compute_fork_digest( chain.genesis_validators_root, + light_client_update + .attested_header_slot() + .epoch(T::EthSpec::slots_per_epoch()), ); let payload = light_client_update.as_ssz_bytes(); diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index f638dd56151..65ddafbbd29 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -469,65 +469,9 @@ fn context_bytes( // Add the context bytes if required if protocol.has_context_bytes() { if let RpcResponse::Success(rpc_variant) = resp { - match rpc_variant { - RpcSuccessResponse::BlocksByRange(ref_box_block) - | RpcSuccessResponse::BlocksByRoot(ref_box_block) => { - return match **ref_box_block { - // NOTE: If you are adding another fork type here, be sure to modify the - // `fork_context.to_context_bytes()` function to support it as well! - SignedBeaconBlock::Fulu { .. } => { - fork_context.to_context_bytes(ForkName::Fulu) - } - SignedBeaconBlock::Electra { .. } => { - fork_context.to_context_bytes(ForkName::Electra) - } - SignedBeaconBlock::Deneb { .. } => { - fork_context.to_context_bytes(ForkName::Deneb) - } - SignedBeaconBlock::Capella { .. } => { - fork_context.to_context_bytes(ForkName::Capella) - } - SignedBeaconBlock::Bellatrix { .. } => { - fork_context.to_context_bytes(ForkName::Bellatrix) - } - SignedBeaconBlock::Altair { .. } => { - fork_context.to_context_bytes(ForkName::Altair) - } - SignedBeaconBlock::Base { .. 
} => { - Some(fork_context.genesis_context_bytes()) - } - }; - } - RpcSuccessResponse::BlobsByRange(_) | RpcSuccessResponse::BlobsByRoot(_) => { - return fork_context.to_context_bytes(ForkName::Deneb); - } - RpcSuccessResponse::DataColumnsByRoot(_) - | RpcSuccessResponse::DataColumnsByRange(_) => { - return fork_context.to_context_bytes(ForkName::Fulu); - } - RpcSuccessResponse::LightClientBootstrap(lc_bootstrap) => { - return lc_bootstrap - .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); - } - RpcSuccessResponse::LightClientOptimisticUpdate(lc_optimistic_update) => { - return lc_optimistic_update - .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); - } - RpcSuccessResponse::LightClientFinalityUpdate(lc_finality_update) => { - return lc_finality_update - .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); - } - RpcSuccessResponse::LightClientUpdatesByRange(lc_update) => { - return lc_update - .map_with_fork_name(|fork_name| fork_context.to_context_bytes(fork_name)); - } - // These will not pass the has_context_bytes() check - RpcSuccessResponse::Status(_) - | RpcSuccessResponse::Pong(_) - | RpcSuccessResponse::MetaData(_) => { - return None; - } - } + rpc_variant + .slot() + .map(|slot| fork_context.context_bytes(slot)) } } None diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 74cfc6d1982..68b9b5d382f 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -749,6 +749,23 @@ impl RpcSuccessResponse { RpcSuccessResponse::LightClientUpdatesByRange(_) => Protocol::LightClientUpdatesByRange, } } + + pub fn slot(&self) -> Option { + match self { + Self::BlocksByRange(r) | Self::BlocksByRoot(r) => Some(r.slot()), + Self::BlobsByRange(r) | Self::BlobsByRoot(r) => { + Some(r.signed_block_header.message().slot()) + } + Self::DataColumnsByRange(r) | Self::DataColumnsByRoot(r) => { + Some(r.signed_block_header.message().slot()) + } + Self::LightClientBootstrap(r) => Some(r.get_slot()), + Self::LightClientFinalityUpdate(r) => Some(r.get_attested_header_slot()), + Self::LightClientOptimisticUpdate(r) => Some(r.get_slot()), + Self::LightClientUpdatesByRange(r) => Some(r.attested_header_slot()), + Self::MetaData(_) | Self::Status(_) | Self::Pong(_) => None, + } + } } impl std::fmt::Display for RpcErrorResponse { diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 0a6d5152322..fdc36dac751 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -188,6 +188,8 @@ pub struct NetworkService { network_globals: Arc>, /// A delay that expires when a new fork takes place. next_fork_update: Pin>>, + /// A delay the expires when the next digest update takes place. + next_digest_update: Pin>>, /// A delay that expires when we need to subscribe to a new fork's topics. next_fork_subscriptions: Pin>>, /// A delay that expires when we need to unsubscribe from old fork topics. @@ -892,6 +894,11 @@ fn next_fork_delay( .map(|(_, until_fork)| tokio::time::sleep(until_fork)) } +fn next_digest_delay( + beacon_chain: &BeaconChain, +) -> Option { +} + /// Returns a `Sleep` that triggers `SUBSCRIBE_DELAY_SLOTS` before the next fork. /// Returns `None` if there are no scheduled forks or we are already past `current_slot + SUBSCRIBE_DELAY_SLOTS > fork_slot`. 
fn next_fork_subscriptions_delay( diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index b4fd5afe871..3b10b5b36e1 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -6,6 +6,7 @@ use int_to_bytes::int_to_bytes4; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_utils::quoted_u64::MaybeQuoted; +use sha2::Sha256; use ssz::Encode; use std::fs::File; use std::path::Path; @@ -282,7 +283,8 @@ impl ChainSpec { genesis_validators_root: Hash256, ) -> EnrForkId { EnrForkId { - fork_digest: self.fork_digest::(slot, genesis_validators_root), + fork_digest: self + .compute_fork_digest(genesis_validators_root, slot.epoch(E::slots_per_epoch())), next_fork_version: self.next_fork_version::(slot), next_fork_epoch: self .next_fork_epoch::(slot) @@ -291,18 +293,6 @@ impl ChainSpec { } } - /// Returns the `ForkDigest` for the given slot. - /// - /// If `self.altair_fork_epoch == None`, then this function returns the genesis fork digest - /// otherwise, returns the fork digest based on the slot. - pub fn fork_digest(&self, slot: Slot, genesis_validators_root: Hash256) -> [u8; 4] { - let fork_name = self.fork_name_at_slot::(slot); - Self::compute_fork_digest( - self.fork_version_for_name(fork_name), - genesis_validators_root, - ) - } - /// Returns the `next_fork_version`. /// /// `next_fork_version = current_fork_version` if no future fork is planned, @@ -364,6 +354,11 @@ impl ChainSpec { } } + // This is `compute_fork_version` in the spec + pub fn fork_version_for_epoch(&self, epoch: Epoch) -> [u8; 4] { + self.fork_version_for_name(self.fork_name_at_epoch(epoch)) + } + /// For a given fork name, return the epoch at which it activates. pub fn fork_epoch(&self, fork_name: ForkName) -> Option { match fork_name { @@ -555,18 +550,52 @@ impl ChainSpec { /// /// This is a digest primarily used for domain separation on the p2p layer. /// 4-bytes suffices for practical separation of forks/chains. 
- pub fn compute_fork_digest( - current_version: [u8; 4], - genesis_validators_root: Hash256, - ) -> [u8; 4] { - let mut result = [0; 4]; - let root = Self::compute_fork_data_root(current_version, genesis_validators_root); - result.copy_from_slice( + pub fn compute_fork_digest(self, genesis_validators_root: Hash256, epoch: Epoch) -> [u8; 4] { + let fork_version = self.fork_version_for_epoch(epoch); + let mut base_digest = [0u8; 4]; + let root = Self::compute_fork_data_root(fork_version, genesis_validators_root); + base_digest.copy_from_slice( root.as_slice() .get(0..4) .expect("root hash is at least 4 bytes"), ); - result + + let Some(blob_parameters) = self.get_blob_parameters(epoch) else { + return base_digest; + }; + + match self.fulu_fork_epoch { + Some(fulu_epoch) if epoch >= fulu_epoch => { + // Concatenate epoch and max_blobs_per_block as u64 bytes + let mut input = Vec::with_capacity(16); + input.extend_from_slice(&blob_parameters.epoch.as_u64().to_be_bytes()); + input.extend_from_slice(&blob_parameters.max_blobs_per_block.to_be_bytes()); + + // Hash the concatenated bytes + let hash = Sha256::digest(input); + + // XOR the base digest with the first 4 bytes of the hash + let mut masked_digest = [0u8; 4]; + for (i, (a, b)) in base_digest.iter().zip(hash.iter()).enumerate() { + masked_digest[i] = a ^ b; + } + masked_digest + } + _ => base_digest, + } + } + + pub fn all_digest_epochs(&self) -> Iter { + let mut relevant_epochs = ForkName::list_all_fork_epochs(self) + .filter_map(|(_, epoch)| epoch) + .collect::>(); + + if self.fulu_fork_epoch.is_some() { + for blob_parameters in self.blob_schedule { + relevant_epochs.insert(blob_parameters.epoch); + } + } + relevant_epochs.iter().sorted() } /// Compute a domain by applying the given `fork_version`. @@ -671,6 +700,35 @@ impl ChainSpec { } } + pub fn get_blob_parameters(&self, epoch: Epoch) -> Option { + match self.fulu_fork_epoch { + Some(fulu_epoch) if epoch >= fulu_epoch => self + .blob_schedule + .blob_parameters_for_epoch(epoch) + .or_else(|| { + Some(BlobParameters { + epoch: self + .electra_fork_epoch + .expect("electra fork epoch must be set if fulu epoch is set"), + max_blobs_per_block: self.max_blobs_per_block_electra, + }) + }), + _ => match self.electra_fork_epoch { + Some(electra_epoch) if epoch >= electra_epoch => Some(BlobParameters { + epoch: electra_epoch, + max_blobs_per_block: self.max_blobs_per_block_electra, + }), + _ => match self.deneb_fork_epoch { + Some(deneb_epoch) if epoch >= deneb_epoch => Some(BlobParameters { + epoch: deneb_epoch, + max_blobs_per_block: self.max_blobs_per_block, + }), + _ => None, + }, + }, + } + } + // TODO(EIP-7892): remove this once we have fork-version changes on BPO forks pub fn max_blobs_per_block_within_fork(&self, fork_name: ForkName) -> u64 { if !fork_name.fulu_enabled() { @@ -1378,29 +1436,29 @@ impl Default for ChainSpec { #[derive(arbitrary::Arbitrary, Serialize, Deserialize, Debug, PartialEq, Clone)] #[serde(rename_all = "UPPERCASE")] -pub struct BPOFork { - epoch: Epoch, +pub struct BlobParameters { + pub epoch: Epoch, #[serde(with = "serde_utils::quoted_u64")] - max_blobs_per_block: u64, + pub max_blobs_per_block: u64, } -// A wrapper around a vector of BPOFork to ensure that the vector is reverse +// A wrapper around a vector of BlobParameters to ensure that the vector is reverse // sorted by epoch. 
#[derive(arbitrary::Arbitrary, Serialize, Debug, PartialEq, Clone)] -pub struct BlobSchedule(Vec); +pub struct BlobSchedule(Vec); impl<'de> Deserialize<'de> for BlobSchedule { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { - let vec = Vec::::deserialize(deserializer)?; + let vec = Vec::::deserialize(deserializer)?; Ok(BlobSchedule::new(vec)) } } impl BlobSchedule { - pub fn new(mut vec: Vec) -> Self { + pub fn new(mut vec: Vec) -> Self { // reverse sort by epoch vec.sort_by(|a, b| b.epoch.cmp(&a.epoch)); Self(vec) @@ -1417,19 +1475,23 @@ impl BlobSchedule { .map(|entry| entry.max_blobs_per_block) } + pub fn blob_parameters_for_epoch(&self, epoch: Epoch) -> Option { + self.0.iter().find(|entry| epoch >= entry.epoch).cloned() + } + pub const fn default() -> Self { // TODO(EIP-7892): think about what the default should be Self(vec![]) } - pub fn as_vec(&self) -> &Vec { + pub fn as_vec(&self) -> &Vec { &self.0 } } impl<'a> IntoIterator for &'a BlobSchedule { - type Item = &'a BPOFork; - type IntoIter = std::slice::Iter<'a, BPOFork>; + type Item = &'a BlobParameters; + type IntoIter = std::slice::Iter<'a, BlobParameters>; fn into_iter(self) -> Self::IntoIter { self.0.iter() @@ -1437,8 +1499,8 @@ impl<'a> IntoIterator for &'a BlobSchedule { } impl IntoIterator for BlobSchedule { - type Item = BPOFork; - type IntoIter = std::vec::IntoIter; + type Item = BlobParameters; + type IntoIter = std::vec::IntoIter; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() @@ -2490,23 +2552,23 @@ mod yaml_tests { assert_eq!( config.blob_schedule.as_vec(), &vec![ - BPOFork { + BlobParameters { epoch: Epoch::new(1584), max_blobs_per_block: 20 }, - BPOFork { + BlobParameters { epoch: Epoch::new(1280), max_blobs_per_block: 9 }, - BPOFork { + BlobParameters { epoch: Epoch::new(1024), max_blobs_per_block: 18 }, - BPOFork { + BlobParameters { epoch: Epoch::new(768), max_blobs_per_block: 15 }, - BPOFork { + BlobParameters { epoch: Epoch::new(512), max_blobs_per_block: 12 }, diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index a6360705bad..51a31b9dcd0 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -1,18 +1,21 @@ use parking_lot::RwLock; use crate::{ChainSpec, EthSpec, ForkName, Hash256, Slot}; -use std::collections::HashMap; +use std::{collections::HashMap, marker::PhantomData}; /// Provides fork specific info like the current fork name and the fork digests corresponding to every valid fork. #[derive(Debug)] -pub struct ForkContext { - current_fork: RwLock, - fork_to_digest: HashMap, +pub struct ForkContext { + relevant_epoch: Epoch, + enabled_forks: HashSet, + genesis_validators_root: Hash256, + epoch_to_digest: BTreeMap, digest_to_fork: HashMap<[u8; 4], ForkName>, pub spec: ChainSpec, + phantom_data: PhantomData, } -impl ForkContext { +impl ForkContext { /// Creates a new `ForkContext` object by enumerating all enabled forks and computing their /// fork digest. 
/// @@ -22,40 +25,44 @@ impl ForkContext { genesis_validators_root: Hash256, spec: &ChainSpec, ) -> Self { - let fork_to_digest: HashMap = ForkName::list_all() + let enabled_forks = ForkName::list_all() .into_iter() - .filter_map(|fork| { - if spec.fork_epoch(fork).is_some() { - Some(( - fork, - ChainSpec::compute_fork_digest( - spec.fork_version_for_name(fork), - genesis_validators_root, - ), - )) - } else { - None - } + .filter(|fork| spec.fork_epoch(fork).is_some()); + + let epoch_to_digest = spec + .all_digest_epochs() + .into_iter() + .map(|epoch| { + let fork_version = spec.fork_version_for_epoch(epoch); + let fork_digest = spec.compute_fork_digest(genesis_validators_root, epoch); + (epoch, fork_digest) }) .collect(); - let digest_to_fork = fork_to_digest - .clone() - .into_iter() - .map(|(k, v)| (v, k)) + let digest_to_fork = epoch_to_digest + .iter() + .map(|(epoch, digest)| { + let fork_name = spec.fork_name_at_epoch(epoch); + (*digest, fork_name) + }) .collect(); + let relevant_epoch = RwLock::new(current_slot.epoch(E::slots_per_epoch())); + Self { - current_fork: RwLock::new(spec.fork_name_at_slot::(current_slot)), - fork_to_digest, + relevant_epoch, + enabled_forks, + genesis_validators_root, + epoch_to_digest, digest_to_fork, spec: spec.clone(), + phantom_data: PhantomData::::default(), } } /// Returns `true` if the provided `fork_name` exists in the `ForkContext` object. pub fn fork_exists(&self, fork_name: ForkName) -> bool { - self.fork_to_digest.contains_key(&fork_name) + self.enabled_forks.contains_key(&fork_name) } /// Returns the `current_fork`. @@ -88,6 +95,13 @@ impl ForkContext { self.fork_to_digest.get(&fork_name).cloned() } + // TODO: we may delete this entire object and just use the spec + pub fn context_bytes(&self, slot: Slot) -> [u8; 4] { + let epoch = slot.epoch(E::slots_per_epoch()); + self.spec + .compute_fork_digest(self.genesis_validators_root, epoch) + } + /// Returns all `fork_digest`s that are currently in the `ForkContext` object. pub fn all_fork_digests(&self) -> Vec<[u8; 4]> { self.digest_to_fork.keys().cloned().collect() diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index e1acac12dfe..9ef7c12c48f 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -15,6 +15,7 @@ pub fn run(matches: &ArgMatches, spec: &ChainSpec) -> Result<(), Str let udp_port: NonZeroU16 = clap_utils::parse_required(matches, "udp-port")?; let tcp_port: NonZeroU16 = clap_utils::parse_required(matches, "tcp-port")?; let output_dir: PathBuf = clap_utils::parse_required(matches, "output-dir")?; + // FIXME: why is this being read from.. somewhere rather than just using the spec? 
let genesis_fork_version: [u8; 4] = clap_utils::parse_ssz_required(matches, "genesis-fork-version")?; @@ -33,7 +34,7 @@ pub fn run(matches: &ArgMatches, spec: &ChainSpec) -> Result<(), Str let secp256k1_keypair = secp256k1::Keypair::generate(); let enr_key = CombinedKey::from_secp256k1(&secp256k1_keypair); let enr_fork_id = EnrForkId { - fork_digest: ChainSpec::compute_fork_digest(genesis_fork_version, Hash256::zero()), + fork_digest: spec.compute_fork_digest(Hash256::zero(), Epoch::new(0)), next_fork_version: genesis_fork_version, next_fork_epoch: Epoch::max_value(), // FAR_FUTURE_EPOCH }; From f3a314b2067aec6f4f587c9025d40de6a587fa7c Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Tue, 17 Jun 2025 13:34:12 -0500 Subject: [PATCH 02/14] a bit less broken --- beacon_node/beacon_chain/src/beacon_chain.rs | 14 +++ beacon_node/http_api/src/light_client.rs | 2 +- .../lighthouse_network/src/rpc/codec.rs | 4 +- .../lighthouse_network/src/rpc/methods.rs | 4 +- .../lighthouse_network/src/service/mod.rs | 37 +++--- beacon_node/network/src/service.rs | 79 +++++++----- consensus/types/src/chain_spec.rs | 115 ++++++++++++++++-- consensus/types/src/fork_context.rs | 58 ++++----- 8 files changed, 214 insertions(+), 99 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index de377dab974..17736b05c52 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6813,6 +6813,20 @@ impl BeaconChain { .map(|duration| (fork_name, duration)) } + pub fn duration_to_next_digest(&self) -> Option<(Epoch, Duration)> { + // If we are unable to read the slot clock we assume that it is prior to genesis and + // therefore use the genesis slot. + let slot = self.slot().unwrap_or(self.spec.genesis_slot); + let epoch = slot.epoch(T::EthSpec::slots_per_epoch()); + + let next_digest_epoch = self.spec.next_digest_epoch(epoch)?; + let next_digest_slot = next_digest_epoch.start_slot(T::EthSpec::slots_per_epoch()); + + self.slot_clock + .duration_to_slot(next_digest_slot) + .map(|duration| (next_digest_epoch, duration)) + } + /// This method serves to get a sense of the current chain health. It is used in block proposal /// to determine whether we should outsource payload production duties. 
/// diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index 0aec5d2942a..65fa595d594 100644 --- a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -9,7 +9,7 @@ use eth2::types::{ }; use ssz::Encode; use std::sync::Arc; -use types::{BeaconResponse, ForkName, Hash256, LightClientBootstrap}; +use types::{BeaconResponse, EthSpec, ForkName, Hash256, LightClientBootstrap}; use warp::{ hyper::{Body, Response}, reply::Reply, diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 65ddafbbd29..553ed9aff97 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -469,9 +469,9 @@ fn context_bytes( // Add the context bytes if required if protocol.has_context_bytes() { if let RpcResponse::Success(rpc_variant) = resp { - rpc_variant + return rpc_variant .slot() - .map(|slot| fork_context.context_bytes(slot)) + .map(|slot| fork_context.context_bytes(slot.epoch(E::slots_per_epoch()))); } } None diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index 68b9b5d382f..8e065ba6e53 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -754,10 +754,10 @@ impl RpcSuccessResponse { match self { Self::BlocksByRange(r) | Self::BlocksByRoot(r) => Some(r.slot()), Self::BlobsByRange(r) | Self::BlobsByRoot(r) => { - Some(r.signed_block_header.message().slot()) + Some(r.signed_block_header.message.slot) } Self::DataColumnsByRange(r) | Self::DataColumnsByRoot(r) => { - Some(r.signed_block_header.message().slot()) + Some(r.signed_block_header.message.slot) } Self::LightClientBootstrap(r) => Some(r.get_slot()), Self::LightClientFinalityUpdate(r) => Some(r.get_attested_header_slot()), diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index e2c6f244058..ea332374fd1 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -281,27 +281,26 @@ impl Network { // Set up a scoring update interval let update_gossipsub_scores = tokio::time::interval(params.decay_interval); - let current_and_future_forks = ForkName::list_all().into_iter().filter_map(|fork| { - if fork >= ctx.fork_context.current_fork() { - ctx.fork_context - .to_context_bytes(fork) - .map(|fork_digest| (fork, fork_digest)) - } else { - None - } - }); + let current_digest_epoch = ctx.fork_context.digest_epoch(); + let current_and_future_digests = + ctx.chain_spec + .all_digest_epochs() + .filter_map(|digest_epoch| { + if digest_epoch >= current_digest_epoch { + Some((digest_epoch, ctx.fork_context.context_bytes(digest_epoch))) + } else { + None + } + }); - let all_topics_for_forks = current_and_future_forks - .map(|(fork, fork_digest)| { + let all_topics_for_digests = current_and_future_digests + .map(|(epoch, digest)| { + let fork = ctx.chain_spec.fork_name_at_epoch(epoch); all_topics_at_fork::(fork, &ctx.chain_spec) .into_iter() .map(|topic| { - Topic::new(GossipTopic::new( - topic, - GossipEncoding::default(), - fork_digest, - )) - .into() + Topic::new(GossipTopic::new(topic, GossipEncoding::default(), digest)) + .into() }) .collect::>() }) @@ -309,7 +308,7 @@ impl Network { // For simplicity find the fork with the most individual topics and assume all forks // have the same topic count - let 
max_topics_at_any_fork = all_topics_for_forks + let max_topics_at_any_fork = all_topics_for_digests .iter() .map(|topics| topics.len()) .max() @@ -360,7 +359,7 @@ impl Network { // If we are using metrics, then register which topics we want to make sure to keep // track of if ctx.libp2p_registry.is_some() { - for topics in all_topics_for_forks { + for topics in all_topics_for_digests { gossipsub.register_topics_for_metrics(topics); } } diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index fdc36dac751..8acb7d937eb 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -188,8 +188,6 @@ pub struct NetworkService { network_globals: Arc>, /// A delay that expires when a new fork takes place. next_fork_update: Pin>>, - /// A delay the expires when the next digest update takes place. - next_digest_update: Pin>>, /// A delay that expires when we need to subscribe to a new fork's topics. next_fork_subscriptions: Pin>>, /// A delay that expires when we need to unsubscribe from old fork topics. @@ -266,7 +264,7 @@ impl NetworkService { &beacon_chain.spec, )); - debug!(fork_name = ?fork_context.current_fork(), "Current fork"); + //debug!(fork_name = ?fork_context.current_fork(), "Current fork"); // construct the libp2p service context let service_context = Context { @@ -390,29 +388,16 @@ impl NetworkService { let fork_context = &self.fork_context; let spec = &self.beacon_chain.spec; let current_slot = self.beacon_chain.slot().unwrap_or(spec.genesis_slot); - let current_fork = fork_context.current_fork(); - - let mut result = vec![fork_context - .to_context_bytes(current_fork) - .unwrap_or_else(|| { - panic!( - "{} fork bytes should exist as it's initialized in ForkContext", - current_fork - ) - })]; + let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); + + let mut result = vec![fork_context.context_bytes(current_epoch)]; - if let Some((next_fork, fork_epoch)) = spec.next_fork_epoch::(current_slot) { + if let Some(next_digest_epoch) = spec.next_digest_epoch(current_epoch) { if current_slot.saturating_add(Slot::new(SUBSCRIBE_DELAY_SLOTS)) - >= fork_epoch.start_slot(T::EthSpec::slots_per_epoch()) + >= next_digest_epoch.start_slot(T::EthSpec::slots_per_epoch()) { - let next_fork_context_bytes = - fork_context.to_context_bytes(next_fork).unwrap_or_else(|| { - panic!( - "context bytes should exist as spec.next_fork_epoch({}) returned Some({})", - current_slot, next_fork - ) - }); - result.push(next_fork_context_bytes); + let next_digest = fork_context.context_bytes(next_digest_epoch); + result.push(next_digest); } } @@ -465,9 +450,10 @@ impl NetworkService { } Some(_) = &mut self.next_fork_subscriptions => { - if let Some((fork_name, _)) = self.beacon_chain.duration_to_next_fork() { + if let Some((epoch, _)) = self.beacon_chain.duration_to_next_digest() { + let fork_name = self.beacon_chain.spec.fork_name_at_epoch(epoch); let fork_version = self.beacon_chain.spec.fork_version_for_name(fork_name); - let fork_digest = ChainSpec::compute_fork_digest(fork_version, self.beacon_chain.genesis_validators_root); + let fork_digest = self.beacon_chain.spec.compute_fork_digest(self.beacon_chain.genesis_validators_root, epoch); info!("Subscribing to new fork topics"); self.libp2p.subscribe_new_fork_topics(fork_name, fork_digest); self.next_fork_subscriptions = Box::pin(None.into()); @@ -832,20 +818,30 @@ impl NetworkService { fn update_next_fork(&mut self) { let new_enr_fork_id = self.beacon_chain.enr_fork_id(); + let 
current_epoch = self.beacon_chain.epoch().expect("dont fail!!"); let new_fork_digest = new_enr_fork_id.fork_digest; let fork_context = &self.fork_context; if let Some(new_fork_name) = fork_context.from_context_bytes(new_fork_digest) { - info!( - old_fork = ?fork_context.current_fork(), - new_fork = ?new_fork_name, - "Transitioned to new fork" - ); - fork_context.update_current_fork(*new_fork_name); + if fork_context.current_fork() == *new_fork_name { + // BPO FORK + info!( + epoch = ?current_epoch, + "BPO Fork Triggered" + ) + } else { + info!( + old_fork = ?fork_context.current_fork(), + new_fork = ?new_fork_name, + "Transitioned to new fork" + ); + } + + fork_context.update_digest_epoch(current_epoch); self.libp2p.update_fork_version(new_enr_fork_id); // Reinitialize the next_fork_update - self.next_fork_update = Box::pin(next_fork_delay(&self.beacon_chain).into()); + self.next_fork_update = Box::pin(next_digest_delay(&self.beacon_chain).into()); // Set the next_unsubscribe delay. let epoch_duration = @@ -854,7 +850,7 @@ impl NetworkService { // Update the `next_fork_subscriptions` timer if the next fork is known. self.next_fork_subscriptions = - Box::pin(next_fork_subscriptions_delay(&self.beacon_chain).into()); + Box::pin(next_digest_subscriptions_delay(&self.beacon_chain).into()); self.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); info!( remaining_epochs = UNSUBSCRIBE_DELAY_EPOCHS, @@ -897,6 +893,9 @@ fn next_fork_delay( fn next_digest_delay( beacon_chain: &BeaconChain, ) -> Option { + beacon_chain + .duration_to_next_digest() + .map(|(_, until_epoch)| tokio::time::sleep(until_epoch)) } /// Returns a `Sleep` that triggers `SUBSCRIBE_DELAY_SLOTS` before the next fork. @@ -915,6 +914,20 @@ fn next_fork_subscriptions_delay( None } +fn next_digest_subscriptions_delay( + beacon_chain: &BeaconChain, +) -> Option { + if let Some((_, duration_to_epoch)) = beacon_chain.duration_to_next_digest() { + let duration_to_subscription = duration_to_epoch.saturating_sub(Duration::from_secs( + beacon_chain.spec.seconds_per_slot * SUBSCRIBE_DELAY_SLOTS, + )); + if !duration_to_subscription.is_zero() { + return Some(tokio::time::sleep(duration_to_subscription)); + } + } + None +} + impl Drop for NetworkService { fn drop(&mut self) { // network thread is terminating diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 3b10b5b36e1..50bd2b03f42 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -2,11 +2,11 @@ use crate::application_domain::{ApplicationDomain, APPLICATION_DOMAIN_BUILDER}; use crate::blob_sidecar::BlobIdentifier; use crate::data_column_sidecar::DataColumnsByRootIdentifier; use crate::*; +use ethereum_hashing::hash; use int_to_bytes::int_to_bytes4; use safe_arith::{ArithError, SafeArith}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde_utils::quoted_u64::MaybeQuoted; -use sha2::Sha256; use ssz::Encode; use std::fs::File; use std::path::Path; @@ -550,7 +550,7 @@ impl ChainSpec { /// /// This is a digest primarily used for domain separation on the p2p layer. /// 4-bytes suffices for practical separation of forks/chains. 
- pub fn compute_fork_digest(self, genesis_validators_root: Hash256, epoch: Epoch) -> [u8; 4] { + pub fn compute_fork_digest(&self, genesis_validators_root: Hash256, epoch: Epoch) -> [u8; 4] { let fork_version = self.fork_version_for_epoch(epoch); let mut base_digest = [0u8; 4]; let root = Self::compute_fork_data_root(fork_version, genesis_validators_root); @@ -568,11 +568,11 @@ impl ChainSpec { Some(fulu_epoch) if epoch >= fulu_epoch => { // Concatenate epoch and max_blobs_per_block as u64 bytes let mut input = Vec::with_capacity(16); - input.extend_from_slice(&blob_parameters.epoch.as_u64().to_be_bytes()); - input.extend_from_slice(&blob_parameters.max_blobs_per_block.to_be_bytes()); + input.extend_from_slice(&blob_parameters.epoch.as_u64().to_le_bytes()); + input.extend_from_slice(&blob_parameters.max_blobs_per_block.to_le_bytes()); // Hash the concatenated bytes - let hash = Sha256::digest(input); + let hash = hash(&input); // XOR the base digest with the first 4 bytes of the hash let mut masked_digest = [0u8; 4]; @@ -585,17 +585,32 @@ impl ChainSpec { } } - pub fn all_digest_epochs(&self) -> Iter { + pub fn all_digest_epochs(&self) -> impl std::iter::Iterator { let mut relevant_epochs = ForkName::list_all_fork_epochs(self) + .into_iter() .filter_map(|(_, epoch)| epoch) - .collect::>(); + .collect::>(); if self.fulu_fork_epoch.is_some() { - for blob_parameters in self.blob_schedule { + for blob_parameters in &self.blob_schedule { relevant_epochs.insert(blob_parameters.epoch); } } - relevant_epochs.iter().sorted() + let mut vec = relevant_epochs.into_iter().collect::>(); + vec.sort(); + vec.into_iter() + } + + pub fn next_digest_epoch(&self, epoch: Epoch) -> Option { + match self.fulu_fork_epoch { + Some(fulu_epoch) if epoch >= fulu_epoch => self + .all_digest_epochs() + .find(|digest_epoch| *digest_epoch > epoch), + _ => self + .fork_name_at_epoch(epoch) + .next_fork() + .and_then(|fork_name| self.fork_epoch(fork_name)), + } } /// Compute a domain by applying the given `fork_version`. 
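A note on the masking scheme above: after Fulu, the digest is the plain 4-byte base fork digest XORed with the first four bytes of SHA-256 over the little-endian encoding of the active blob-schedule entry. A self-contained sketch of just that step, using the `sha2` crate in place of `ethereum_hashing::hash` (both compute SHA-256); the helper name and sample values are illustrative, not from this patch:

use sha2::{Digest, Sha256};

/// XOR-mask a base fork digest with the active (epoch, max_blobs_per_block)
/// blob-schedule entry, mirroring the post-Fulu branch of
/// `ChainSpec::compute_fork_digest`.
fn mask_digest(base_digest: [u8; 4], epoch: u64, max_blobs_per_block: u64) -> [u8; 4] {
    // Concatenate the two parameters as little-endian u64s (16 bytes total).
    let mut input = [0u8; 16];
    input[..8].copy_from_slice(&epoch.to_le_bytes());
    input[8..].copy_from_slice(&max_blobs_per_block.to_le_bytes());
    // Hash the concatenation and XOR its first 4 bytes into the base digest.
    let hash = Sha256::digest(input);
    let mut masked = [0u8; 4];
    for (i, byte) in masked.iter_mut().enumerate() {
        *byte = base_digest[i] ^ hash[i];
    }
    masked
}

fn main() {
    // Hypothetical base digest: distinct schedule entries yield distinct
    // digests even though the underlying fork version is unchanged.
    let base = [0xde, 0xad, 0xbe, 0xef];
    assert_ne!(mask_digest(base, 100, 100), mask_digest(base, 150, 175));
}

Because the mask is keyed on the schedule entry rather than the fork version, every BPO boundary produces a fresh digest, which is what lets peers separate on the p2p layer without a version bump.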
@@ -2587,6 +2602,88 @@ mod yaml_tests { assert_eq!(spec.max_blobs_per_block_within_fork(ForkName::Fulu), 20); } + #[test] + fn blob_schedule_fork_digest() { + let spec_contents = r#" + PRESET_BASE: 'mainnet' + MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 384 + MIN_GENESIS_TIME: 1748264340 + GENESIS_FORK_VERSION: 0x10355025 + GENESIS_DELAY: 60 + SECONDS_PER_SLOT: 12 + SECONDS_PER_ETH1_BLOCK: 12 + MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 + SHARD_COMMITTEE_PERIOD: 256 + ETH1_FOLLOW_DISTANCE: 2048 + INACTIVITY_SCORE_BIAS: 4 + INACTIVITY_SCORE_RECOVERY_RATE: 16 + EJECTION_BALANCE: 16000000000 + MIN_PER_EPOCH_CHURN_LIMIT: 4 + CHURN_LIMIT_QUOTIENT: 65536 + MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8 + PROPOSER_SCORE_BOOST: 40 + REORG_HEAD_WEIGHT_THRESHOLD: 20 + REORG_PARENT_WEIGHT_THRESHOLD: 160 + REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 + DEPOSIT_CHAIN_ID: 7042643276 + DEPOSIT_NETWORK_ID: 7042643276 + DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa + + ALTAIR_FORK_VERSION: 0x20355025 + ALTAIR_FORK_EPOCH: 0 + BELLATRIX_FORK_VERSION: 0x30355025 + BELLATRIX_FORK_EPOCH: 0 + CAPELLA_FORK_VERSION: 0x40355025 + CAPELLA_FORK_EPOCH: 0 + DENEB_FORK_VERSION: 0x50355025 + DENEB_FORK_EPOCH: 0 + ELECTRA_FORK_VERSION: 0x60000000 + ELECTRA_FORK_EPOCH: 9 + FULU_FORK_VERSION: 0x06000000 + FULU_FORK_EPOCH: 100 + BLOB_SCHEDULE: + - EPOCH: 9 + MAX_BLOBS_PER_BLOCK: 9 + - EPOCH: 100 + MAX_BLOBS_PER_BLOCK: 100 + - EPOCH: 150 + MAX_BLOBS_PER_BLOCK: 175 + - EPOCH: 200 + MAX_BLOBS_PER_BLOCK: 200 + - EPOCH: 250 + MAX_BLOBS_PER_BLOCK: 275 + - EPOCH: 300 + MAX_BLOBS_PER_BLOCK: 300 + "#; + let config: Config = + serde_yaml::from_str(spec_contents).expect("error while deserializing"); + let spec = + ChainSpec::from_config::(&config).expect("error while creating spec"); + + let genesis_validators_root = Hash256::from_slice(&[0; 32]); + + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(100)); + assert_eq!(digest, [0xdf, 0x67, 0x55, 0x7b]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(101)); + assert_eq!(digest, [0xdf, 0x67, 0x55, 0x7b]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(150)); + assert_eq!(digest, [0x8a, 0xb3, 0x8b, 0x59]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(199)); + assert_eq!(digest, [0x8a, 0xb3, 0x8b, 0x59]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(200)); + assert_eq!(digest, [0xd9, 0xb8, 0x14, 0x38]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(201)); + assert_eq!(digest, [0xd9, 0xb8, 0x14, 0x38]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(250)); + assert_eq!(digest, [0x4e, 0xf3, 0x2a, 0x62]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(299)); + assert_eq!(digest, [0x4e, 0xf3, 0x2a, 0x62]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(300)); + assert_eq!(digest, [0xca, 0x10, 0x0d, 0x64]); + let digest = spec.compute_fork_digest(genesis_validators_root, Epoch::new(301)); + assert_eq!(digest, [0xca, 0x10, 0x0d, 0x64]); + } + #[test] fn apply_to_spec() { let mut spec = ChainSpec::minimal(); diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 51a31b9dcd0..f8dc7872196 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -1,21 +1,19 @@ use parking_lot::RwLock; -use crate::{ChainSpec, EthSpec, ForkName, Hash256, Slot}; -use 
std::{collections::HashMap, marker::PhantomData}; +use crate::{ChainSpec, Epoch, EthSpec, ForkName, Hash256, Slot}; +use std::collections::{HashMap, HashSet}; /// Provides fork specific info like the current fork name and the fork digests corresponding to every valid fork. #[derive(Debug)] -pub struct ForkContext { - relevant_epoch: Epoch, +pub struct ForkContext { + digest_epoch: RwLock, enabled_forks: HashSet, genesis_validators_root: Hash256, - epoch_to_digest: BTreeMap, digest_to_fork: HashMap<[u8; 4], ForkName>, pub spec: ChainSpec, - phantom_data: PhantomData, } -impl ForkContext { +impl ForkContext { /// Creates a new `ForkContext` object by enumerating all enabled forks and computing their /// fork digest. /// @@ -27,13 +25,13 @@ impl ForkContext { ) -> Self { let enabled_forks = ForkName::list_all() .into_iter() - .filter(|fork| spec.fork_epoch(fork).is_some()); + .filter(|fork| spec.fork_epoch(*fork).is_some()) + .collect(); - let epoch_to_digest = spec + let epoch_to_digest: HashMap<_, _> = spec .all_digest_epochs() .into_iter() .map(|epoch| { - let fork_version = spec.fork_version_for_epoch(epoch); let fork_digest = spec.compute_fork_digest(genesis_validators_root, epoch); (epoch, fork_digest) }) @@ -42,45 +40,46 @@ impl ForkContext { let digest_to_fork = epoch_to_digest .iter() .map(|(epoch, digest)| { - let fork_name = spec.fork_name_at_epoch(epoch); + let fork_name = spec.fork_name_at_epoch(*epoch); (*digest, fork_name) }) .collect(); - let relevant_epoch = RwLock::new(current_slot.epoch(E::slots_per_epoch())); + let digest_epoch = RwLock::new(current_slot.epoch(E::slots_per_epoch())); Self { - relevant_epoch, + digest_epoch, enabled_forks, genesis_validators_root, - epoch_to_digest, digest_to_fork, spec: spec.clone(), - phantom_data: PhantomData::::default(), } } /// Returns `true` if the provided `fork_name` exists in the `ForkContext` object. pub fn fork_exists(&self, fork_name: ForkName) -> bool { - self.enabled_forks.contains_key(&fork_name) + self.enabled_forks.contains(&fork_name) } /// Returns the `current_fork`. pub fn current_fork(&self) -> ForkName { - *self.current_fork.read() + self.spec.fork_name_at_epoch(self.digest_epoch()) + } + + /// Returns the current digest epoch + pub fn digest_epoch(&self) -> Epoch { + *self.digest_epoch.read() } - /// Updates the `current_fork` field to a new fork. - pub fn update_current_fork(&self, new_fork: ForkName) { - *self.current_fork.write() = new_fork; + /// Updates the `digest_epoch` field to a new digest epoch. + pub fn update_digest_epoch(&self, epoch: Epoch) { + *self.digest_epoch.write() = epoch; } /// Returns the context bytes/fork_digest corresponding to the genesis fork version. pub fn genesis_context_bytes(&self) -> [u8; 4] { - *self - .fork_to_digest - .get(&ForkName::Base) - .expect("ForkContext must contain genesis context bytes") + self.spec + .compute_fork_digest(self.genesis_validators_root, Epoch::new(0)) } /// Returns the fork type given the context bytes/fork_digest. @@ -89,15 +88,8 @@ impl ForkContext { self.digest_to_fork.get(&context) } - /// Returns the context bytes/fork_digest corresponding to a fork name. - /// Returns `None` if the `ForkName` has not been initialized. 
- pub fn to_context_bytes(&self, fork_name: ForkName) -> Option<[u8; 4]> { - self.fork_to_digest.get(&fork_name).cloned() - } - - // TODO: we may delete this entire object and just use the spec - pub fn context_bytes(&self, slot: Slot) -> [u8; 4] { - let epoch = slot.epoch(E::slots_per_epoch()); + // TODO: we *may* delete this entire object and just use the spec + pub fn context_bytes(&self, epoch: Epoch) -> [u8; 4] { self.spec .compute_fork_digest(self.genesis_validators_root, epoch) } From 4f2b87a2c0f57c3c7f53a025eee6a569f0968ff2 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Tue, 24 Jun 2025 11:44:44 -0500 Subject: [PATCH 03/14] fix next_fork_epoch --- consensus/types/src/chain_spec.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 50bd2b03f42..7f91e2efb73 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -287,8 +287,8 @@ impl ChainSpec { .compute_fork_digest(genesis_validators_root, slot.epoch(E::slots_per_epoch())), next_fork_version: self.next_fork_version::(slot), next_fork_epoch: self - .next_fork_epoch::(slot) - .map(|(_, e)| e) + .next_digest_epoch(slot.epoch(E::slots_per_epoch())) + .map(|e| e) .unwrap_or(self.far_future_epoch), } } From e5c279a52171437f529fd591701248fed4a190b3 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Tue, 24 Jun 2025 12:17:24 -0500 Subject: [PATCH 04/14] added nfd to ENR --- .../lighthouse_network/src/discovery/enr.rs | 2 ++ .../lighthouse_network/src/discovery/mod.rs | 17 +++++++++++++++-- .../lighthouse_network/src/service/mod.rs | 5 +++-- beacon_node/network/src/service.rs | 12 +++++++++++- 4 files changed, 31 insertions(+), 5 deletions(-) diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 5628d5c463a..b0c6db7659f 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -22,6 +22,8 @@ use super::enr_ext::{EnrExt, QUIC6_ENR_KEY, QUIC_ENR_KEY}; /// The ENR field specifying the fork id. pub const ETH2_ENR_KEY: &str = "eth2"; +/// The ENR field specifying the next fork digest. +pub const NEXT_FORK_DIGEST_ENR_KEY: &str = "nfd"; /// The ENR field specifying the attestation subnet bitfield. pub const ATTESTATION_BITFIELD_ENR_KEY: &str = "attnets"; /// The ENR field specifying the sync committee subnet bitfield. diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index ad4241c5b71..8b0a946273d 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -49,7 +49,7 @@ use tracing::{debug, error, info, trace, warn}; use types::{ChainSpec, EnrForkId, EthSpec}; mod subnet_predicate; -use crate::discovery::enr::PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY; +use crate::discovery::enr::{NEXT_FORK_DIGEST_ENR_KEY, PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY}; pub use subnet_predicate::subnet_predicate; use types::non_zero_usize::new_non_zero_usize; @@ -571,7 +571,7 @@ impl Discovery { } /// Updates the `eth2` field of our local ENR. 
- pub fn update_eth2_enr(&mut self, enr_fork_id: EnrForkId) { + pub fn update_eth2_enr(&mut self, enr_fork_id: EnrForkId, nfd: Option<[u8; 4]>) { // to avoid having a reference to the spec constant, for the logging we assume // FAR_FUTURE_EPOCH is u64::MAX let next_fork_epoch_log = if enr_fork_id.next_fork_epoch == u64::MAX { @@ -582,6 +582,7 @@ impl Discovery { info!( fork_digest = ?enr_fork_id.fork_digest, + next_fork_digest = ?nfd, next_fork_version = ?enr_fork_id.next_fork_version, next_fork_epoch = next_fork_epoch_log, "Updating the ENR fork version" @@ -597,6 +598,18 @@ impl Discovery { ) }); + if let Some(nfd) = nfd { + let _ = self + .discv5 + .enr_insert::(NEXT_FORK_DIGEST_ENR_KEY, &nfd.as_ssz_bytes().into()) + .map_err(|e| { + warn!( + error = ?e, + "Could not update nfd ENR field" + ); + }); + } + // replace the global version with discovery version *self.network_globals.local_enr.write() = self.discv5.local_enr(); diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index ea332374fd1..71d8bbd635c 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1353,8 +1353,9 @@ impl Network { name = "libp2p", skip_all )] - pub fn update_fork_version(&mut self, enr_fork_id: EnrForkId) { - self.discovery_mut().update_eth2_enr(enr_fork_id.clone()); + pub fn update_fork_version(&mut self, enr_fork_id: EnrForkId, nfd: Option<[u8; 4]>) { + self.discovery_mut() + .update_eth2_enr(enr_fork_id.clone(), nfd); // update the local reference self.enr_fork_id = enr_fork_id; diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 8acb7d937eb..df801e9327a 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -821,6 +821,16 @@ impl NetworkService { let current_epoch = self.beacon_chain.epoch().expect("dont fail!!"); let new_fork_digest = new_enr_fork_id.fork_digest; + let nfd = match self.beacon_chain.spec.fulu_fork_epoch { + Some(fulu_epoch) if fulu_epoch != self.beacon_chain.spec.far_future_epoch => { + Some(self.beacon_chain.spec.compute_fork_digest( + self.beacon_chain.genesis_validators_root, + new_enr_fork_id.next_fork_epoch, + )) + } + _ => None, + }; + let fork_context = &self.fork_context; if let Some(new_fork_name) = fork_context.from_context_bytes(new_fork_digest) { if fork_context.current_fork() == *new_fork_name { @@ -839,7 +849,7 @@ impl NetworkService { fork_context.update_digest_epoch(current_epoch); - self.libp2p.update_fork_version(new_enr_fork_id); + self.libp2p.update_fork_version(new_enr_fork_id, nfd); // Reinitialize the next_fork_update self.next_fork_update = Box::pin(next_digest_delay(&self.beacon_chain).into()); From e18b32b882ae56a941d508ba4edb632a8ce861fc Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Wed, 25 Jun 2025 11:13:37 -0500 Subject: [PATCH 05/14] Advertise NFD before FULU_FORK_EPOCH --- beacon_node/http_api/src/light_client.rs | 2 +- .../lighthouse_network/src/discovery/enr.rs | 12 +++++++++-- .../lighthouse_network/src/discovery/mod.rs | 4 +++- .../lighthouse_network/src/service/mod.rs | 1 + beacon_node/network/src/service.rs | 20 +++++++------------ consensus/types/src/fork_context.rs | 10 ++++++++++ lcli/src/generate_bootnode_enr.rs | 3 ++- 7 files changed, 34 insertions(+), 18 deletions(-) diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index 65fa595d594..a51c4acc719 100644 --- 
a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -4,7 +4,7 @@ use crate::version::{ }; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes}; use eth2::types::{ - self as api_types, ChainSpec, LightClientUpdate, LightClientUpdateResponseChunk, + self as api_types, LightClientUpdate, LightClientUpdateResponseChunk, LightClientUpdateResponseChunkInner, LightClientUpdatesQuery, }; use ssz::Encode; diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index b0c6db7659f..d6fe4e3a3ee 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -152,12 +152,13 @@ pub fn build_or_load_enr( config: &NetworkConfig, enr_fork_id: &EnrForkId, spec: &ChainSpec, + nfd: [u8; 4], ) -> Result { // Build the local ENR. // Note: Discovery should update the ENR record's IP to the external IP as seen by the // majority of our peers, if the CLI doesn't expressly forbid it. let enr_key = CombinedKey::from_libp2p(local_key)?; - let mut local_enr = build_enr::(&enr_key, config, enr_fork_id, spec)?; + let mut local_enr = build_enr::(&enr_key, config, enr_fork_id, spec, nfd)?; use_or_load_enr(&enr_key, &mut local_enr, config)?; Ok(local_enr) @@ -169,6 +170,7 @@ pub fn build_enr( config: &NetworkConfig, enr_fork_id: &EnrForkId, spec: &ChainSpec, + nfd: [u8; 4], ) -> Result { let mut builder = discv5::enr::Enr::builder(); let (maybe_ipv4_address, maybe_ipv6_address) = &config.enr_address; @@ -272,6 +274,11 @@ pub fn build_enr( builder.add_value(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, &custody_group_count); } + // only set `nfd` if peer das is scheduled + if spec.is_peer_das_scheduled() { + builder.add_value(NEXT_FORK_DIGEST_ENR_KEY, &nfd); + } + builder .build(enr_key) .map_err(|e| format!("Could not build Local ENR: {:?}", e)) @@ -353,7 +360,8 @@ mod test { let keypair = libp2p::identity::secp256k1::Keypair::generate(); let enr_key = CombinedKey::from_secp256k1(&keypair); let enr_fork_id = EnrForkId::default(); - let enr = build_enr::(&enr_key, &config, &enr_fork_id, spec).unwrap(); + let nfd = [0; 4]; // placeholder + let enr = build_enr::(&enr_key, &config, &enr_fork_id, spec, nfd).unwrap(); (enr, enr_key) } diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 8b0a946273d..080ff414d87 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1230,7 +1230,9 @@ mod tests { config.set_listening_addr(crate::ListenAddress::unused_v4_ports()); let config = Arc::new(config); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); - let enr: Enr = build_enr::(&enr_key, &config, &EnrForkId::default(), &spec).unwrap(); + let nfd = [0; 4]; // placeholder + let enr: Enr = + build_enr::(&enr_key, &config, &EnrForkId::default(), &spec, nfd).unwrap(); let globals = NetworkGlobals::new( enr, MetaData::V2(MetaDataV2 { diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index 71d8bbd635c..e787bca9ae1 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -199,6 +199,7 @@ impl Network { &config, &ctx.enr_fork_id, &ctx.chain_spec, + ctx.fork_context.next_fork_digest(), )?; // Construct the metadata diff --git a/beacon_node/network/src/service.rs 
b/beacon_node/network/src/service.rs index df801e9327a..9d850298cd5 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -36,8 +36,8 @@ use tokio::sync::mpsc; use tokio::time::Sleep; use tracing::{debug, error, info, info_span, trace, warn, Instrument}; use types::{ - ChainSpec, EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, - Unsigned, ValidatorSubscription, + EthSpec, ForkContext, Slot, SubnetId, SyncCommitteeSubscription, SyncSubnetId, Unsigned, + ValidatorSubscription, }; mod tests; @@ -452,7 +452,6 @@ impl NetworkService { Some(_) = &mut self.next_fork_subscriptions => { if let Some((epoch, _)) = self.beacon_chain.duration_to_next_digest() { let fork_name = self.beacon_chain.spec.fork_name_at_epoch(epoch); - let fork_version = self.beacon_chain.spec.fork_version_for_name(fork_name); let fork_digest = self.beacon_chain.spec.compute_fork_digest(self.beacon_chain.genesis_validators_root, epoch); info!("Subscribing to new fork topics"); self.libp2p.subscribe_new_fork_topics(fork_name, fork_digest); @@ -821,16 +820,6 @@ impl NetworkService { let current_epoch = self.beacon_chain.epoch().expect("dont fail!!"); let new_fork_digest = new_enr_fork_id.fork_digest; - let nfd = match self.beacon_chain.spec.fulu_fork_epoch { - Some(fulu_epoch) if fulu_epoch != self.beacon_chain.spec.far_future_epoch => { - Some(self.beacon_chain.spec.compute_fork_digest( - self.beacon_chain.genesis_validators_root, - new_enr_fork_id.next_fork_epoch, - )) - } - _ => None, - }; - let fork_context = &self.fork_context; if let Some(new_fork_name) = fork_context.from_context_bytes(new_fork_digest) { if fork_context.current_fork() == *new_fork_name { @@ -848,6 +837,11 @@ impl NetworkService { } fork_context.update_digest_epoch(current_epoch); + let nfd = if self.beacon_chain.spec.is_peer_das_scheduled() { + Some(fork_context.next_fork_digest()) + } else { + None + }; self.libp2p.update_fork_version(new_enr_fork_id, nfd); // Reinitialize the next_fork_update diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index f8dc7872196..8afdd392bde 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -71,6 +71,16 @@ impl ForkContext { *self.digest_epoch.read() } + pub fn next_fork_digest(&self) -> [u8; 4] { + self.spec + .next_digest_epoch(self.spec.far_future_epoch) + .map(|epoch| { + self.spec + .compute_fork_digest(self.genesis_validators_root, epoch) + }) + .unwrap_or_default() + } + /// Updates the `digest_epoch` field to a new digest epoch. 
pub fn update_digest_epoch(&self, epoch: Epoch) { *self.digest_epoch.write() = epoch; diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs index 9ef7c12c48f..7334f552576 100644 --- a/lcli/src/generate_bootnode_enr.rs +++ b/lcli/src/generate_bootnode_enr.rs @@ -38,7 +38,8 @@ pub fn run(matches: &ArgMatches, spec: &ChainSpec) -> Result<(), Str next_fork_version: genesis_fork_version, next_fork_epoch: Epoch::max_value(), // FAR_FUTURE_EPOCH }; - let enr = build_enr::(&enr_key, &config, &enr_fork_id, spec) + // FIXME: need the next fork digest + let enr = build_enr::(&enr_key, &config, &enr_fork_id, spec, [0; 4]) .map_err(|e| format!("Unable to create ENR: {:?}", e))?; fs::create_dir_all(&output_dir).map_err(|e| format!("Unable to create output-dir: {:?}", e))?; From 8dbffd3759dbdcf0ca738a5cddc88be81ea2b562 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Wed, 25 Jun 2025 11:47:11 -0500 Subject: [PATCH 06/14] Another NFD fix.. --- .../lighthouse_network/src/discovery/mod.rs | 28 +++++++++---------- .../lighthouse_network/src/service/mod.rs | 17 +++++++++-- beacon_node/network/src/service.rs | 10 +++---- consensus/types/src/fork_context.rs | 2 +- 4 files changed, 33 insertions(+), 24 deletions(-) diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index 080ff414d87..f3351719e7f 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -570,8 +570,21 @@ impl Discovery { Ok(()) } + pub fn update_enr_nfd(&mut self, nfd: [u8; 4]) -> Result<(), String> { + self.discv5 + .enr_insert::(NEXT_FORK_DIGEST_ENR_KEY, &nfd.as_ssz_bytes().into()) + .map_err(|e| format!("{:?}", e))?; + info!( + next_fork_digest = ?nfd, + "Updating the ENR nfd" + ); + enr::save_enr_to_disk(Path::new(&self.enr_dir), &self.local_enr()); + *self.network_globals.local_enr.write() = self.discv5.local_enr(); + Ok(()) + } + /// Updates the `eth2` field of our local ENR. 
- pub fn update_eth2_enr(&mut self, enr_fork_id: EnrForkId, nfd: Option<[u8; 4]>) { + pub fn update_eth2_enr(&mut self, enr_fork_id: EnrForkId) { // to avoid having a reference to the spec constant, for the logging we assume // FAR_FUTURE_EPOCH is u64::MAX let next_fork_epoch_log = if enr_fork_id.next_fork_epoch == u64::MAX { @@ -582,7 +595,6 @@ impl Discovery { info!( fork_digest = ?enr_fork_id.fork_digest, - next_fork_digest = ?nfd, next_fork_version = ?enr_fork_id.next_fork_version, next_fork_epoch = next_fork_epoch_log, "Updating the ENR fork version" @@ -598,18 +610,6 @@ impl Discovery { ) }); - if let Some(nfd) = nfd { - let _ = self - .discv5 - .enr_insert::(NEXT_FORK_DIGEST_ENR_KEY, &nfd.as_ssz_bytes().into()) - .map_err(|e| { - warn!( - error = ?e, - "Could not update nfd ENR field" - ); - }); - } - // replace the global version with discovery version *self.network_globals.local_enr.write() = self.discv5.local_enr(); diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index e787bca9ae1..033bc1346b6 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -1354,14 +1354,25 @@ impl Network { name = "libp2p", skip_all )] - pub fn update_fork_version(&mut self, enr_fork_id: EnrForkId, nfd: Option<[u8; 4]>) { - self.discovery_mut() - .update_eth2_enr(enr_fork_id.clone(), nfd); + pub fn update_fork_version(&mut self, enr_fork_id: EnrForkId) { + self.discovery_mut().update_eth2_enr(enr_fork_id.clone()); // update the local reference self.enr_fork_id = enr_fork_id; } + #[instrument(parent = None, + level = "trace", + fields(service = "libp2p"), + name = "libp2p", + skip_all + )] + pub fn update_nfd(&mut self, nfd: [u8; 4]) { + if let Err(e) = self.discovery_mut().update_enr_nfd(nfd) { + warn!(error = %e, "Could not update nfd in ENR"); + } + } + /* Private internal functions */ /// Updates the current meta data of the node to match the local ENR. 
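The `nfd` value being written into the ENR here is just the fork digest at the next scheduled digest-change epoch, falling back to a default when nothing further is scheduled. A minimal sketch of that lookup, with plain `u64` epochs standing in for `ChainSpec::all_digest_epochs` / `next_digest_epoch`; the free function is illustrative only, not the patch's API:

/// Given the sorted digest-change epochs (fork epochs plus BPO epochs),
/// return the first one strictly after the current epoch.
fn next_digest_epoch(digest_epochs: &[u64], current_epoch: u64) -> Option<u64> {
    digest_epochs.iter().copied().find(|&e| e > current_epoch)
}

fn main() {
    // Schedule matching the test vector earlier in the series.
    let epochs = [0, 9, 100, 150, 200, 250, 300];
    assert_eq!(next_digest_epoch(&epochs, 120), Some(150));
    // Past the last entry there is no next digest, which is why
    // `ForkContext::next_fork_digest` unwraps to a default value
    // instead of computing one.
    assert_eq!(next_digest_epoch(&epochs, 300), None);
}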
diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 9d850298cd5..88e910ae550 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -837,13 +837,11 @@ impl NetworkService { } fork_context.update_digest_epoch(current_epoch); - let nfd = if self.beacon_chain.spec.is_peer_das_scheduled() { - Some(fork_context.next_fork_digest()) - } else { - None - }; + if self.beacon_chain.spec.is_peer_das_scheduled() { + self.libp2p.update_nfd(fork_context.next_fork_digest()); + } - self.libp2p.update_fork_version(new_enr_fork_id, nfd); + self.libp2p.update_fork_version(new_enr_fork_id); // Reinitialize the next_fork_update self.next_fork_update = Box::pin(next_digest_delay(&self.beacon_chain).into()); diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 8afdd392bde..58f33e18f01 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -73,7 +73,7 @@ impl ForkContext { pub fn next_fork_digest(&self) -> [u8; 4] { self.spec - .next_digest_epoch(self.spec.far_future_epoch) + .next_digest_epoch(self.digest_epoch()) .map(|epoch| { self.spec .compute_fork_digest(self.genesis_validators_root, epoch) From 7ceaab5c6efbb01c21342058e3349afe91d10df1 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Thu, 26 Jun 2025 16:14:13 -0500 Subject: [PATCH 07/14] Clean up some things --- beacon_node/beacon_chain/src/beacon_chain.rs | 13 ---- beacon_node/network/src/service.rs | 74 ++++++++------------ consensus/types/src/chain_spec.rs | 5 +- consensus/types/src/fork_context.rs | 1 - 4 files changed, 32 insertions(+), 61 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 17736b05c52..8972383da6f 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6800,19 +6800,6 @@ impl BeaconChain { .enr_fork_id::(slot, self.genesis_validators_root) } - /// Calculates the `Duration` to the next fork if it exists and returns it - /// with it's corresponding `ForkName`. - pub fn duration_to_next_fork(&self) -> Option<(ForkName, Duration)> { - // If we are unable to read the slot clock we assume that it is prior to genesis and - // therefore use the genesis slot. - let slot = self.slot().unwrap_or(self.spec.genesis_slot); - - let (fork_name, epoch) = self.spec.next_fork_epoch::(slot)?; - self.slot_clock - .duration_to_slot(epoch.start_slot(T::EthSpec::slots_per_epoch())) - .map(|duration| (fork_name, duration)) - } - pub fn duration_to_next_digest(&self) -> Option<(Epoch, Duration)> { // If we are unable to read the slot clock we assume that it is prior to genesis and // therefore use the genesis slot. diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs index 88e910ae550..81b1b64d429 100644 --- a/beacon_node/network/src/service.rs +++ b/beacon_node/network/src/service.rs @@ -186,11 +186,11 @@ pub struct NetworkService { store: Arc>, /// A collection of global variables, accessible outside of the network service. network_globals: Arc>, - /// A delay that expires when a new fork takes place. - next_fork_update: Pin>>, - /// A delay that expires when we need to subscribe to a new fork's topics. - next_fork_subscriptions: Pin>>, - /// A delay that expires when we need to unsubscribe from old fork topics. + /// A delay that expires when the fork digest changes. 
+ next_digest_update: Pin>>, + /// A delay that expires when we need to subscribe to a new set of topics. + next_topic_subscriptions: Pin>>, + /// A delay that expires when we need to unsubscribe from old topics. next_unsubscribe: Pin>>, /// Shutdown beacon node after sync is complete. shutdown_after_sync: bool, @@ -249,8 +249,10 @@ impl NetworkService { let enr_fork_id = beacon_chain.enr_fork_id(); // keep track of when our fork_id needs to be updated - let next_fork_update = Box::pin(next_fork_delay(&beacon_chain).into()); - let next_fork_subscriptions = Box::pin(next_fork_subscriptions_delay(&beacon_chain).into()); + let next_digest_update = Box::pin(next_digest_delay(&beacon_chain).into()); + // topics change when the fork digest changes + let next_topic_subscriptions = + Box::pin(next_topic_subscriptions_delay(&beacon_chain).into()); let next_unsubscribe = Box::pin(None.into()); let current_slot = beacon_chain @@ -264,8 +266,6 @@ impl NetworkService { &beacon_chain.spec, )); - //debug!(fork_name = ?fork_context.current_fork(), "Current fork"); - // construct the libp2p service context let service_context = Context { config: config.clone(), @@ -345,8 +345,8 @@ impl NetworkService { router_send, store, network_globals: network_globals.clone(), - next_fork_update, - next_fork_subscriptions, + next_digest_update, + next_topic_subscriptions, next_unsubscribe, shutdown_after_sync: config.shutdown_after_sync, metrics_enabled: config.metrics_enabled, @@ -440,7 +440,7 @@ impl NetworkService { event = self.libp2p.next_event() => self.on_libp2p_event(event, &mut shutdown_sender).await, - Some(_) = &mut self.next_fork_update => self.update_next_fork(), + Some(_) = &mut self.next_digest_update => self.update_next_fork_digest(), Some(_) = &mut self.next_unsubscribe => { let new_enr_fork_id = self.beacon_chain.enr_fork_id(); @@ -449,13 +449,13 @@ impl NetworkService { self.next_unsubscribe = Box::pin(None.into()); } - Some(_) = &mut self.next_fork_subscriptions => { + Some(_) = &mut self.next_topic_subscriptions => { if let Some((epoch, _)) = self.beacon_chain.duration_to_next_digest() { let fork_name = self.beacon_chain.spec.fork_name_at_epoch(epoch); let fork_digest = self.beacon_chain.spec.compute_fork_digest(self.beacon_chain.genesis_validators_root, epoch); info!("Subscribing to new fork topics"); self.libp2p.subscribe_new_fork_topics(fork_name, fork_digest); - self.next_fork_subscriptions = Box::pin(None.into()); + self.next_topic_subscriptions = Box::pin(None.into()); } else { error!( "Fork subscription scheduled but no fork scheduled"); @@ -815,9 +815,15 @@ impl NetworkService { } } - fn update_next_fork(&mut self) { + fn update_next_fork_digest(&mut self) { let new_enr_fork_id = self.beacon_chain.enr_fork_id(); - let current_epoch = self.beacon_chain.epoch().expect("dont fail!!"); + // if we are unable to read the slot clock we assume that it is prior to genesis + let current_epoch = self.beacon_chain.epoch().unwrap_or( + self.beacon_chain + .spec + .genesis_slot + .epoch(T::EthSpec::slots_per_epoch()), + ); let new_fork_digest = new_enr_fork_id.fork_digest; let fork_context = &self.fork_context; @@ -843,16 +849,16 @@ impl NetworkService { self.libp2p.update_fork_version(new_enr_fork_id); // Reinitialize the next_fork_update - self.next_fork_update = Box::pin(next_digest_delay(&self.beacon_chain).into()); + self.next_digest_update = Box::pin(next_digest_delay(&self.beacon_chain).into()); // Set the next_unsubscribe delay. 
let epoch_duration = self.beacon_chain.spec.seconds_per_slot * T::EthSpec::slots_per_epoch(); let unsubscribe_delay = Duration::from_secs(UNSUBSCRIBE_DELAY_EPOCHS * epoch_duration); - // Update the `next_fork_subscriptions` timer if the next fork is known. - self.next_fork_subscriptions = - Box::pin(next_digest_subscriptions_delay(&self.beacon_chain).into()); + // Update the `next_topic_subscriptions` timer if the next change in the fork digest is known. + self.next_topic_subscriptions = + Box::pin(next_topic_subscriptions_delay(&self.beacon_chain).into()); self.next_unsubscribe = Box::pin(Some(tokio::time::sleep(unsubscribe_delay)).into()); info!( remaining_epochs = UNSUBSCRIBE_DELAY_EPOCHS, @@ -882,16 +888,8 @@ impl NetworkService { } } -/// Returns a `Sleep` that triggers after the next change in the beacon chain fork version. +/// Returns a `Sleep` that triggers after the next change in the fork digest. /// If there is no scheduled fork, `None` is returned. -fn next_fork_delay( - beacon_chain: &BeaconChain, -) -> Option { - beacon_chain - .duration_to_next_fork() - .map(|(_, until_fork)| tokio::time::sleep(until_fork)) -} - fn next_digest_delay( beacon_chain: &BeaconChain, ) -> Option { @@ -900,23 +898,9 @@ fn next_digest_delay( .map(|(_, until_epoch)| tokio::time::sleep(until_epoch)) } -/// Returns a `Sleep` that triggers `SUBSCRIBE_DELAY_SLOTS` before the next fork. +/// Returns a `Sleep` that triggers `SUBSCRIBE_DELAY_SLOTS` before the next fork digest changes. /// Returns `None` if there are no scheduled forks or we are already past `current_slot + SUBSCRIBE_DELAY_SLOTS > fork_slot`. -fn next_fork_subscriptions_delay( - beacon_chain: &BeaconChain, -) -> Option { - if let Some((_, duration_to_fork)) = beacon_chain.duration_to_next_fork() { - let duration_to_subscription = duration_to_fork.saturating_sub(Duration::from_secs( - beacon_chain.spec.seconds_per_slot * SUBSCRIBE_DELAY_SLOTS, - )); - if !duration_to_subscription.is_zero() { - return Some(tokio::time::sleep(duration_to_subscription)); - } - } - None -} - -fn next_digest_subscriptions_delay( +fn next_topic_subscriptions_delay( beacon_chain: &BeaconChain, ) -> Option { if let Some((_, duration_to_epoch)) = beacon_chain.duration_to_next_digest() { diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 7f91e2efb73..4c7c1e514bd 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -288,7 +288,6 @@ impl ChainSpec { next_fork_version: self.next_fork_version::(slot), next_fork_epoch: self .next_digest_epoch(slot.epoch(E::slots_per_epoch())) - .map(|e| e) .unwrap_or(self.far_future_epoch), } } @@ -577,7 +576,9 @@ impl ChainSpec { // XOR the base digest with the first 4 bytes of the hash let mut masked_digest = [0u8; 4]; for (i, (a, b)) in base_digest.iter().zip(hash.iter()).enumerate() { - masked_digest[i] = a ^ b; + if let Some(x) = masked_digest.get_mut(i){ + *x = a ^ b; + } } masked_digest } diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 58f33e18f01..044a7ee72bc 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -30,7 +30,6 @@ impl ForkContext { let epoch_to_digest: HashMap<_, _> = spec .all_digest_epochs() - .into_iter() .map(|epoch| { let fork_digest = spec.compute_fork_digest(genesis_validators_root, epoch); (epoch, fork_digest) From e3b654c0d39eb09851ba6b32ce0e519681bc5b21 Mon Sep 17 00:00:00 2001 From: Mark Mackey Date: Thu, 26 Jun 2025 16:25:45 -0500 Subject: [PATCH 
08/14] Clean up some TODOs --- .../lighthouse_network/src/rpc/handler.rs | 4 ++-- .../lighthouse_network/src/rpc/protocol.rs | 23 +++++++++---------- .../src/rpc/rate_limiter.rs | 12 +++++----- consensus/types/src/chain_spec.rs | 2 +- 4 files changed, 20 insertions(+), 21 deletions(-) diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 8c35bf71459..bc420de1a07 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -951,7 +951,7 @@ where }; let max_responses = - req.max_responses(self.fork_context.current_fork(), &self.fork_context.spec); + req.max_responses(self.fork_context.digest_epoch(), &self.fork_context.spec); // store requests that expect responses if max_responses > 0 { @@ -1022,7 +1022,7 @@ where // add the stream to substreams if we expect a response, otherwise drop the stream. let max_responses = - request.max_responses(self.fork_context.current_fork(), &self.fork_context.spec); + request.max_responses(self.fork_context.digest_epoch(), &self.fork_context.spec); if max_responses > 0 { let max_remaining_chunks = if request.expect_exactly_one_response() { // Currently enforced only for multiple responses diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 8f613dcbf9e..9b502384108 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -18,10 +18,10 @@ use tokio_util::{ }; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BlobSidecar, ChainSpec, DataColumnSidecar, - EmptyBlock, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap, + EmptyBlock, Epoch, EthSpec, EthSpecId, ForkContext, ForkName, LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, - MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, Slot, + MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, }; // Note: Hardcoding the `EthSpec` type for `SignedBeaconBlock` as min/max values is @@ -550,10 +550,10 @@ impl ProtocolId { Protocol::BlobsByRange => rpc_blob_limits::(), Protocol::BlobsByRoot => rpc_blob_limits::(), Protocol::DataColumnsByRoot => { - rpc_data_column_limits::(fork_context.current_fork(), &fork_context.spec) + rpc_data_column_limits::(fork_context.digest_epoch(), &fork_context.spec) } Protocol::DataColumnsByRange => { - rpc_data_column_limits::(fork_context.current_fork(), &fork_context.spec) + rpc_data_column_limits::(fork_context.digest_epoch(), &fork_context.spec) } Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), @@ -635,11 +635,13 @@ pub fn rpc_blob_limits() -> RpcLimits { } } -pub fn rpc_data_column_limits(fork_name: ForkName, spec: &ChainSpec) -> RpcLimits { +pub fn rpc_data_column_limits( + current_digest_epoch: Epoch, + spec: &ChainSpec, +) -> RpcLimits { RpcLimits::new( DataColumnSidecar::::min_size(), - // TODO(EIP-7892): fix this once we change fork-version on BPO forks - DataColumnSidecar::::max_size(spec.max_blobs_per_block_within_fork(fork_name) as usize), + DataColumnSidecar::::max_size(spec.max_blobs_per_block(current_digest_epoch) as usize), ) } @@ -738,16 +740,13 @@ impl RequestType { /* These functions are used in the handler for stream management */ /// Maximum number of responses expected for this request. 
- /// TODO(EIP-7892): refactor this to remove `_current_fork` - pub fn max_responses(&self, _current_fork: ForkName, spec: &ChainSpec) -> u64 { + pub fn max_responses(&self, digest_epoch: Epoch, spec: &ChainSpec) -> u64 { match self { RequestType::Status(_) => 1, RequestType::Goodbye(_) => 0, RequestType::BlocksByRange(req) => *req.count(), RequestType::BlocksByRoot(req) => req.block_roots().len() as u64, - RequestType::BlobsByRange(req) => { - req.max_blobs_requested(Slot::new(req.start_slot).epoch(E::slots_per_epoch()), spec) - } + RequestType::BlobsByRange(req) => req.max_blobs_requested(digest_epoch, spec), RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, RequestType::DataColumnsByRoot(req) => req.max_requested() as u64, RequestType::DataColumnsByRange(req) => req.max_requested::(), diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index 6e66999612a..c190b4cbf55 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -13,7 +13,7 @@ use std::sync::Arc; use std::task::{Context, Poll}; use std::time::{Duration, Instant}; use tokio::time::Interval; -use types::{ChainSpec, EthSpec, ForkContext, ForkName}; +use types::{ChainSpec, Epoch, EthSpec, ForkContext}; /// Nanoseconds since a given time. // Maintained as u64 to reduce footprint @@ -267,7 +267,7 @@ impl RPCRateLimiterBuilder { pub trait RateLimiterItem { fn protocol(&self) -> Protocol; - fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64; + fn max_responses(&self, digest_epoch: Epoch, spec: &ChainSpec) -> u64; } impl RateLimiterItem for super::RequestType { @@ -275,8 +275,8 @@ impl RateLimiterItem for super::RequestType { self.versioned_protocol().protocol() } - fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { - self.max_responses(current_fork, spec) + fn max_responses(&self, digest_epoch: Epoch, spec: &ChainSpec) -> u64 { + self.max_responses(digest_epoch, spec) } } @@ -285,7 +285,7 @@ impl RateLimiterItem for (super::RpcResponse, Protocol) { self.1 } - fn max_responses(&self, _current_fork: ForkName, _spec: &ChainSpec) -> u64 { + fn max_responses(&self, _digest_epoch: Epoch, _spec: &ChainSpec) -> u64 { // A response chunk consumes one token of the rate limiter. 
        1
     }
 
@@ -353,7 +353,7 @@ impl RPCRateLimiter {
     ) -> Result<(), RateLimitedErr> {
         let time_since_start = self.init_time.elapsed();
         let tokens = request
-            .max_responses(self.fork_context.current_fork(), &self.fork_context.spec)
+            .max_responses(self.fork_context.digest_epoch(), &self.fork_context.spec)
             .max(1);
 
         let check =
diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs
index 4c7c1e514bd..58e55d32e59 100644
--- a/consensus/types/src/chain_spec.rs
+++ b/consensus/types/src/chain_spec.rs
@@ -576,7 +576,7 @@ impl ChainSpec {
         // XOR the base digest with the first 4 bytes of the hash
         let mut masked_digest = [0u8; 4];
         for (i, (a, b)) in base_digest.iter().zip(hash.iter()).enumerate() {
-            if let Some(x) = masked_digest.get_mut(i){
+            if let Some(x) = masked_digest.get_mut(i) {
                 *x = a ^ b;
             }
         }

From 1c3ceba3fb3252c84c8d540bd2d0ca8098d9b8b3 Mon Sep 17 00:00:00 2001
From: Mark Mackey
Date: Thu, 26 Jun 2025 16:51:30 -0500
Subject: [PATCH 09/14] nfd accessor functions

---
 beacon_node/lighthouse_network/src/discovery/enr.rs | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs
index d6fe4e3a3ee..017db1fdc73 100644
--- a/beacon_node/lighthouse_network/src/discovery/enr.rs
+++ b/beacon_node/lighthouse_network/src/discovery/enr.rs
@@ -44,6 +44,9 @@ pub trait Eth2Enr {
     /// The peerdas custody group count associated with the ENR.
     fn custody_group_count(&self, spec: &ChainSpec) -> Result;
 
+    /// The next fork digest associated with the ENR.
+    fn next_fork_digest(&self) -> Result<[u8; 4], &'static str>;
+
     fn eth2(&self) -> Result;
 }
@@ -83,6 +86,13 @@ impl Eth2Enr for Enr {
         }
     }
 
+    fn next_fork_digest(&self) -> Result<[u8; 4], &'static str> {
+        self
+            .get_decodable::<[u8; 4]>(NEXT_FORK_DIGEST_ENR_KEY)
+            .ok_or("ENR next fork digest non-existent")?
+            .map_err(|_| "Could not decode the ENR next fork digest")
+    }
+
     fn eth2(&self) -> Result {
         let eth2_bytes: Bytes = self
             .get_decodable(ETH2_ENR_KEY)

From ca9d36e161f22aff086e02d7a3c9e6662011495e Mon Sep 17 00:00:00 2001
From: Pawan Dhananjay
Date: Thu, 3 Jul 2025 15:22:35 -0700
Subject: [PATCH 10/14] Quick fix for crash

---
 consensus/types/src/fork_context.rs | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs
index 044a7ee72bc..86a93323d95 100644
--- a/consensus/types/src/fork_context.rs
+++ b/consensus/types/src/fork_context.rs
@@ -1,7 +1,7 @@
 use parking_lot::RwLock;
 
 use crate::{ChainSpec, Epoch, EthSpec, ForkName, Hash256, Slot};
-use std::collections::{ HashMap, HashSet};
 
 /// Provides fork specific info like the current fork name and the fork digests corresponding to every valid fork.
 #[derive(Debug)]
@@ -44,7 +44,15 @@ impl ForkContext {
             })
             .collect();
 
-        let digest_epoch = RwLock::new(current_slot.epoch(E::slots_per_epoch()));
+        let current_epoch = current_slot.epoch(E::slots_per_epoch());
+        let digest_epoch = RwLock::new(
+            epoch_to_digest
+                .keys()
+                .filter(|&&epoch| epoch <= current_epoch)
+                .max()
+                .cloned()
+                .expect("should match at least genesis epoch"),
+        );
 
         Self {
             digest_epoch,

From 6114bf60a4128469a0d10e70c56cb098268b9bfc Mon Sep 17 00:00:00 2001
From: Jimmy Chen
Date: Fri, 4 Jul 2025 11:57:51 +1000
Subject: [PATCH 11/14] Bump fusaka devnet version.
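An aside on the crash fix in patch 10 above: the old code seeded `digest_epoch` with the raw current epoch, which is generally not one of the scheduled digest-boundary epochs, so later lookups keyed on it could miss. The fix picks the greatest scheduled epoch at or below the current one. A minimal sketch under stated assumptions (plain `u64` epochs and a bare `HashMap`, not Lighthouse's `Epoch`/`ForkContext` types):

    use std::collections::HashMap;

    // Mirrors the `.keys().filter(..).max()` chain added in patch 10.
    fn active_digest_epoch(epoch_to_digest: &HashMap<u64, [u8; 4]>, current_epoch: u64) -> u64 {
        epoch_to_digest
            .keys()
            .filter(|&&epoch| epoch <= current_epoch)
            .max()
            .copied()
            .expect("the genesis epoch is always scheduled")
    }

    fn main() {
        let schedule = HashMap::from([(0u64, [0u8; 4]), (10u64, [1u8; 4])]);
        // Between boundaries the earlier digest epoch stays active...
        assert_eq!(active_digest_epoch(&schedule, 5), 0);
        // ...and it switches exactly at the scheduled epoch.
        assert_eq!(active_digest_epoch(&schedule, 10), 10);
        assert_eq!(active_digest_epoch(&schedule, 11), 10);
    }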
--- scripts/tests/checkpoint-sync-config-devnet.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/tests/checkpoint-sync-config-devnet.yaml b/scripts/tests/checkpoint-sync-config-devnet.yaml index de3020a8847..a5093631b44 100644 --- a/scripts/tests/checkpoint-sync-config-devnet.yaml +++ b/scripts/tests/checkpoint-sync-config-devnet.yaml @@ -3,18 +3,18 @@ participants: - cl_type: lighthouse cl_image: lighthouse:local el_type: geth - el_image: ethpandaops/geth:fusaka-devnet-1 + el_image: ethpandaops/geth:fusaka-devnet-2 supernode: true - cl_type: lighthouse cl_image: lighthouse:local el_type: geth - el_image: ethpandaops/geth:fusaka-devnet-1 + el_image: ethpandaops/geth:fusaka-devnet-2 supernode: false checkpoint_sync_enabled: true -checkpoint_sync_url: "https://checkpoint-sync.fusaka-devnet-1.ethpandaops.io" +checkpoint_sync_url: "https://checkpoint-sync.fusaka-devnet-2.ethpandaops.io" global_log_level: debug network_params: - network: fusaka-devnet-1 + network: fusaka-devnet-2 From d90fbd707a88544ca6a04eb78a7cc255537a5bd4 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Tue, 8 Jul 2025 17:37:51 +1000 Subject: [PATCH 12/14] Improve data column KZG verification buckets. --- beacon_node/beacon_chain/src/metrics.rs | 40 ++++++++++++++----------- 1 file changed, 22 insertions(+), 18 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 5ca764821f2..fad0cef9272 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1803,26 +1803,30 @@ pub static KZG_VERIFICATION_BATCH_TIMES: LazyLock> = LazyLock: "Runtime of batched kzg verification", ) }); +/// For reference on how the kzg data column verification buckets were set, here are some numbers for 48 blobs: +/// * 1 column batch: 5.76 ms +/// * 8 columns batch: 34.3 ms +/// * 64 columns batch: 257 ms +/// * 128 columns batch: 508 ms pub static KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES: LazyLock> = - LazyLock::new(|| { - try_create_histogram_with_buckets( - "beacon_kzg_verification_data_column_single_seconds", - "Runtime of single data column kzg verification", - Ok(vec![ - 0.0005, 0.001, 0.0015, 0.002, 0.003, 0.004, 0.005, 0.007, 0.01, 0.02, 0.05, - ]), - ) - }); + // 5 exponential buckets between 0.002 and 0.032 seconds, with more granularity on the lower end. + LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_kzg_verification_data_column_single_seconds", + "Runtime of single data column kzg verification", + exponential_buckets(0.002, 2.0, 5), + ) + }); pub static KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES: LazyLock> = - LazyLock::new(|| { - try_create_histogram_with_buckets( - "beacon_kzg_verification_data_column_batch_seconds", - "Runtime of batched data column kzg verification", - Ok(vec![ - 0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.015, 0.02, 0.03, 0.05, 0.07, - ]), - ) - }); + // 10 exponential buckets between 0.002 and 1.024 seconds, with more + // granularity on the lower end. + LazyLock::new(|| { + try_create_histogram_with_buckets( + "beacon_kzg_verification_data_column_batch_seconds", + "Runtime of batched data column kzg verification", + exponential_buckets(0.002, 2.0, 10), + ) + }); pub static BLOCK_PRODUCTION_BLOBS_VERIFICATION_TIMES: LazyLock> = LazyLock::new( || { From 7f4acf9efa1e1ec65a69d3ad81361d81b48c976f Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Thu, 10 Jul 2025 17:03:24 +1000 Subject: [PATCH 13/14] Add more buckets to the KZG verification metric. 
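For readers cross-checking the bucket comments in patches 12 and 13: `exponential_buckets(start, factor, count)` yields `start * factor^i` for `i` in `0..count`, so `(0.002, 2.0, 5)` tops out at 0.032 s and `(0.002, 2.0, 7)` at 0.128 s. A standalone sketch of that arithmetic (a simplified stand-in; the real metrics helper returns a `Result` and is not reproduced here):

    // Simplified stand-in for the metrics crate's exponential bucket helper.
    fn exponential_buckets(start: f64, factor: f64, count: usize) -> Vec<f64> {
        (0..count).map(|i| start * factor.powi(i as i32)).collect()
    }

    fn main() {
        // Patch 12: 5 buckets ending at 0.032 s; patch 13 widens to 7 (0.128 s).
        assert_eq!(exponential_buckets(0.002, 2.0, 5).last().copied(), Some(0.032));
        assert_eq!(exponential_buckets(0.002, 2.0, 7).last().copied(), Some(0.128));
    }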
--- beacon_node/beacon_chain/src/metrics.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index fad0cef9272..23d7a1542db 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1809,12 +1809,12 @@ pub static KZG_VERIFICATION_BATCH_TIMES: LazyLock> = LazyLock: /// * 64 columns batch: 257 ms /// * 128 columns batch: 508 ms pub static KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES: LazyLock> = - // 5 exponential buckets between 0.002 and 0.032 seconds, with more granularity on the lower end. + // 7 exponential buckets between 0.002 and 0.128 seconds, with more granularity on the lower end. LazyLock::new(|| { try_create_histogram_with_buckets( "beacon_kzg_verification_data_column_single_seconds", "Runtime of single data column kzg verification", - exponential_buckets(0.002, 2.0, 5), + exponential_buckets(0.002, 2.0, 7), ) }); pub static KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES: LazyLock> = From 20627a32df473a3d5899feb5593af54fb4f77f90 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Fri, 11 Jul 2025 04:17:13 +1000 Subject: [PATCH 14/14] `fusaksa-devnet-2` clean up and fixes (#7722) * Clean up and add more code comments on BPO forks. * Fix tests. * Fix tests. * Fix moar tests. * Fix moar tests. * Ensure the same `ChainSpec` is used throughout the tests. * Refactor `ForkContext` to avoid recomputing fork digests and using cached values. * Fix failing tests due to mismatching chainspec and block slots. * Review comments - adjust code style and add docs. * make sure all forks are covered in tests --------- Co-authored-by: Pawan Dhananjay --- beacon_node/beacon_chain/src/beacon_chain.rs | 9 + beacon_node/http_api/src/light_client.rs | 10 +- beacon_node/lighthouse_network/src/config.rs | 2 +- .../lighthouse_network/src/discovery/enr.rs | 31 +- .../lighthouse_network/src/discovery/mod.rs | 12 +- .../lighthouse_network/src/rpc/codec.rs | 335 ++++++++++-------- .../lighthouse_network/src/rpc/handler.rs | 14 +- .../lighthouse_network/src/rpc/methods.rs | 6 +- .../lighthouse_network/src/rpc/protocol.rs | 16 +- .../src/rpc/rate_limiter.rs | 5 +- .../lighthouse_network/src/service/mod.rs | 16 +- .../lighthouse_network/src/types/pubsub.rs | 169 ++++----- .../lighthouse_network/tests/common.rs | 53 +-- .../lighthouse_network/tests/rpc_tests.rs | 46 ++- beacon_node/network/src/service.rs | 20 +- beacon_node/network/src/service/tests.rs | 9 +- beacon_node/network/src/sync/manager.rs | 2 +- .../network/src/sync/network_context.rs | 2 +- consensus/types/src/beacon_block.rs | 35 +- consensus/types/src/chain_spec.rs | 41 +-- consensus/types/src/enr_fork_id.rs | 4 + consensus/types/src/fork_context.rs | 275 +++++++++++--- consensus/types/src/fork_name.rs | 2 - consensus/types/src/signed_beacon_block.rs | 16 +- lcli/src/generate_bootnode_enr.rs | 7 +- 25 files changed, 703 insertions(+), 434 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ed8cc37cc42..e0fc03de06d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6804,6 +6804,15 @@ impl BeaconChain { .enr_fork_id::(slot, self.genesis_validators_root) } + /// Returns the fork_digest corresponding to an epoch. 
+ /// See [`ChainSpec::compute_fork_digest`] + pub fn compute_fork_digest(&self, epoch: Epoch) -> [u8; 4] { + self.spec + .compute_fork_digest(self.genesis_validators_root, epoch) + } + + /// Calculates the `Duration` to the next fork digest (this could be either a regular or BPO + /// hard fork) if it exists and returns it with its corresponding `Epoch`. pub fn duration_to_next_digest(&self) -> Option<(Epoch, Duration)> { // If we are unable to read the slot clock we assume that it is prior to genesis and // therefore use the genesis slot. diff --git a/beacon_node/http_api/src/light_client.rs b/beacon_node/http_api/src/light_client.rs index a51c4acc719..f9559d738ea 100644 --- a/beacon_node/http_api/src/light_client.rs +++ b/beacon_node/http_api/src/light_client.rs @@ -150,12 +150,10 @@ fn map_light_client_update_to_ssz_chunk( chain: &BeaconChain, light_client_update: &LightClientUpdate, ) -> LightClientUpdateResponseChunk { - let fork_digest = chain.spec.compute_fork_digest( - chain.genesis_validators_root, - light_client_update - .attested_header_slot() - .epoch(T::EthSpec::slots_per_epoch()), - ); + let epoch = light_client_update + .attested_header_slot() + .epoch(T::EthSpec::slots_per_epoch()); + let fork_digest = chain.compute_fork_digest(epoch); let payload = light_client_update.as_ssz_bytes(); let response_chunk_len = fork_digest.len() + payload.len(); diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index bd72a5d51a2..aee53a469c4 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -457,7 +457,7 @@ pub fn gossipsub_config( ) -> Vec { let topic_bytes = message.topic.as_str().as_bytes(); - if fork_context.current_fork().altair_enabled() { + if fork_context.current_fork_name().altair_enabled() { let topic_len_bytes = topic_bytes.len().to_le_bytes(); let mut vec = Vec::with_capacity( prefix.len() + topic_len_bytes.len() + topic_bytes.len() + message.data.len(), diff --git a/beacon_node/lighthouse_network/src/discovery/enr.rs b/beacon_node/lighthouse_network/src/discovery/enr.rs index 017db1fdc73..4c055604979 100644 --- a/beacon_node/lighthouse_network/src/discovery/enr.rs +++ b/beacon_node/lighthouse_network/src/discovery/enr.rs @@ -3,6 +3,7 @@ pub use discv5::enr::CombinedKey; use super::enr_ext::CombinedKeyExt; +use super::enr_ext::{EnrExt, QUIC6_ENR_KEY, QUIC_ENR_KEY}; use super::ENR_FILENAME; use crate::types::{Enr, EnrAttestationBitfield, EnrSyncCommitteeBitfield}; use crate::NetworkConfig; @@ -18,8 +19,6 @@ use std::str::FromStr; use tracing::{debug, warn}; use types::{ChainSpec, EnrForkId, EthSpec}; -use super::enr_ext::{EnrExt, QUIC6_ENR_KEY, QUIC_ENR_KEY}; - /// The ENR field specifying the fork id. pub const ETH2_ENR_KEY: &str = "eth2"; /// The ENR field specifying the next fork digest. @@ -87,8 +86,7 @@ impl Eth2Enr for Enr { } fn next_fork_digest(&self) -> Result<[u8; 4], &'static str> { - self - .get_decodable::<[u8; 4]>(NEXT_FORK_DIGEST_ENR_KEY) + self.get_decodable::<[u8; 4]>(NEXT_FORK_DIGEST_ENR_KEY) .ok_or("ENR next fork digest non-existent")? .map_err(|_| "Could not decode the ENR next fork digest") } @@ -161,14 +159,14 @@ pub fn build_or_load_enr( local_key: Keypair, config: &NetworkConfig, enr_fork_id: &EnrForkId, + next_fork_digest: [u8; 4], spec: &ChainSpec, - nfd: [u8; 4], ) -> Result { // Build the local ENR. 
// Note: Discovery should update the ENR record's IP to the external IP as seen by the // majority of our peers, if the CLI doesn't expressly forbid it. let enr_key = CombinedKey::from_libp2p(local_key)?; - let mut local_enr = build_enr::(&enr_key, config, enr_fork_id, spec, nfd)?; + let mut local_enr = build_enr::(&enr_key, config, enr_fork_id, next_fork_digest, spec)?; use_or_load_enr(&enr_key, &mut local_enr, config)?; Ok(local_enr) @@ -179,8 +177,8 @@ pub fn build_enr( enr_key: &CombinedKey, config: &NetworkConfig, enr_fork_id: &EnrForkId, + next_fork_digest: [u8; 4], spec: &ChainSpec, - nfd: [u8; 4], ) -> Result { let mut builder = discv5::enr::Enr::builder(); let (maybe_ipv4_address, maybe_ipv6_address) = &config.enr_address; @@ -271,7 +269,7 @@ pub fn build_enr( &bitfield.as_ssz_bytes().into(), ); - // only set `cgc` if PeerDAS fork epoch has been scheduled + // only set `cgc` and `nfd` if PeerDAS fork (Fulu) epoch has been scheduled if spec.is_peer_das_scheduled() { let custody_group_count = if let Some(false_cgc) = config.advertise_false_custody_group_count { @@ -282,11 +280,7 @@ pub fn build_enr( spec.custody_requirement }; builder.add_value(PEERDAS_CUSTODY_GROUP_COUNT_ENR_KEY, &custody_group_count); - } - - // only set `nfd` if peer das is scheduled - if spec.is_peer_das_scheduled() { - builder.add_value(NEXT_FORK_DIGEST_ENR_KEY, &nfd); + builder.add_value(NEXT_FORK_DIGEST_ENR_KEY, &next_fork_digest); } builder @@ -359,6 +353,7 @@ mod test { use types::{Epoch, MainnetEthSpec}; type E = MainnetEthSpec; + const TEST_NFD: [u8; 4] = [0x01, 0x02, 0x03, 0x04]; fn make_fulu_spec() -> ChainSpec { let mut spec = E::default_spec(); @@ -370,11 +365,17 @@ mod test { let keypair = libp2p::identity::secp256k1::Keypair::generate(); let enr_key = CombinedKey::from_secp256k1(&keypair); let enr_fork_id = EnrForkId::default(); - let nfd = [0; 4]; // placeholder - let enr = build_enr::(&enr_key, &config, &enr_fork_id, spec, nfd).unwrap(); + let enr = build_enr::(&enr_key, &config, &enr_fork_id, TEST_NFD, spec).unwrap(); (enr, enr_key) } + #[test] + fn test_nfd_enr_encoding() { + let spec = make_fulu_spec(); + let enr = build_enr_with_config(NetworkConfig::default(), &spec).0; + assert_eq!(enr.next_fork_digest().unwrap(), TEST_NFD); + } + #[test] fn custody_group_count_default() { let config = NetworkConfig { diff --git a/beacon_node/lighthouse_network/src/discovery/mod.rs b/beacon_node/lighthouse_network/src/discovery/mod.rs index f3351719e7f..df866dfc646 100644 --- a/beacon_node/lighthouse_network/src/discovery/mod.rs +++ b/beacon_node/lighthouse_network/src/discovery/mod.rs @@ -1230,9 +1230,15 @@ mod tests { config.set_listening_addr(crate::ListenAddress::unused_v4_ports()); let config = Arc::new(config); let enr_key: CombinedKey = CombinedKey::from_secp256k1(&keypair); - let nfd = [0; 4]; // placeholder - let enr: Enr = - build_enr::(&enr_key, &config, &EnrForkId::default(), &spec, nfd).unwrap(); + let next_fork_digest = [0; 4]; + let enr: Enr = build_enr::( + &enr_key, + &config, + &EnrForkId::default(), + next_fork_digest, + &spec, + ) + .unwrap(); let globals = NetworkGlobals::new( enr, MetaData::V2(MetaDataV2 { diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 553ed9aff97..d01b3b76ca1 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -193,7 +193,7 @@ impl Decoder for SSZSnappyInboundCodec { handle_rpc_request( self.protocol.versioned_protocol, 
&decoded_buffer, - self.fork_context.current_fork(), + self.fork_context.current_fork_name(), &self.fork_context.spec, ) } @@ -882,7 +882,7 @@ fn context_bytes_to_fork_name( fork_context: Arc, ) -> Result { fork_context - .from_context_bytes(context_bytes) + .get_fork_from_context_bytes(context_bytes) .cloned() .ok_or_else(|| { let encoded = hex::encode(context_bytes); @@ -910,69 +910,88 @@ mod tests { type Spec = types::MainnetEthSpec; - fn fork_context(fork_name: ForkName) -> ForkContext { + fn spec_with_all_forks_enabled() -> ChainSpec { let mut chain_spec = Spec::default_spec(); - let altair_fork_epoch = Epoch::new(1); - let bellatrix_fork_epoch = Epoch::new(2); - let capella_fork_epoch = Epoch::new(3); - let deneb_fork_epoch = Epoch::new(4); - let electra_fork_epoch = Epoch::new(5); - let fulu_fork_epoch = Epoch::new(6); - - chain_spec.altair_fork_epoch = Some(altair_fork_epoch); - chain_spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); - chain_spec.capella_fork_epoch = Some(capella_fork_epoch); - chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); - chain_spec.electra_fork_epoch = Some(electra_fork_epoch); - chain_spec.fulu_fork_epoch = Some(fulu_fork_epoch); - - let current_slot = match fork_name { - ForkName::Base => Slot::new(0), - ForkName::Altair => altair_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Bellatrix => bellatrix_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Deneb => deneb_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Electra => electra_fork_epoch.start_slot(Spec::slots_per_epoch()), - ForkName::Fulu => fulu_fork_epoch.start_slot(Spec::slots_per_epoch()), + chain_spec.altair_fork_epoch = Some(Epoch::new(1)); + chain_spec.bellatrix_fork_epoch = Some(Epoch::new(2)); + chain_spec.capella_fork_epoch = Some(Epoch::new(3)); + chain_spec.deneb_fork_epoch = Some(Epoch::new(4)); + chain_spec.electra_fork_epoch = Some(Epoch::new(5)); + chain_spec.fulu_fork_epoch = Some(Epoch::new(6)); + + // check that we have all forks covered + assert!(chain_spec.fork_epoch(ForkName::latest()).is_some()); + chain_spec + } + + fn fork_context(fork_name: ForkName, spec: &ChainSpec) -> ForkContext { + let current_epoch = match fork_name { + ForkName::Base => Some(Epoch::new(0)), + ForkName::Altair => spec.altair_fork_epoch, + ForkName::Bellatrix => spec.bellatrix_fork_epoch, + ForkName::Capella => spec.capella_fork_epoch, + ForkName::Deneb => spec.deneb_fork_epoch, + ForkName::Electra => spec.electra_fork_epoch, + ForkName::Fulu => spec.fulu_fork_epoch, }; - ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) + let current_slot = current_epoch.unwrap().start_slot(Spec::slots_per_epoch()); + ForkContext::new::(current_slot, Hash256::zero(), spec) } /// Smallest sized block across all current forks. Useful for testing /// min length check conditions. 
- fn empty_base_block() -> SignedBeaconBlock { - let empty_block = BeaconBlock::Base(BeaconBlockBase::::empty(&Spec::default_spec())); + fn empty_base_block(spec: &ChainSpec) -> SignedBeaconBlock { + let empty_block = BeaconBlock::Base(BeaconBlockBase::::empty(spec)); SignedBeaconBlock::from_block(empty_block, Signature::empty()) } - fn altair_block() -> SignedBeaconBlock { - let full_block = - BeaconBlock::Altair(BeaconBlockAltair::::full(&Spec::default_spec())); + fn altair_block(spec: &ChainSpec) -> SignedBeaconBlock { + // The context bytes are now derived from the block epoch, so we need to have the slot set + // here. + let full_block = BeaconBlock::Altair(BeaconBlockAltair::::full(spec)); SignedBeaconBlock::from_block(full_block, Signature::empty()) } - fn empty_blob_sidecar() -> Arc> { - Arc::new(BlobSidecar::empty()) + fn empty_blob_sidecar(spec: &ChainSpec) -> Arc> { + // The context bytes are now derived from the block epoch, so we need to have the slot set + // here. + let mut blob_sidecar = BlobSidecar::::empty(); + blob_sidecar.signed_block_header.message.slot = spec + .deneb_fork_epoch + .expect("deneb fork epoch must be set") + .start_slot(Spec::slots_per_epoch()); + Arc::new(blob_sidecar) } - fn empty_data_column_sidecar() -> Arc> { - Arc::new(DataColumnSidecar { + fn empty_data_column_sidecar(spec: &ChainSpec) -> Arc> { + // The context bytes are now derived from the block epoch, so we need to have the slot set + // here. + let data_column_sidecar = DataColumnSidecar { index: 0, column: VariableList::new(vec![Cell::::default()]).unwrap(), kzg_commitments: VariableList::new(vec![KzgCommitment::empty_for_testing()]).unwrap(), kzg_proofs: VariableList::new(vec![KzgProof::empty()]).unwrap(), signed_block_header: SignedBeaconBlockHeader { - message: BeaconBlockHeader::empty(), + message: BeaconBlockHeader { + slot: spec + .fulu_fork_epoch + .expect("fulu fork epoch must be set") + .start_slot(Spec::slots_per_epoch()), + ..BeaconBlockHeader::empty() + }, signature: Signature::empty(), }, kzg_commitments_inclusion_proof: Default::default(), - }) + }; + Arc::new(data_column_sidecar) } /// Bellatrix block with length < max_rpc_size. fn bellatrix_block_small(spec: &ChainSpec) -> SignedBeaconBlock { + // The context bytes are now derived from the block epoch, so we need to have the slot set + // here. let mut block: BeaconBlockBellatrix<_, FullPayload> = - BeaconBlockBellatrix::empty(&Spec::default_spec()); + BeaconBlockBellatrix::empty(spec); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat_n(tx, 5000).collect::>()); @@ -988,8 +1007,10 @@ mod tests { /// The max limit for a Bellatrix block is in the order of ~16GiB which wouldn't fit in memory. /// Hence, we generate a Bellatrix block just greater than `MAX_RPC_SIZE` to test rejection on the rpc layer. fn bellatrix_block_large(spec: &ChainSpec) -> SignedBeaconBlock { + // The context bytes are now derived from the block epoch, so we need to have the slot set + // here. 
let mut block: BeaconBlockBellatrix<_, FullPayload> = - BeaconBlockBellatrix::empty(&Spec::default_spec()); + BeaconBlockBellatrix::empty(spec); let tx = VariableList::from(vec![0; 1024]); let txs = VariableList::from(std::iter::repeat_n(tx, 100000).collect::>()); @@ -1045,7 +1066,7 @@ mod tests { } } - fn dcbroot_request(spec: &ChainSpec, fork_name: ForkName) -> DataColumnsByRootRequest { + fn dcbroot_request(fork_name: ForkName, spec: &ChainSpec) -> DataColumnsByRootRequest { let number_of_columns = spec.number_of_columns as usize; DataColumnsByRootRequest { data_column_ids: RuntimeVariableList::new( @@ -1059,21 +1080,21 @@ mod tests { } } - fn bbroot_request_v1(fork_name: ForkName) -> BlocksByRootRequest { - BlocksByRootRequest::new_v1(vec![Hash256::zero()], &fork_context(fork_name)) + fn bbroot_request_v1(fork_name: ForkName, spec: &ChainSpec) -> BlocksByRootRequest { + BlocksByRootRequest::new_v1(vec![Hash256::zero()], &fork_context(fork_name, spec)) } - fn bbroot_request_v2(fork_name: ForkName) -> BlocksByRootRequest { - BlocksByRootRequest::new(vec![Hash256::zero()], &fork_context(fork_name)) + fn bbroot_request_v2(fork_name: ForkName, spec: &ChainSpec) -> BlocksByRootRequest { + BlocksByRootRequest::new(vec![Hash256::zero()], &fork_context(fork_name, spec)) } - fn blbroot_request(fork_name: ForkName) -> BlobsByRootRequest { + fn blbroot_request(fork_name: ForkName, spec: &ChainSpec) -> BlobsByRootRequest { BlobsByRootRequest::new( vec![BlobIdentifier { block_root: Hash256::zero(), index: 0, }], - &fork_context(fork_name), + &fork_context(fork_name, spec), ) } @@ -1116,7 +1137,7 @@ mod tests { spec: &ChainSpec, ) -> Result { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); - let fork_context = Arc::new(fork_context(fork_name)); + let fork_context = Arc::new(fork_context(fork_name, spec)); let max_packet_size = spec.max_payload_size as usize; let mut buf = BytesMut::new(); @@ -1130,12 +1151,13 @@ mod tests { fn encode_without_length_checks( bytes: Vec, fork_name: ForkName, + spec: &ChainSpec, ) -> Result { - let fork_context = fork_context(fork_name); + let fork_context = fork_context(fork_name, spec); let mut dst = BytesMut::new(); // Add context bytes if required - dst.extend_from_slice(&fork_context.to_context_bytes(fork_name).unwrap()); + dst.extend_from_slice(&fork_context.context_bytes(fork_context.current_fork_epoch())); let mut uvi_codec: Uvi = Uvi::default(); @@ -1163,7 +1185,7 @@ mod tests { spec: &ChainSpec, ) -> Result>, RPCError> { let snappy_protocol_id = ProtocolId::new(protocol, Encoding::SSZSnappy); - let fork_context = Arc::new(fork_context(fork_name)); + let fork_context = Arc::new(fork_context(fork_name, spec)); let max_packet_size = spec.max_payload_size as usize; let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new(snappy_protocol_id, max_packet_size, fork_context); @@ -1184,7 +1206,7 @@ mod tests { /// Verifies that requests we send are encoded in a way that we would correctly decode too. 
fn encode_then_decode_request(req: RequestType, fork_name: ForkName, spec: &ChainSpec) { - let fork_context = Arc::new(fork_context(fork_name)); + let fork_context = Arc::new(fork_context(fork_name, spec)); let max_packet_size = spec.max_payload_size as usize; let protocol = ProtocolId::new(req.versioned_protocol(), Encoding::SSZSnappy); // Encode a request we send @@ -1255,7 +1277,7 @@ mod tests { // Test RPCResponse encoding/decoding for V1 messages #[test] fn test_encode_then_decode_v1() { - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); assert_eq!( encode_then_decode_response( @@ -1292,13 +1314,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::BlocksByRangeV1, RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) ))), ForkName::Base, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) )))) ); @@ -1307,7 +1329,7 @@ mod tests { encode_then_decode_response( SupportedProtocol::BlocksByRangeV1, RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( - altair_block() + altair_block(&chain_spec) ))), ForkName::Altair, &chain_spec, @@ -1322,13 +1344,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::BlocksByRootV1, RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) ))), ForkName::Base, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) )))) ); @@ -1336,9 +1358,9 @@ mod tests { matches!( encode_then_decode_response( SupportedProtocol::BlocksByRootV1, - RpcResponse::Success(RpcSuccessResponse::BlocksByRoot( - Arc::new(altair_block()) - )), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block( + &chain_spec + )))), ForkName::Altair, &chain_spec, ) @@ -1383,74 +1405,98 @@ mod tests { assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRangeV1, - RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar( + &chain_spec + ))), ForkName::Deneb, &chain_spec ), - Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar( + &chain_spec + )))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRangeV1, - RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar( + &chain_spec + ))), ForkName::Electra, &chain_spec ), - Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar( + &chain_spec + )))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRangeV1, - RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar( + &chain_spec + ))), ForkName::Fulu, &chain_spec ), - Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar( + &chain_spec + )))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRootV1, - RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar( + &chain_spec + ))), 
ForkName::Deneb, &chain_spec ), - Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar( + &chain_spec + )))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRootV1, - RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar( + &chain_spec + ))), ForkName::Electra, &chain_spec ), - Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar( + &chain_spec + )))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRootV1, - RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())), + RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar( + &chain_spec + ))), ForkName::Fulu, &chain_spec ), - Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), + Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar( + &chain_spec + )))), ); assert_eq!( encode_then_decode_response( SupportedProtocol::DataColumnsByRangeV1, RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) )), ForkName::Deneb, &chain_spec ), Ok(Some(RpcSuccessResponse::DataColumnsByRange( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) ))), ); @@ -1458,13 +1504,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::DataColumnsByRangeV1, RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) )), ForkName::Electra, &chain_spec ), Ok(Some(RpcSuccessResponse::DataColumnsByRange( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) ))), ); @@ -1472,13 +1518,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::DataColumnsByRangeV1, RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) )), ForkName::Fulu, &chain_spec ), Ok(Some(RpcSuccessResponse::DataColumnsByRange( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) ))), ); @@ -1486,13 +1532,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::DataColumnsByRootV1, RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) )), ForkName::Deneb, &chain_spec ), Ok(Some(RpcSuccessResponse::DataColumnsByRoot( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) ))), ); @@ -1500,13 +1546,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::DataColumnsByRootV1, RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) )), ForkName::Electra, &chain_spec ), Ok(Some(RpcSuccessResponse::DataColumnsByRoot( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) ))), ); @@ -1514,13 +1560,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::DataColumnsByRootV1, RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) )), ForkName::Fulu, &chain_spec ), Ok(Some(RpcSuccessResponse::DataColumnsByRoot( - empty_data_column_sidecar() + empty_data_column_sidecar(&chain_spec) ))), ); } @@ -1528,19 +1574,19 @@ mod tests { // Test RPCResponse encoding/decoding for V1 messages #[test] fn 
test_encode_then_decode_v2() { - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) ))), ForkName::Base, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) )))) ); @@ -1551,25 +1597,27 @@ mod tests { encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) ))), ForkName::Altair, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) )))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRangeV2, - RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new(altair_block( + &chain_spec + )))), ForkName::Altair, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( - altair_block() + altair_block(&chain_spec) )))) ); @@ -1590,9 +1638,12 @@ mod tests { )))) ); - let mut encoded = - encode_without_length_checks(bellatrix_block_large.as_ssz_bytes(), ForkName::Bellatrix) - .unwrap(); + let mut encoded = encode_without_length_checks( + bellatrix_block_large.as_ssz_bytes(), + ForkName::Bellatrix, + &chain_spec, + ) + .unwrap(); assert!( matches!( @@ -1612,13 +1663,13 @@ mod tests { encode_then_decode_response( SupportedProtocol::BlocksByRootV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) ))), ForkName::Base, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) )))), ); @@ -1629,25 +1680,27 @@ mod tests { encode_then_decode_response( SupportedProtocol::BlocksByRootV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) ))), ForkName::Altair, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block() + empty_base_block(&chain_spec) )))) ); assert_eq!( encode_then_decode_response( SupportedProtocol::BlocksByRootV2, - RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block( + &chain_spec + )))), ForkName::Altair, &chain_spec, ), Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( - altair_block() + altair_block(&chain_spec) )))) ); @@ -1665,9 +1718,12 @@ mod tests { )))) ); - let mut encoded = - encode_without_length_checks(bellatrix_block_large.as_ssz_bytes(), ForkName::Bellatrix) - .unwrap(); + let mut encoded = encode_without_length_checks( + bellatrix_block_large.as_ssz_bytes(), + ForkName::Bellatrix, + &chain_spec, + ) + .unwrap(); assert!( matches!( @@ -1729,15 +1785,14 @@ mod tests { // Test RPCResponse encoding/decoding for V2 messages #[test] fn test_context_bytes_v2() { - let fork_context = fork_context(ForkName::Altair); - - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); + let fork_context = fork_context(ForkName::Altair, &chain_spec); // Removing context bytes for v2 messages should error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRangeV2, 
RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block(), + empty_base_block(&chain_spec), ))), ForkName::Base, &chain_spec, @@ -1760,7 +1815,7 @@ mod tests { let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block(), + empty_base_block(&chain_spec), ))), ForkName::Base, &chain_spec, @@ -1784,7 +1839,7 @@ mod tests { let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRangeV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRange(Arc::new( - empty_base_block(), + empty_base_block(&chain_spec), ))), ForkName::Altair, &chain_spec, @@ -1792,8 +1847,8 @@ mod tests { .unwrap(); let mut wrong_fork_bytes = BytesMut::new(); - wrong_fork_bytes - .extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap()); + let altair_epoch = chain_spec.altair_fork_epoch.unwrap(); + wrong_fork_bytes.extend_from_slice(&fork_context.context_bytes(altair_epoch)); wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); assert!(matches!( @@ -1810,14 +1865,18 @@ mod tests { // Trying to decode an altair block with base context bytes should give ssz decoding error let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, - RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block()))), + RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new(altair_block( + &chain_spec, + )))), ForkName::Altair, &chain_spec, ) .unwrap(); let mut wrong_fork_bytes = BytesMut::new(); - wrong_fork_bytes.extend_from_slice(&fork_context.to_context_bytes(ForkName::Base).unwrap()); + wrong_fork_bytes.extend_from_slice( + &fork_context.context_bytes(chain_spec.genesis_slot.epoch(Spec::slots_per_epoch())), + ); wrong_fork_bytes.extend_from_slice(&encoded_bytes.split_off(4)); assert!(matches!( @@ -1833,7 +1892,7 @@ mod tests { // Adding context bytes to Protocols that don't require it should return an error let mut encoded_bytes = BytesMut::new(); - encoded_bytes.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap()); + encoded_bytes.extend_from_slice(&fork_context.context_bytes(altair_epoch)); encoded_bytes.extend_from_slice( &encode_response( SupportedProtocol::MetaDataV2, @@ -1856,7 +1915,7 @@ mod tests { let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block(), + empty_base_block(&chain_spec), ))), ForkName::Altair, &chain_spec, @@ -1882,7 +1941,7 @@ mod tests { let mut encoded_bytes = encode_response( SupportedProtocol::BlocksByRootV2, RpcResponse::Success(RpcSuccessResponse::BlocksByRoot(Arc::new( - empty_base_block(), + empty_base_block(&chain_spec), ))), ForkName::Altair, &chain_spec, @@ -1904,8 +1963,7 @@ mod tests { #[test] fn test_encode_then_decode_request() { - let fork_context = fork_context(ForkName::Electra); - let chain_spec = fork_context.spec.clone(); + let chain_spec = spec_with_all_forks_enabled(); let requests: &[RequestType] = &[ RequestType::Ping(ping_message()), @@ -1929,10 +1987,10 @@ mod tests { // Handled separately to have consistent `ForkName` across request and responses let fork_dependent_requests = |fork_name| { [ - RequestType::BlobsByRoot(blbroot_request(fork_name)), - RequestType::BlocksByRoot(bbroot_request_v1(fork_name)), - RequestType::BlocksByRoot(bbroot_request_v2(fork_name)), - RequestType::DataColumnsByRoot(dcbroot_request(&chain_spec, fork_name)), + 
RequestType::BlobsByRoot(blbroot_request(fork_name, &chain_spec)), + RequestType::BlocksByRoot(bbroot_request_v1(fork_name, &chain_spec)), + RequestType::BlocksByRoot(bbroot_request_v2(fork_name, &chain_spec)), + RequestType::DataColumnsByRoot(dcbroot_request(fork_name, &chain_spec)), ] }; for fork_name in ForkName::list_all() { @@ -1992,7 +2050,7 @@ mod tests { assert_eq!(writer.get_ref().len(), 42); dst.extend_from_slice(writer.get_ref()); - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); // 10 (for stream identifier) + 80 + 42 = 132 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. assert!(matches!( decode_response( @@ -2010,7 +2068,8 @@ mod tests { /// sends a valid message filled with a stream of useless padding before the actual message. #[test] fn test_decode_malicious_v2_message() { - let fork_context = Arc::new(fork_context(ForkName::Altair)); + let chain_spec = spec_with_all_forks_enabled(); + let fork_context = Arc::new(fork_context(ForkName::Altair, &chain_spec)); // 10 byte snappy stream identifier let stream_identifier: &'static [u8] = b"\xFF\x06\x00\x00sNaPpY"; @@ -2022,7 +2081,7 @@ mod tests { let malicious_padding: &'static [u8] = b"\xFE\x00\x00\x00"; // Full altair block is 157916 bytes uncompressed. `max_compressed_len` is 32 + 157916 + 157916/6 = 184267. - let block_message_bytes = altair_block().as_ssz_bytes(); + let block_message_bytes = altair_block(&fork_context.spec).as_ssz_bytes(); assert_eq!(block_message_bytes.len(), 157916); assert_eq!( @@ -2034,7 +2093,8 @@ mod tests { let mut dst = BytesMut::with_capacity(1024); // Insert context bytes - dst.extend_from_slice(&fork_context.to_context_bytes(ForkName::Altair).unwrap()); + let altair_epoch = fork_context.spec.altair_fork_epoch.unwrap(); + dst.extend_from_slice(&fork_context.context_bytes(altair_epoch)); // Insert length-prefix uvi_codec @@ -2049,14 +2109,14 @@ mod tests { dst.extend_from_slice(malicious_padding); } - // Insert payload (8103 bytes compressed) + // Insert payload (8102 bytes compressed) let mut writer = FrameEncoder::new(Vec::new()); writer.write_all(&block_message_bytes).unwrap(); writer.flush().unwrap(); - assert_eq!(writer.get_ref().len(), 8103); + assert_eq!(writer.get_ref().len(), 8102); dst.extend_from_slice(writer.get_ref()); - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); // 10 (for stream identifier) + 176156 + 8103 = 184269 > `max_compressed_len`. Hence, decoding should fail with `InvalidData`. 
assert!(matches!( @@ -2092,7 +2152,7 @@ mod tests { let mut uvi_codec: Uvi = Uvi::default(); let mut dst = BytesMut::with_capacity(1024); - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); // Insert length-prefix uvi_codec @@ -2128,9 +2188,8 @@ mod tests { let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); - let fork_context = Arc::new(fork_context(ForkName::Base)); - - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); + let fork_context = Arc::new(fork_context(ForkName::Base, &chain_spec)); let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( snappy_protocol_id, @@ -2164,9 +2223,8 @@ mod tests { let snappy_protocol_id = ProtocolId::new(SupportedProtocol::StatusV1, Encoding::SSZSnappy); - let fork_context = Arc::new(fork_context(ForkName::Base)); - - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); + let fork_context = Arc::new(fork_context(ForkName::Base, &chain_spec)); let mut snappy_outbound_codec = SSZSnappyOutboundCodec::::new( snappy_protocol_id, @@ -2195,9 +2253,8 @@ mod tests { let protocol_id = ProtocolId::new(SupportedProtocol::BlocksByRangeV1, Encoding::SSZSnappy); // Response limits - let fork_context = Arc::new(fork_context(ForkName::Base)); - - let chain_spec = Spec::default_spec(); + let chain_spec = spec_with_all_forks_enabled(); + let fork_context = Arc::new(fork_context(ForkName::Base, &chain_spec)); let max_rpc_size = chain_spec.max_payload_size as usize; let limit = protocol_id.rpc_response_limits::(&fork_context); diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 823416b8e8d..fe7be936622 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -912,7 +912,7 @@ where } let (req, substream) = substream; - let current_fork = self.fork_context.current_fork(); + let current_fork = self.fork_context.current_fork_name(); let spec = &self.fork_context.spec; match &req { @@ -950,8 +950,10 @@ where _ => {} }; - let max_responses = - req.max_responses(self.fork_context.digest_epoch(), &self.fork_context.spec); + let max_responses = req.max_responses( + self.fork_context.current_fork_epoch(), + &self.fork_context.spec, + ); // store requests that expect responses if max_responses > 0 { @@ -1021,8 +1023,10 @@ where } // add the stream to substreams if we expect a response, otherwise drop the stream. 
-        let max_responses =
-            request.max_responses(self.fork_context.digest_epoch(), &self.fork_context.spec);
+        let max_responses = request.max_responses(
+            self.fork_context.current_fork_epoch(),
+            &self.fork_context.spec,
+        );
         if max_responses > 0 {
             let max_remaining_chunks = if request.expect_exactly_one_response() {
                 // Currently enforced only for multiple responses
diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs
index 8e065ba6e53..53005448211 100644
--- a/beacon_node/lighthouse_network/src/rpc/methods.rs
+++ b/beacon_node/lighthouse_network/src/rpc/methods.rs
@@ -484,7 +484,7 @@ impl BlocksByRootRequest {
     pub fn new(block_roots: Vec<Hash256>, fork_context: &ForkContext) -> Self {
         let max_request_blocks = fork_context
             .spec
-            .max_request_blocks(fork_context.current_fork());
+            .max_request_blocks(fork_context.current_fork_name());
         let block_roots = RuntimeVariableList::from_vec(block_roots, max_request_blocks);
         Self::V2(BlocksByRootRequestV2 { block_roots })
     }
@@ -492,7 +492,7 @@ impl BlocksByRootRequest {
     pub fn new_v1(block_roots: Vec<Hash256>, fork_context: &ForkContext) -> Self {
         let max_request_blocks = fork_context
             .spec
-            .max_request_blocks(fork_context.current_fork());
+            .max_request_blocks(fork_context.current_fork_name());
         let block_roots = RuntimeVariableList::from_vec(block_roots, max_request_blocks);
         Self::V1(BlocksByRootRequestV1 { block_roots })
     }
@@ -509,7 +509,7 @@ impl BlobsByRootRequest {
     pub fn new(blob_ids: Vec<BlobIdentifier>, fork_context: &ForkContext) -> Self {
         let max_request_blob_sidecars = fork_context
             .spec
-            .max_request_blob_sidecars(fork_context.current_fork());
+            .max_request_blob_sidecars(fork_context.current_fork_name());
         let blob_ids = RuntimeVariableList::from_vec(blob_ids, max_request_blob_sidecars);
         Self { blob_ids }
     }
diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs
index 9b502384108..500e98d5c33 100644
--- a/beacon_node/lighthouse_network/src/rpc/protocol.rs
+++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs
@@ -545,15 +545,15 @@ impl ProtocolId {
                 <StatusMessage as Encode>::ssz_fixed_len(),
             ),
             Protocol::Goodbye => RpcLimits::new(0, 0), // Goodbye request has no response
-            Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork()),
-            Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()),
+            Protocol::BlocksByRange => rpc_block_limits_by_fork(fork_context.current_fork_name()),
+            Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork_name()),
             Protocol::BlobsByRange => rpc_blob_limits::<E>(),
             Protocol::BlobsByRoot => rpc_blob_limits::<E>(),
             Protocol::DataColumnsByRoot => {
-                rpc_data_column_limits::<E>(fork_context.digest_epoch(), &fork_context.spec)
+                rpc_data_column_limits::<E>(fork_context.current_fork_epoch(), &fork_context.spec)
             }
             Protocol::DataColumnsByRange => {
-                rpc_data_column_limits::<E>(fork_context.digest_epoch(), &fork_context.spec)
+                rpc_data_column_limits::<E>(fork_context.current_fork_epoch(), &fork_context.spec)
             }
             Protocol::Ping => RpcLimits::new(
                 <Ping as Encode>::ssz_fixed_len(),
@@ -564,16 +564,16 @@ impl ProtocolId {
                 <MetaDataV2<E> as Encode>::ssz_fixed_len(),
             ),
             Protocol::LightClientBootstrap => {
-                rpc_light_client_bootstrap_limits_by_fork(fork_context.current_fork())
+                rpc_light_client_bootstrap_limits_by_fork(fork_context.current_fork_name())
             }
             Protocol::LightClientOptimisticUpdate => {
-                rpc_light_client_optimistic_update_limits_by_fork(fork_context.current_fork())
+                rpc_light_client_optimistic_update_limits_by_fork(fork_context.current_fork_name())
             }
             Protocol::LightClientFinalityUpdate => {
-                rpc_light_client_finality_update_limits_by_fork(fork_context.current_fork())
+                rpc_light_client_finality_update_limits_by_fork(fork_context.current_fork_name())
             }
             Protocol::LightClientUpdatesByRange => {
-                rpc_light_client_updates_by_range_limits_by_fork(fork_context.current_fork())
+                rpc_light_client_updates_by_range_limits_by_fork(fork_context.current_fork_name())
             }
         }
     }
diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs
index c190b4cbf55..f8fd54eb2a9 100644
--- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs
+++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs
@@ -353,7 +353,10 @@ impl RPCRateLimiter {
     ) -> Result<(), RateLimitedErr> {
         let time_since_start = self.init_time.elapsed();
         let tokens = request
-            .max_responses(self.fork_context.digest_epoch(), &self.fork_context.spec)
+            .max_responses(
+                self.fork_context.current_fork_epoch(),
+                &self.fork_context.spec,
+            )
             .max(1);
         let check =
diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs
index 033bc1346b6..06aebeb4aa7 100644
--- a/beacon_node/lighthouse_network/src/service/mod.rs
+++ b/beacon_node/lighthouse_network/src/service/mod.rs
@@ -194,12 +194,16 @@ impl Network {
         // set up a collection of variables accessible outside of the network crate
         // Create an ENR or load from disk if appropriate
+        let next_fork_digest = ctx
+            .fork_context
+            .next_fork_digest()
+            .unwrap_or_else(|| ctx.fork_context.current_fork_digest());
         let enr = crate::discovery::enr::build_or_load_enr::<E>(
             local_keypair.clone(),
             &config,
             &ctx.enr_fork_id,
+            next_fork_digest,
             &ctx.chain_spec,
-            ctx.fork_context.next_fork_digest(),
         )?;
         // Construct the metadata
@@ -282,7 +286,7 @@ impl Network {
         // Set up a scoring update interval
         let update_gossipsub_scores = tokio::time::interval(params.decay_interval);
-        let current_digest_epoch = ctx.fork_context.digest_epoch();
+        let current_digest_epoch = ctx.fork_context.current_fork_epoch();
         let current_and_future_digests = ctx
             .chain_spec
             .all_digest_epochs()
@@ -1361,15 +1365,9 @@ impl Network {
         self.enr_fork_id = enr_fork_id;
     }
-    #[instrument(parent = None,
-        level = "trace",
-        fields(service = "libp2p"),
-        name = "libp2p",
-        skip_all
-    )]
     pub fn update_nfd(&mut self, nfd: [u8; 4]) {
         if let Err(e) = self.discovery_mut().update_enr_nfd(nfd) {
-            warn!(error = %e, "Could not update nfd in ENR");
+            crit!(error = %e, "Could not update nfd in ENR");
        }
    }
diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs
index 21df75a648c..601c59a9c84 100644
--- a/beacon_node/lighthouse_network/src/types/pubsub.rs
+++ b/beacon_node/lighthouse_network/src/types/pubsub.rs
@@ -171,28 +171,29 @@ impl PubsubMessage {
         // the ssz decoders
         match gossip_topic.kind() {
             GossipKind::BeaconAggregateAndProof => {
-                let signed_aggregate_and_proof =
-                    match fork_context.from_context_bytes(gossip_topic.fork_digest) {
-                        Some(&fork_name) => {
-                            if fork_name.electra_enabled() {
-                                SignedAggregateAndProof::Electra(
-                                    SignedAggregateAndProofElectra::from_ssz_bytes(data)
-                                        .map_err(|e| format!("{:?}", e))?,
-                                )
-                            } else {
-                                SignedAggregateAndProof::Base(
-                                    SignedAggregateAndProofBase::from_ssz_bytes(data)
-                                        .map_err(|e| format!("{:?}", e))?,
-                                )
-                            }
-                        }
-                        None => {
-                            return Err(format!(
-                                "Unknown gossipsub fork digest: {:?}",
-                                gossip_topic.fork_digest
-                            ))
+                let signed_aggregate_and_proof = match fork_context
+                    .get_fork_from_context_bytes(gossip_topic.fork_digest)
+                {
+                    Some(&fork_name) => {
+                        if fork_name.electra_enabled() {
+                            SignedAggregateAndProof::Electra(
+                                SignedAggregateAndProofElectra::from_ssz_bytes(data)
+                                    .map_err(|e| format!("{:?}", e))?,
+                            )
+                        } else {
+                            SignedAggregateAndProof::Base(
+                                SignedAggregateAndProofBase::from_ssz_bytes(data)
+                                    .map_err(|e| format!("{:?}", e))?,
+                            )
                         }
-                    };
+                    }
+                    None => {
+                        return Err(format!(
+                            "Unknown gossipsub fork digest: {:?}",
+                            gossip_topic.fork_digest
+                        ))
+                    }
+                };
                 Ok(PubsubMessage::AggregateAndProofAttestation(Box::new(
                     signed_aggregate_and_proof,
                 )))
@@ -206,48 +207,49 @@ impl PubsubMessage {
                 ))))
             }
             GossipKind::BeaconBlock => {
-                let beacon_block =
-                    match fork_context.from_context_bytes(gossip_topic.fork_digest) {
-                        Some(ForkName::Base) => SignedBeaconBlock::<E>::Base(
-                            SignedBeaconBlockBase::from_ssz_bytes(data)
-                                .map_err(|e| format!("{:?}", e))?,
-                        ),
-                        Some(ForkName::Altair) => SignedBeaconBlock::<E>::Altair(
-                            SignedBeaconBlockAltair::from_ssz_bytes(data)
-                                .map_err(|e| format!("{:?}", e))?,
-                        ),
-                        Some(ForkName::Bellatrix) => SignedBeaconBlock::<E>::Bellatrix(
-                            SignedBeaconBlockBellatrix::from_ssz_bytes(data)
-                                .map_err(|e| format!("{:?}", e))?,
-                        ),
-                        Some(ForkName::Capella) => SignedBeaconBlock::<E>::Capella(
-                            SignedBeaconBlockCapella::from_ssz_bytes(data)
-                                .map_err(|e| format!("{:?}", e))?,
-                        ),
-                        Some(ForkName::Deneb) => SignedBeaconBlock::<E>::Deneb(
-                            SignedBeaconBlockDeneb::from_ssz_bytes(data)
-                                .map_err(|e| format!("{:?}", e))?,
-                        ),
-                        Some(ForkName::Electra) => SignedBeaconBlock::<E>::Electra(
-                            SignedBeaconBlockElectra::from_ssz_bytes(data)
-                                .map_err(|e| format!("{:?}", e))?,
-                        ),
-                        Some(ForkName::Fulu) => SignedBeaconBlock::<E>::Fulu(
-                            SignedBeaconBlockFulu::from_ssz_bytes(data)
-                                .map_err(|e| format!("{:?}", e))?,
-                        ),
-                        None => {
-                            return Err(format!(
-                                "Unknown gossipsub fork digest: {:?}",
-                                gossip_topic.fork_digest
-                            ))
-                        }
-                    };
+                let beacon_block = match fork_context
+                    .get_fork_from_context_bytes(gossip_topic.fork_digest)
+                {
+                    Some(ForkName::Base) => SignedBeaconBlock::<E>::Base(
+                        SignedBeaconBlockBase::from_ssz_bytes(data)
+                            .map_err(|e| format!("{:?}", e))?,
+                    ),
+                    Some(ForkName::Altair) => SignedBeaconBlock::<E>::Altair(
+                        SignedBeaconBlockAltair::from_ssz_bytes(data)
+                            .map_err(|e| format!("{:?}", e))?,
+                    ),
+                    Some(ForkName::Bellatrix) => SignedBeaconBlock::<E>::Bellatrix(
+                        SignedBeaconBlockBellatrix::from_ssz_bytes(data)
+                            .map_err(|e| format!("{:?}", e))?,
+                    ),
+                    Some(ForkName::Capella) => SignedBeaconBlock::<E>::Capella(
+                        SignedBeaconBlockCapella::from_ssz_bytes(data)
+                            .map_err(|e| format!("{:?}", e))?,
+                    ),
+                    Some(ForkName::Deneb) => SignedBeaconBlock::<E>::Deneb(
+                        SignedBeaconBlockDeneb::from_ssz_bytes(data)
+                            .map_err(|e| format!("{:?}", e))?,
+                    ),
+                    Some(ForkName::Electra) => SignedBeaconBlock::<E>::Electra(
+                        SignedBeaconBlockElectra::from_ssz_bytes(data)
+                            .map_err(|e| format!("{:?}", e))?,
+                    ),
+                    Some(ForkName::Fulu) => SignedBeaconBlock::<E>::Fulu(
+                        SignedBeaconBlockFulu::from_ssz_bytes(data)
+                            .map_err(|e| format!("{:?}", e))?,
+                    ),
+                    None => {
+                        return Err(format!(
+                            "Unknown gossipsub fork digest: {:?}",
+                            gossip_topic.fork_digest
+                        ))
+                    }
+                };
                 Ok(PubsubMessage::BeaconBlock(Arc::new(beacon_block)))
             }
             GossipKind::BlobSidecar(blob_index) => {
                 if let Some(fork_name) =
-                    fork_context.from_context_bytes(gossip_topic.fork_digest)
+                    fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest)
                 {
                     if fork_name.deneb_enabled() {
                         let blob_sidecar = Arc::new(
@@ -267,7 +269,7 @@ impl PubsubMessage {
                 ))
             }
             GossipKind::DataColumnSidecar(subnet_id) => {
-                match fork_context.from_context_bytes(gossip_topic.fork_digest) {
+                match fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) {
                     Some(fork) if fork.fulu_enabled() => {
                         let col_sidecar = Arc::new(
                             DataColumnSidecar::from_ssz_bytes(data)
@@ -295,28 +297,29 @@ impl PubsubMessage {
                 Ok(PubsubMessage::ProposerSlashing(Box::new(proposer_slashing)))
             }
             GossipKind::AttesterSlashing => {
-                let attester_slashing =
-                    match fork_context.from_context_bytes(gossip_topic.fork_digest) {
-                        Some(&fork_name) => {
-                            if fork_name.electra_enabled() {
-                                AttesterSlashing::Electra(
-                                    AttesterSlashingElectra::from_ssz_bytes(data)
-                                        .map_err(|e| format!("{:?}", e))?,
-                                )
-                            } else {
-                                AttesterSlashing::Base(
-                                    AttesterSlashingBase::from_ssz_bytes(data)
-                                        .map_err(|e| format!("{:?}", e))?,
-                                )
-                            }
-                        }
-                        None => {
-                            return Err(format!(
-                                "Unknown gossipsub fork digest: {:?}",
-                                gossip_topic.fork_digest
-                            ))
+                let attester_slashing = match fork_context
+                    .get_fork_from_context_bytes(gossip_topic.fork_digest)
+                {
+                    Some(&fork_name) => {
+                        if fork_name.electra_enabled() {
+                            AttesterSlashing::Electra(
+                                AttesterSlashingElectra::from_ssz_bytes(data)
+                                    .map_err(|e| format!("{:?}", e))?,
+                            )
+                        } else {
+                            AttesterSlashing::Base(
+                                AttesterSlashingBase::from_ssz_bytes(data)
+                                    .map_err(|e| format!("{:?}", e))?,
+                            )
                         }
-                    };
+                    }
+                    None => {
+                        return Err(format!(
+                            "Unknown gossipsub fork digest: {:?}",
+                            gossip_topic.fork_digest
+                        ))
+                    }
+                };
                 Ok(PubsubMessage::AttesterSlashing(Box::new(attester_slashing)))
             }
             GossipKind::SignedContributionAndProof => {
@@ -343,7 +346,7 @@ impl PubsubMessage {
                 )))
             }
             GossipKind::LightClientFinalityUpdate => {
-                let light_client_finality_update = match fork_context.from_context_bytes(gossip_topic.fork_digest) {
+                let light_client_finality_update = match fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) {
                     Some(&fork_name) => {
                         LightClientFinalityUpdate::from_ssz_bytes(data, fork_name)
                             .map_err(|e| format!("{:?}", e))?
@@ -358,7 +361,7 @@ impl PubsubMessage {
                 )))
             }
             GossipKind::LightClientOptimisticUpdate => {
-                let light_client_optimistic_update = match fork_context.from_context_bytes(gossip_topic.fork_digest) {
+                let light_client_optimistic_update = match fork_context.get_fork_from_context_bytes(gossip_topic.fork_digest) {
                     Some(&fork_name) => {
                         LightClientOptimisticUpdate::from_ssz_bytes(data, fork_name)
                             .map_err(|e| format!("{:?}", e))?
diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs
index 0dac126909c..61f48a9a6fd 100644
--- a/beacon_node/lighthouse_network/tests/common.rs
+++ b/beacon_node/lighthouse_network/tests/common.rs
@@ -11,7 +11,7 @@ use tracing::{debug, error, info_span, Instrument};
 use tracing_subscriber::EnvFilter;
 use types::{
     ChainSpec, EnrForkId, Epoch, EthSpec, FixedBytesExtended, ForkContext, ForkName, Hash256,
-    MinimalEthSpec, Slot,
+    MinimalEthSpec,
 };
 type E = MinimalEthSpec;
@@ -19,33 +19,36 @@ type E = MinimalEthSpec;
 use lighthouse_network::rpc::config::InboundRateLimiterConfig;
 use tempfile::Builder as TempBuilder;
-/// Returns a dummy fork context
-pub fn fork_context(fork_name: ForkName) -> ForkContext {
+/// Returns a chain spec with all forks enabled.
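+/// A minimal usage sketch (illustrative; epochs are assigned 1..=6 in fork order below):
+///
+///     let spec = spec_with_all_forks_enabled();
+///     assert_eq!(spec.deneb_fork_epoch, Some(Epoch::new(4)));
+///     assert!(spec.is_fulu_scheduled());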
+pub fn spec_with_all_forks_enabled() -> ChainSpec {
     let mut chain_spec = E::default_spec();
-    let altair_fork_epoch = Epoch::new(1);
-    let bellatrix_fork_epoch = Epoch::new(2);
-    let capella_fork_epoch = Epoch::new(3);
-    let deneb_fork_epoch = Epoch::new(4);
-    let electra_fork_epoch = Epoch::new(5);
-    let fulu_fork_epoch = Epoch::new(6);
+    chain_spec.altair_fork_epoch = Some(Epoch::new(1));
+    chain_spec.bellatrix_fork_epoch = Some(Epoch::new(2));
+    chain_spec.capella_fork_epoch = Some(Epoch::new(3));
+    chain_spec.deneb_fork_epoch = Some(Epoch::new(4));
+    chain_spec.electra_fork_epoch = Some(Epoch::new(5));
+    chain_spec.fulu_fork_epoch = Some(Epoch::new(6));
-    chain_spec.altair_fork_epoch = Some(altair_fork_epoch);
-    chain_spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch);
-    chain_spec.capella_fork_epoch = Some(capella_fork_epoch);
-    chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch);
-    chain_spec.electra_fork_epoch = Some(electra_fork_epoch);
-    chain_spec.fulu_fork_epoch = Some(fulu_fork_epoch);
+    // Check that all forks up to the latest are scheduled.
+    assert!(chain_spec.fork_epoch(ForkName::latest()).is_some());
+    chain_spec
+}
-    let current_slot = match fork_name {
-        ForkName::Base => Slot::new(0),
-        ForkName::Altair => altair_fork_epoch.start_slot(E::slots_per_epoch()),
-        ForkName::Bellatrix => bellatrix_fork_epoch.start_slot(E::slots_per_epoch()),
-        ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()),
-        ForkName::Deneb => deneb_fork_epoch.start_slot(E::slots_per_epoch()),
-        ForkName::Electra => electra_fork_epoch.start_slot(E::slots_per_epoch()),
-        ForkName::Fulu => fulu_fork_epoch.start_slot(E::slots_per_epoch()),
+/// Returns a dummy fork context
+pub fn fork_context(fork_name: ForkName, spec: &ChainSpec) -> ForkContext {
+    let current_epoch = match fork_name {
+        ForkName::Base => Some(Epoch::new(0)),
+        ForkName::Altair => spec.altair_fork_epoch,
+        ForkName::Bellatrix => spec.bellatrix_fork_epoch,
+        ForkName::Capella => spec.capella_fork_epoch,
+        ForkName::Deneb => spec.deneb_fork_epoch,
+        ForkName::Electra => spec.electra_fork_epoch,
+        ForkName::Fulu => spec.fulu_fork_epoch,
     };
-    ForkContext::new::<E>(current_slot, Hash256::zero(), &chain_spec)
+    let current_slot = current_epoch
+        .unwrap_or_else(|| panic!("expected fork {fork_name} to be scheduled"))
+        .start_slot(E::slots_per_epoch());
+    ForkContext::new::<E>(current_slot, Hash256::zero(), spec)
 }
 pub struct Libp2pInstance(
@@ -122,7 +125,7 @@ pub async fn build_libp2p_instance(
     let libp2p_context = lighthouse_network::Context {
         config,
         enr_fork_id: EnrForkId::default(),
-        fork_context: Arc::new(fork_context(fork_name)),
+        fork_context: Arc::new(fork_context(fork_name, &chain_spec)),
         chain_spec,
         libp2p_registry: None,
     };
diff --git a/beacon_node/lighthouse_network/tests/rpc_tests.rs b/beacon_node/lighthouse_network/tests/rpc_tests.rs
index e50f70e43a0..11fe93288f7 100644
--- a/beacon_node/lighthouse_network/tests/rpc_tests.rs
+++ b/beacon_node/lighthouse_network/tests/rpc_tests.rs
@@ -2,6 +2,7 @@
 mod common;
+use crate::common::spec_with_all_forks_enabled;
 use common::{build_tracing_subscriber, Protocol};
 use lighthouse_network::rpc::{methods::*, RequestType};
 use lighthouse_network::service::api_types::AppRequestId;
@@ -60,7 +61,7 @@ fn test_tcp_status_rpc() {
     let rt = Arc::new(Runtime::new().unwrap());
-    let spec = Arc::new(E::default_spec());
+    let spec = Arc::new(spec_with_all_forks_enabled());
     rt.block_on(async {
         // get sender/receiver
@@ -168,7 +169,7 @@ fn test_tcp_blocks_by_range_chunked_rpc() {
     let rt = Arc::new(Runtime::new().unwrap());
-    let spec = Arc::new(E::default_spec());
+    let spec = Arc::new(spec_with_all_forks_enabled());
     rt.block_on(async {
         // get sender/receiver
@@ -318,7 +319,7 @@ fn test_blobs_by_range_chunked_rpc() {
     rt.block_on(async {
         // get sender/receiver
-        let spec = Arc::new(E::default_spec());
+        let spec = Arc::new(spec_with_all_forks_enabled());
         let (mut sender, mut receiver) = common::build_node_pair(
             Arc::downgrade(&rt),
             ForkName::Deneb,
@@ -330,13 +331,18 @@ fn test_blobs_by_range_chunked_rpc() {
         .await;
         // BlobsByRange Request
+        let deneb_slot = spec
+            .deneb_fork_epoch
+            .expect("deneb must be scheduled")
+            .start_slot(E::slots_per_epoch());
         let rpc_request = RequestType::BlobsByRange(BlobsByRangeRequest {
-            start_slot: 0,
+            start_slot: deneb_slot.as_u64(),
             count: slot_count,
         });
-        // BlocksByRange Response
-        let blob = BlobSidecar::<E>::empty();
+        // BlobsByRange Response
+        let mut blob = BlobSidecar::<E>::empty();
+        blob.signed_block_header.message.slot = deneb_slot;
         let rpc_response = Response::BlobsByRange(Some(Arc::new(blob)));
@@ -438,7 +444,7 @@ fn test_tcp_blocks_by_range_over_limit() {
     let rt = Arc::new(Runtime::new().unwrap());
-    let spec = Arc::new(E::default_spec());
+    let spec = Arc::new(spec_with_all_forks_enabled());
     rt.block_on(async {
         // get sender/receiver
@@ -545,7 +551,7 @@ fn test_tcp_blocks_by_range_chunked_rpc_terminates_correctly() {
     let rt = Arc::new(Runtime::new().unwrap());
-    let spec = Arc::new(E::default_spec());
+    let spec = Arc::new(spec_with_all_forks_enabled());
     rt.block_on(async {
         // get sender/receiver
@@ -681,7 +687,7 @@ fn test_tcp_blocks_by_range_single_empty_rpc() {
     let rt = Arc::new(Runtime::new().unwrap());
-    let spec = Arc::new(E::default_spec());
+    let spec = Arc::new(spec_with_all_forks_enabled());
     rt.block_on(async {
         // get sender/receiver
@@ -804,14 +810,15 @@ fn test_tcp_blocks_by_root_chunked_rpc() {
     let messages_to_send = 6;
-    let spec = Arc::new(E::default_spec());
+    let spec = Arc::new(spec_with_all_forks_enabled());
+    let current_fork_name = ForkName::Bellatrix;
     let rt = Arc::new(Runtime::new().unwrap());
     // get sender/receiver
     rt.block_on(async {
         let (mut sender, mut receiver) = common::build_node_pair(
             Arc::downgrade(&rt),
-            ForkName::Bellatrix,
+            current_fork_name,
             spec.clone(),
             Protocol::Tcp,
             false,
@@ -831,7 +838,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() {
                 Hash256::zero(),
                 Hash256::zero(),
             ],
-            spec.max_request_blocks_upper_bound(),
+            spec.max_request_blocks(current_fork_name),
         ),
     }));
@@ -934,7 +941,7 @@ fn test_tcp_blocks_by_root_chunked_rpc() {
        tokio::select! {
            _ = sender_future => {}
            _ = receiver_future => {}
-            _ = sleep(Duration::from_secs(30)) => {
+            _ = sleep(Duration::from_secs(300)) => {
                panic!("Future timed out");
            }
        }
@@ -952,14 +959,15 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() {
     let messages_to_send: u64 = 10;
     let extra_messages_to_send: u64 = 10;
-    let spec = Arc::new(E::default_spec());
+    let spec = Arc::new(spec_with_all_forks_enabled());
+    let current_fork = ForkName::Base;
     let rt = Arc::new(Runtime::new().unwrap());
     // get sender/receiver
     rt.block_on(async {
         let (mut sender, mut receiver) = common::build_node_pair(
             Arc::downgrade(&rt),
-            ForkName::Base,
+            current_fork,
             spec.clone(),
             Protocol::Tcp,
             false,
@@ -983,7 +991,7 @@ fn test_tcp_blocks_by_root_chunked_rpc_terminates_correctly() {
                 Hash256::zero(),
                 Hash256::zero(),
             ],
-            spec.max_request_blocks_upper_bound(),
+            spec.max_request_blocks(current_fork),
         ),
     }));
@@ -1098,7 +1106,7 @@ fn goodbye_test(log_level: &str, enable_logging: bool, protocol: Protocol) {
     let rt = Arc::new(Runtime::new().unwrap());
-    let spec = Arc::new(E::default_spec());
+    let spec = Arc::new(spec_with_all_forks_enabled());
     // get sender/receiver
     rt.block_on(async {
@@ -1180,7 +1188,7 @@ fn test_delayed_rpc_response() {
     // Set up the logging.
     build_tracing_subscriber("debug", true);
     let rt = Arc::new(Runtime::new().unwrap());
-    let spec = Arc::new(E::default_spec());
+    let spec = Arc::new(spec_with_all_forks_enabled());
     // Allow 1 token to be used every 3 seconds.
     const QUOTA_SEC: u64 = 3;
@@ -1314,7 +1322,7 @@ fn test_active_requests() {
     // Set up the logging.
     build_tracing_subscriber("debug", true);
     let rt = Arc::new(Runtime::new().unwrap());
-    let spec = Arc::new(E::default_spec());
+    let spec = Arc::new(spec_with_all_forks_enabled());
     rt.block_on(async {
         // Get sender/receiver.
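// A minimal sketch (assuming the common.rs helpers above): under the digest-based scheme a
// test pinned at Deneb can derive the context bytes its responses must carry:
//
//     let spec = spec_with_all_forks_enabled();
//     let ctx = fork_context(ForkName::Deneb, &spec);
//     let deneb_epoch = spec.deneb_fork_epoch.expect("deneb scheduled");
//     assert_eq!(ctx.context_bytes(deneb_epoch), ctx.current_fork_digest());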
diff --git a/beacon_node/network/src/service.rs b/beacon_node/network/src/service.rs
index 81b1b64d429..325feda0d48 100644
--- a/beacon_node/network/src/service.rs
+++ b/beacon_node/network/src/service.rs
@@ -452,7 +452,7 @@ impl NetworkService {
                 Some(_) = &mut self.next_topic_subscriptions => {
                     if let Some((epoch, _)) = self.beacon_chain.duration_to_next_digest() {
                         let fork_name = self.beacon_chain.spec.fork_name_at_epoch(epoch);
-                        let fork_digest = self.beacon_chain.spec.compute_fork_digest(self.beacon_chain.genesis_validators_root, epoch);
+                        let fork_digest = self.beacon_chain.compute_fork_digest(epoch);
                         info!("Subscribing to new fork topics");
                         self.libp2p.subscribe_new_fork_topics(fork_name, fork_digest);
                         self.next_topic_subscriptions = Box::pin(None.into());
@@ -687,7 +687,7 @@ impl NetworkService {
         let mut subscribed_topics: Vec<GossipTopic> = vec![];
         for topic_kind in core_topics_to_subscribe::<T::EthSpec>(
-            self.fork_context.current_fork(),
+            self.fork_context.current_fork_name(),
             &self.network_globals.as_topic_config(),
             &self.fork_context.spec,
         ) {
@@ -827,24 +827,26 @@ impl NetworkService {
         let new_fork_digest = new_enr_fork_id.fork_digest;
         let fork_context = &self.fork_context;
-        if let Some(new_fork_name) = fork_context.from_context_bytes(new_fork_digest) {
-            if fork_context.current_fork() == *new_fork_name {
-                // BPO FORK
+        if let Some(new_fork_name) = fork_context.get_fork_from_context_bytes(new_fork_digest) {
+            if fork_context.current_fork_name() == *new_fork_name {
                 info!(
                     epoch = ?current_epoch,
                     "BPO fork triggered"
                 )
             } else {
                 info!(
-                    old_fork = ?fork_context.current_fork(),
+                    old_fork = ?fork_context.current_fork_name(),
                     new_fork = ?new_fork_name,
                     "Transitioned to new fork"
                 );
             }
-            fork_context.update_digest_epoch(current_epoch);
+            fork_context.update_current_fork(*new_fork_name, new_fork_digest, current_epoch);
             if self.beacon_chain.spec.is_peer_das_scheduled() {
-                self.libp2p.update_nfd(fork_context.next_fork_digest());
+                let next_fork_digest = fork_context
+                    .next_fork_digest()
+                    .unwrap_or_else(|| fork_context.current_fork_digest());
+                self.libp2p.update_nfd(next_fork_digest);
             }
             self.libp2p.update_fork_version(new_enr_fork_id);
@@ -875,7 +877,7 @@ impl NetworkService {
     fn subscribed_core_topics(&self) -> bool {
         let core_topics = core_topics_to_subscribe::<T::EthSpec>(
-            self.fork_context.current_fork(),
+            self.fork_context.current_fork_name(),
             &self.network_globals.as_topic_config(),
             &self.fork_context.spec,
         );
diff --git a/beacon_node/network/src/service/tests.rs b/beacon_node/network/src/service/tests.rs
index db342117473..a8f68384a02 100644
--- a/beacon_node/network/src/service/tests.rs
+++ b/beacon_node/network/src/service/tests.rs
@@ -11,7 +11,7 @@ use lighthouse_network::{Enr, GossipTopic};
 use std::str::FromStr;
 use std::sync::Arc;
 use tokio::runtime::Runtime;
-use types::{Epoch, EthSpec, ForkName, MinimalEthSpec, SubnetId};
+use types::{Epoch, EthSpec, MinimalEthSpec, SubnetId};
 impl NetworkService {
     fn get_topic_params(&self, topic: GossipTopic) -> Option<&gossipsub::TopicScoreParams> {
@@ -106,8 +106,8 @@ fn test_removing_topic_weight_on_old_topics() {
         .mock_execution_layer()
         .build()
         .chain;
-    let (next_fork_name, _) = beacon_chain.duration_to_next_fork().expect("next fork");
-    assert_eq!(next_fork_name, ForkName::Capella);
+    let (next_fork_epoch, _) = beacon_chain.duration_to_next_digest().expect("next digest");
+    assert_eq!(Some(next_fork_epoch), spec.capella_fork_epoch);
     // Build network service.
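    // Note: `duration_to_next_digest` (used above) generalizes the old
    // `duration_to_next_fork`: it also fires at BPO boundaries, which rotate the digest
    // without changing the `ForkName`.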
    let (mut network_service, network_globals, _network_senders) = runtime.block_on(async {
@@ -189,9 +189,8 @@ fn test_removing_topic_weight_on_old_topics() {
         beacon_chain.slot_clock.advance_slot();
     }
-    // Run `NetworkService::update_next_fork()`.
+    // Run `NetworkService::update_next_fork_digest()`.
     runtime.block_on(async {
-        network_service.update_next_fork();
+        network_service.update_next_fork_digest();
     });
     // Check that topic_weight on the old topics has been zeroed.
diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs
index d11a18ed0ae..81b22b99e89 100644
--- a/beacon_node/network/src/sync/manager.rs
+++ b/beacon_node/network/src/sync/manager.rs
@@ -264,7 +264,7 @@ pub fn spawn(
     fork_context: Arc<ForkContext>,
 ) {
     assert!(
-        beacon_chain.spec.max_request_blocks(fork_context.current_fork()) as u64 >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH,
+        beacon_chain.spec.max_request_blocks(fork_context.current_fork_name()) as u64 >= T::EthSpec::slots_per_epoch() * EPOCHS_PER_BATCH,
         "Max blocks that can be requested in a single batch greater than max allowed blocks in a single request"
     );
diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs
index d0e62e4ada7..2f74bdc7337 100644
--- a/beacon_node/network/src/sync/network_context.rs
+++ b/beacon_node/network/src/sync/network_context.rs
@@ -879,7 +879,7 @@ impl SyncNetworkContext {
             request: RequestType::DataColumnsByRoot(
                 request
                     .clone()
-                    .try_into_request(self.fork_context.current_fork(), &self.chain.spec)?,
+                    .try_into_request(self.fork_context.current_fork_name(), &self.chain.spec)?,
             ),
             app_request_id: AppRequestId::Sync(SyncRequestId::DataColumnsByRoot(id)),
         })?;
diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs
index 385cd0fcf51..9168a3feee0 100644
--- a/consensus/types/src/beacon_block.rs
+++ b/consensus/types/src/beacon_block.rs
@@ -414,7 +414,10 @@ impl> EmptyBlock for BeaconBlockAlta
     /// Returns an empty Altair block whose slot is set to the Altair fork's start slot.
     fn empty(spec: &ChainSpec) -> Self {
         BeaconBlockAltair {
-            slot: spec.genesis_slot,
+            slot: spec
+                .altair_fork_epoch
+                .expect("altair enabled")
+                .start_slot(E::slots_per_epoch()),
             proposer_index: 0,
             parent_root: Hash256::zero(),
             state_root: Hash256::zero(),
@@ -447,7 +450,10 @@ impl> BeaconBlockAltair
             sync_committee_bits: BitVector::default(),
         };
         BeaconBlockAltair {
-            slot: spec.genesis_slot,
+            slot: spec
+                .altair_fork_epoch
+                .expect("altair enabled")
+                .start_slot(E::slots_per_epoch()),
             proposer_index: 0,
             parent_root: Hash256::zero(),
             state_root: Hash256::zero(),
@@ -475,7 +481,10 @@ impl> EmptyBlock for BeaconBlockBell
     /// Returns an empty Bellatrix block whose slot is set to the Bellatrix fork's start slot.
     fn empty(spec: &ChainSpec) -> Self {
         BeaconBlockBellatrix {
-            slot: spec.genesis_slot,
+            slot: spec
+                .bellatrix_fork_epoch
+                .expect("bellatrix enabled")
+                .start_slot(E::slots_per_epoch()),
             proposer_index: 0,
             parent_root: Hash256::zero(),
             state_root: Hash256::zero(),
@@ -503,7 +512,10 @@ impl> EmptyBlock for BeaconBlockCape
     /// Returns an empty Capella block whose slot is set to the Capella fork's start slot.
     fn empty(spec: &ChainSpec) -> Self {
         BeaconBlockCapella {
-            slot: spec.genesis_slot,
+            slot: spec
+                .capella_fork_epoch
+                .expect("capella enabled")
+                .start_slot(E::slots_per_epoch()),
             proposer_index: 0,
             parent_root: Hash256::zero(),
             state_root: Hash256::zero(),
@@ -532,7 +544,10 @@ impl> EmptyBlock for BeaconBlockDene
     /// Returns an empty Deneb block whose slot is set to the Deneb fork's start slot.
     fn empty(spec: &ChainSpec) -> Self {
         BeaconBlockDeneb {
-            slot: spec.genesis_slot,
+            slot: spec
+                .deneb_fork_epoch
+                .expect("deneb enabled")
+                .start_slot(E::slots_per_epoch()),
             proposer_index: 0,
             parent_root: Hash256::zero(),
             state_root: Hash256::zero(),
@@ -562,7 +577,10 @@ impl> EmptyBlock for BeaconBlockElec
     /// Returns an empty Electra block whose slot is set to the Electra fork's start slot.
     fn empty(spec: &ChainSpec) -> Self {
         BeaconBlockElectra {
-            slot: spec.genesis_slot,
+            slot: spec
+                .electra_fork_epoch
+                .expect("electra enabled")
+                .start_slot(E::slots_per_epoch()),
             proposer_index: 0,
             parent_root: Hash256::zero(),
             state_root: Hash256::zero(),
@@ -593,7 +611,10 @@ impl> EmptyBlock for BeaconBlockFulu
     /// Returns an empty Fulu block whose slot is set to the Fulu fork's start slot.
     fn empty(spec: &ChainSpec) -> Self {
         BeaconBlockFulu {
-            slot: spec.genesis_slot,
+            slot: spec
+                .fulu_fork_epoch
+                .expect("fulu enabled")
+                .start_slot(E::slots_per_epoch()),
             proposer_index: 0,
             parent_root: Hash256::zero(),
             state_root: Hash256::zero(),
diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs
index 59d428315d0..4476cd69b3a 100644
--- a/consensus/types/src/chain_spec.rs
+++ b/consensus/types/src/chain_spec.rs
@@ -246,7 +246,7 @@ pub struct ChainSpec {
     /*
      * Networking Fulu
      */
-    blob_schedule: BlobSchedule,
+    pub(crate) blob_schedule: BlobSchedule,
     min_epochs_for_data_column_sidecars_requests: u64,
     /*
@@ -441,8 +441,13 @@ impl ChainSpec {
             .is_some_and(|fulu_fork_epoch| block_epoch >= fulu_fork_epoch)
     }
-    /// Returns true if `FULU_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`.
+    /// Returns true if PeerDAS is scheduled. Alias for [`Self::is_fulu_scheduled`].
     pub fn is_peer_das_scheduled(&self) -> bool {
+        self.is_fulu_scheduled()
+    }
+
+    /// Returns true if `FULU_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`.
+    pub fn is_fulu_scheduled(&self) -> bool {
         self.fulu_fork_epoch
             .is_some_and(|fulu_fork_epoch| fulu_fork_epoch != self.far_future_epoch)
     }
@@ -593,7 +598,7 @@ impl ChainSpec {
             .filter_map(|(_, epoch)| epoch)
             .collect::>();
-        if self.fulu_fork_epoch.is_some() {
+        if self.is_fulu_scheduled() {
             for blob_parameters in &self.blob_schedule {
                 relevant_epochs.insert(blob_parameters.epoch);
             }
@@ -671,17 +676,6 @@ impl ChainSpec {
         }
     }
-    /// Returns the highest possible value for max_request_blocks based on enabled forks.
-    ///
-    /// This is useful for upper bounds in testing.
-    pub fn max_request_blocks_upper_bound(&self) -> usize {
-        if self.deneb_fork_epoch.is_some() {
-            self.max_request_blocks_deneb as usize
-        } else {
-            self.max_request_blocks as usize
-        }
-    }
-
     pub fn max_request_blob_sidecars(&self, fork_name: ForkName) -> usize {
         if fork_name.electra_enabled() {
             self.max_request_blob_sidecars_electra as usize
@@ -717,7 +711,8 @@ impl ChainSpec {
         }
     }
-    pub fn get_blob_parameters(&self, epoch: Epoch) -> Option<BlobParameters> {
+    /// Returns the blob parameters in effect at a given epoch.
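+    /// Illustrative behaviour (doc sketch): with `fulu_fork_epoch = Some(6)` and schedule
+    /// entries at epochs 6 and 50, any epoch in `6..50` resolves to the epoch-6 parameters,
+    /// while pre-Fulu epochs now return `None` (the Deneb/Electra fallbacks were removed).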
+    fn get_blob_parameters(&self, epoch: Epoch) -> Option<BlobParameters> {
         match self.fulu_fork_epoch {
             Some(fulu_epoch) if epoch >= fulu_epoch => self
                 .blob_schedule
                 .or_else(|| {
                     Some(BlobParameters {
                         epoch: fulu_epoch,
                         max_blobs_per_block: self.max_blobs_per_block_electra,
                     })
                 }),
-            _ => match self.electra_fork_epoch {
-                Some(electra_epoch) if epoch >= electra_epoch => Some(BlobParameters {
-                    epoch: electra_epoch,
-                    max_blobs_per_block: self.max_blobs_per_block_electra,
-                }),
-                _ => match self.deneb_fork_epoch {
-                    Some(deneb_epoch) if epoch >= deneb_epoch => Some(BlobParameters {
-                        epoch: deneb_epoch,
-                        max_blobs_per_block: self.max_blobs_per_block,
-                    }),
-                    _ => None,
-                },
-            },
+            _ => None,
         }
     }
@@ -1522,7 +1505,6 @@ impl BlobSchedule {
     }
     pub const fn default() -> Self {
-        // TODO(EIP-7892): think about what the default should be
         Self(vec![])
     }
@@ -1769,7 +1751,6 @@ fn default_bellatrix_fork_version() -> [u8; 4] {
 }
 fn default_capella_fork_version() -> [u8; 4] {
-    // TODO: determine if the bellatrix example should be copied like this
     [0xff, 0xff, 0xff, 0xff]
 }
diff --git a/consensus/types/src/enr_fork_id.rs b/consensus/types/src/enr_fork_id.rs
index 3ae7c39cfe9..e3742cb96c1 100644
--- a/consensus/types/src/enr_fork_id.rs
+++ b/consensus/types/src/enr_fork_id.rs
@@ -24,10 +24,14 @@ use tree_hash_derive::TreeHash;
     TestRandom,
 )]
 pub struct EnrForkId {
+    /// Fork digest of the current fork computed from [`ChainSpec::compute_fork_digest`].
     #[serde(with = "serde_utils::bytes_4_hex")]
     pub fork_digest: [u8; 4],
+    /// `next_fork_version` is the fork version corresponding to the next planned fork at a future
+    /// epoch. The fork version will only change for regular forks, not BPO forks.
     #[serde(with = "serde_utils::bytes_4_hex")]
     pub next_fork_version: [u8; 4],
+    /// `next_fork_epoch` is the epoch at which the next fork (whether a regular fork or a BPO
+    /// fork) is planned.
     pub next_fork_epoch: Epoch,
 }
diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs
index 86a93323d95..aeb14934f49 100644
--- a/consensus/types/src/fork_context.rs
+++ b/consensus/types/src/fork_context.rs
@@ -1,15 +1,39 @@
 use parking_lot::RwLock;
 use crate::{ChainSpec, Epoch, EthSpec, ForkName, Hash256, Slot};
-use std::collections::{ HashMap, HashSet};
+use std::collections::BTreeMap;
+
+/// Represents a hard fork in the consensus protocol.
+///
+/// A hard fork can be one of two types:
+/// * A named fork (represented by `ForkName`) which introduces protocol changes.
+/// * A blob-parameter-only (BPO) fork which only modifies blob parameters.
+///
+/// For BPO forks, the `fork_name` remains unchanged from the previous fork,
+/// but the `fork_epoch` and `fork_digest` will be different to reflect the
+/// new blob parameter changes.
+#[derive(Debug, Clone)]
+pub struct HardFork {
+    fork_name: ForkName,
+    fork_epoch: Epoch,
+    fork_digest: [u8; 4],
+}
+
+impl HardFork {
+    pub fn new(fork_name: ForkName, fork_digest: [u8; 4], fork_epoch: Epoch) -> HardFork {
+        HardFork {
+            fork_name,
+            fork_epoch,
+            fork_digest,
+        }
+    }
+}
 /// Provides fork specific info like the current fork name and the fork digests corresponding to every valid fork.
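/// For example, with Fulu at epoch 6 and BPO entries at epochs 50 and 100 (the schedule
/// exercised in the tests below), the context holds three Fulu-named `HardFork`s with
/// distinct epochs and digests.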
 #[derive(Debug)]
 pub struct ForkContext {
-    digest_epoch: RwLock<Epoch>,
-    enabled_forks: HashSet<ForkName>,
-    genesis_validators_root: Hash256,
-    digest_to_fork: HashMap<[u8; 4], ForkName>,
+    current_fork: RwLock<HardFork>,
+    epoch_to_forks: BTreeMap<Epoch, HardFork>,
     pub spec: ChainSpec,
 }
@@ -23,96 +47,233 @@ impl ForkContext {
         genesis_validators_root: Hash256,
         spec: &ChainSpec,
     ) -> Self {
-        let enabled_forks = ForkName::list_all()
-            .into_iter()
-            .filter(|fork| spec.fork_epoch(*fork).is_some())
-            .collect();
-
-        let epoch_to_digest: HashMap<_, _> = spec
+        let epoch_to_forks: BTreeMap<_, _> = spec
             .all_digest_epochs()
             .map(|epoch| {
+                let fork_name = spec.fork_name_at_epoch(epoch);
                 let fork_digest = spec.compute_fork_digest(genesis_validators_root, epoch);
-                (epoch, fork_digest)
-            })
-            .collect();
-
-        let digest_to_fork = epoch_to_digest
-            .iter()
-            .map(|(epoch, digest)| {
-                let fork_name = spec.fork_name_at_epoch(*epoch);
-                (*digest, fork_name)
+                (epoch, HardFork::new(fork_name, fork_digest, epoch))
             })
             .collect();
         let current_epoch = current_slot.epoch(E::slots_per_epoch());
-        let digest_epoch = RwLock::new(
-            epoch_to_digest
-                .keys()
-                .filter(|&&epoch| epoch <= current_epoch)
-                .max()
-                .cloned()
-                .expect("should match atleast genesis epoch"),
-        );
+        let current_fork = epoch_to_forks
+            .values()
+            .filter(|&fork| fork.fork_epoch <= current_epoch)
+            .next_back()
+            .cloned()
+            .expect("should match at least genesis epoch");
         Self {
-            digest_epoch,
-            enabled_forks,
-            genesis_validators_root,
-            digest_to_fork,
+            current_fork: RwLock::new(current_fork),
+            epoch_to_forks,
             spec: spec.clone(),
         }
     }
     /// Returns `true` if the provided `fork_name` exists in the `ForkContext` object.
     pub fn fork_exists(&self, fork_name: ForkName) -> bool {
-        self.enabled_forks.contains(&fork_name)
+        self.spec.fork_epoch(fork_name).is_some()
     }
-    /// Returns the `current_fork`.
-    pub fn current_fork(&self) -> ForkName {
-        self.spec.fork_name_at_epoch(self.digest_epoch())
+    /// Returns the current fork name.
+    pub fn current_fork_name(&self) -> ForkName {
+        self.current_fork.read().fork_name
     }
-    /// Returns the current digest epoch
-    pub fn digest_epoch(&self) -> Epoch {
-        *self.digest_epoch.read()
+    /// Returns the current fork epoch.
+    pub fn current_fork_epoch(&self) -> Epoch {
+        self.current_fork.read().fork_epoch
     }
-    pub fn next_fork_digest(&self) -> [u8; 4] {
-        self.spec
-            .next_digest_epoch(self.digest_epoch())
-            .map(|epoch| {
-                self.spec
-                    .compute_fork_digest(self.genesis_validators_root, epoch)
-            })
-            .unwrap_or_default()
+    /// Returns the current fork digest.
+    pub fn current_fork_digest(&self) -> [u8; 4] {
+        self.current_fork.read().fork_digest
+    }
+
+    /// Returns the next fork digest, or `None` if no further fork is scheduled.
+    pub fn next_fork_digest(&self) -> Option<[u8; 4]> {
+        let current_fork_epoch = self.current_fork_epoch();
+        self.epoch_to_forks
+            .range(current_fork_epoch..)
+            .nth(1)
+            .map(|(_, fork)| fork.fork_digest)
     }
-    /// Updates the `digest_epoch` field to a new digest epoch.
-    pub fn update_digest_epoch(&self, epoch: Epoch) {
-        *self.digest_epoch.write() = epoch;
+    /// Updates the `current_fork` field to the fork active at the given epoch.
+    pub fn update_current_fork(
+        &self,
+        new_fork_name: ForkName,
+        new_fork_digest: [u8; 4],
+        new_fork_epoch: Epoch,
+    ) {
+        debug_assert!(self.epoch_to_forks.contains_key(&new_fork_epoch));
+        *self.current_fork.write() = HardFork::new(new_fork_name, new_fork_digest, new_fork_epoch);
     }
     /// Returns the context bytes/fork_digest corresponding to the genesis fork version.
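    /// (Equivalently `self.context_bytes(Epoch::new(0))`: the first entry of
    /// `epoch_to_forks` is the genesis entry by construction.)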
     pub fn genesis_context_bytes(&self) -> [u8; 4] {
-        self.spec
-            .compute_fork_digest(self.genesis_validators_root, Epoch::new(0))
+        self.epoch_to_forks
+            .first_key_value()
+            .expect("must contain genesis epoch")
+            .1
+            .fork_digest
     }
     /// Returns the fork type given the context bytes/fork_digest.
-    /// Returns `None` if context bytes doesn't correspond to any valid `ForkName`.
-    pub fn from_context_bytes(&self, context: [u8; 4]) -> Option<&ForkName> {
-        self.digest_to_fork.get(&context)
+    /// Returns `None` if the context bytes don't correspond to any valid `ForkName`.
+    pub fn get_fork_from_context_bytes(&self, context: [u8; 4]) -> Option<&ForkName> {
+        self.epoch_to_forks
+            .values()
+            .find(|fork| fork.fork_digest == context)
+            .map(|fork| &fork.fork_name)
     }
-    // TODO: we *may* delete this entire object and just use the spec
+    /// Returns the context bytes/fork_digest corresponding to an epoch.
+    /// See [`ChainSpec::compute_fork_digest`].
     pub fn context_bytes(&self, epoch: Epoch) -> [u8; 4] {
-        self.spec
-            .compute_fork_digest(self.genesis_validators_root, epoch)
+        self.epoch_to_forks
+            .range(..=epoch)
+            .next_back()
+            .expect("should match at least genesis epoch")
+            .1
+            .fork_digest
     }
     /// Returns all `fork_digest`s that are currently in the `ForkContext` object.
     pub fn all_fork_digests(&self) -> Vec<[u8; 4]> {
-        self.digest_to_fork.keys().cloned().collect()
+        self.epoch_to_forks
+            .values()
+            .map(|fork| fork.fork_digest)
+            .collect()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::chain_spec::{BlobParameters, BlobSchedule};
+    use crate::MainnetEthSpec;
+
+    type E = MainnetEthSpec;
+
+    fn make_chain_spec() -> ChainSpec {
+        let blob_parameters = vec![
+            BlobParameters {
+                epoch: Epoch::new(6),
+                max_blobs_per_block: 12,
+            },
+            BlobParameters {
+                epoch: Epoch::new(50),
+                max_blobs_per_block: 24,
+            },
+            BlobParameters {
+                epoch: Epoch::new(100),
+                max_blobs_per_block: 48,
+            },
+        ];
+
+        let mut spec = E::default_spec();
+        spec.altair_fork_epoch = Some(Epoch::new(1));
+        spec.bellatrix_fork_epoch = Some(Epoch::new(2));
+        spec.capella_fork_epoch = Some(Epoch::new(3));
+        spec.deneb_fork_epoch = Some(Epoch::new(4));
+        spec.electra_fork_epoch = Some(Epoch::new(5));
+        spec.fulu_fork_epoch = Some(Epoch::new(6));
+        spec.blob_schedule = BlobSchedule::new(blob_parameters);
+        spec
+    }
+
+    #[test]
+    fn test_fork_exists() {
+        let spec = make_chain_spec();
+        let genesis_root = Hash256::ZERO;
+        let current_slot = Slot::new(7);
+
+        let context = ForkContext::new::<E>(current_slot, genesis_root, &spec);
+
+        assert!(context.fork_exists(ForkName::Electra));
+        assert!(context.fork_exists(ForkName::Fulu));
+    }
+
+    #[test]
+    fn test_current_fork_name_and_epoch() {
+        let spec = make_chain_spec();
+        let electra_epoch = spec.electra_fork_epoch.unwrap();
+        let electra_slot = electra_epoch.end_slot(E::slots_per_epoch());
+        let genesis_root = Hash256::ZERO;
+
+        let context = ForkContext::new::<E>(electra_slot, genesis_root, &spec);
+
+        assert_eq!(context.current_fork_name(), ForkName::Electra);
+        assert_eq!(context.current_fork_epoch(), electra_epoch);
+    }
+
+    #[test]
+    fn test_next_fork_digest() {
+        let spec = make_chain_spec();
+        let electra_epoch = spec.electra_fork_epoch.unwrap();
+        let electra_slot = electra_epoch.end_slot(E::slots_per_epoch());
+        let genesis_root = Hash256::ZERO;
+
+        let context = ForkContext::new::<E>(electra_slot, genesis_root, &spec);
+
+        let next_digest = context.next_fork_digest().unwrap();
+        let expected_digest = spec.compute_fork_digest(genesis_root, spec.fulu_fork_epoch.unwrap());
+        assert_eq!(next_digest, expected_digest);
+    }
+
+    #[test]
    fn test_get_fork_from_context_bytes() {
        let spec = make_chain_spec();
        let genesis_root = Hash256::ZERO;
        let current_slot = Slot::new(0);

        let context = ForkContext::new::<E>(current_slot, genesis_root, &spec);

        let electra_digest = spec.compute_fork_digest(genesis_root, Epoch::new(5));
        assert_eq!(
            context.get_fork_from_context_bytes(electra_digest),
            Some(&ForkName::Electra)
        );

        let invalid_digest = [9, 9, 9, 9];
        assert!(context
            .get_fork_from_context_bytes(invalid_digest)
            .is_none());
    }

    #[test]
    fn test_context_bytes() {
        let spec = make_chain_spec();
        let genesis_root = Hash256::ZERO;
        let current_slot = Slot::new(0);

        let context = ForkContext::new::<E>(current_slot, genesis_root, &spec);

        assert_eq!(
            context.context_bytes(Epoch::new(0)),
            spec.compute_fork_digest(genesis_root, Epoch::new(0))
        );

        // Epochs in `6..50` share the Fulu-era digest, so epoch 12 resolves to the same
        // context bytes as epoch 10.
        assert_eq!(
            context.context_bytes(Epoch::new(12)),
            spec.compute_fork_digest(genesis_root, Epoch::new(10))
        );
    }

    #[test]
    fn test_all_fork_digests() {
        let spec = make_chain_spec();
        let genesis_root = Hash256::ZERO;
        let current_slot = Slot::new(20);

        let context = ForkContext::new::<E>(current_slot, genesis_root, &spec);

        // Get all enabled fork digests
        let fork_digests = context.all_fork_digests();
        let expected_digest_count = spec.all_digest_epochs().count();

        assert_eq!(fork_digests.len(), expected_digest_count);
    }
}
diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs
index e92db494851..4fc26ccffa8 100644
--- a/consensus/types/src/fork_name.rs
+++ b/consensus/types/src/fork_name.rs
@@ -36,8 +36,6 @@ impl ForkName {
     pub fn list_all_fork_epochs(spec: &ChainSpec) -> Vec<(ForkName, Option<Epoch>)> {
         ForkName::list_all()
             .into_iter()
-            // Skip Base
-            .skip(1)
             .map(|fork| (fork, spec.fork_epoch(fork)))
             .collect()
     }
diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs
index 85bed35a19c..64dce93aefb 100644
--- a/consensus/types/src/signed_beacon_block.rs
+++ b/consensus/types/src/signed_beacon_block.rs
@@ -883,11 +883,25 @@ mod test {
         }
     }
+    fn spec_with_all_forks_enabled<E: EthSpec>() -> ChainSpec {
+        let mut chain_spec = E::default_spec();
+        chain_spec.altair_fork_epoch = Some(Epoch::new(1));
+        chain_spec.bellatrix_fork_epoch = Some(Epoch::new(2));
+        chain_spec.capella_fork_epoch = Some(Epoch::new(3));
+        chain_spec.deneb_fork_epoch = Some(Epoch::new(4));
+        chain_spec.electra_fork_epoch = Some(Epoch::new(5));
+        chain_spec.fulu_fork_epoch = Some(Epoch::new(6));
+
+        // Check that all forks up to the latest are scheduled.
+        assert!(chain_spec.fork_epoch(ForkName::latest()).is_some());
+        chain_spec
+    }
+
     #[test]
     fn test_ssz_tagged_signed_beacon_block() {
         type E = MainnetEthSpec;
-        let spec = &E::default_spec();
+        let spec = &spec_with_all_forks_enabled::<E>();
         let sig = Signature::empty();
         let blocks = vec![
             SignedBeaconBlock::<E>::from_block(
diff --git a/lcli/src/generate_bootnode_enr.rs b/lcli/src/generate_bootnode_enr.rs
index 7334f552576..98ef0b96d43 100644
--- a/lcli/src/generate_bootnode_enr.rs
+++ b/lcli/src/generate_bootnode_enr.rs
@@ -15,7 +15,6 @@ pub fn run(matches: &ArgMatches, spec: &ChainSpec) -> Result<(), Str
     let udp_port: NonZeroU16 = clap_utils::parse_required(matches, "udp-port")?;
     let tcp_port: NonZeroU16 = clap_utils::parse_required(matches, "tcp-port")?;
     let output_dir: PathBuf = clap_utils::parse_required(matches, "output-dir")?;
-    // FIXME: why is this being read from.. somewhere rather than just using the spec
     let genesis_fork_version: [u8; 4] = clap_utils::parse_ssz_required(matches, "genesis-fork-version")?;
@@ -33,13 +32,13 @@ pub fn run(matches: &ArgMatches, spec: &ChainSpec) -> Result<(), Str
     let secp256k1_keypair = secp256k1::Keypair::generate();
     let enr_key = CombinedKey::from_secp256k1(&secp256k1_keypair);
+    let genesis_fork_digest = spec.compute_fork_digest(Hash256::zero(), Epoch::new(0));
     let enr_fork_id = EnrForkId {
-        fork_digest: spec.compute_fork_digest(Hash256::zero(), Epoch::new(0)),
+        fork_digest: genesis_fork_digest,
         next_fork_version: genesis_fork_version,
         next_fork_epoch: Epoch::max_value(), // FAR_FUTURE_EPOCH
     };
-    // FIXME: need the next fork digest
-    let enr = build_enr::<E>(&enr_key, &config, &enr_fork_id, spec, [0; 4])
+    let enr = build_enr::<E>(&enr_key, &config, &enr_fork_id, genesis_fork_digest, spec)
         .map_err(|e| format!("Unable to create ENR: {:?}", e))?;
     fs::create_dir_all(&output_dir).map_err(|e| format!("Unable to create output-dir: {:?}", e))?;
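// A closing sketch (illustrative; it reuses the `make_chain_spec` schedule from the
// fork_context tests and assumes, as BPO forks require, that `compute_fork_digest` folds
// the active blob parameters into the digest): across a BPO boundary the digest rotates
// while the fork name does not.
//
//     let spec = make_chain_spec();
//     let ctx = ForkContext::new::<MainnetEthSpec>(Slot::new(0), Hash256::ZERO, &spec);
//     assert_ne!(ctx.context_bytes(Epoch::new(49)), ctx.context_bytes(Epoch::new(50)));
//     assert_eq!(
//         spec.fork_name_at_epoch(Epoch::new(49)),
//         spec.fork_name_at_epoch(Epoch::new(50))
//     );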