diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 152de1a20b9..3e02baf9017 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -21,6 +21,7 @@ use crate::block_verification_types::{ }; pub use crate::canonical_head::CanonicalHead; use crate::chain_config::ChainConfig; +use crate::custody_context::CustodyContextSsz; use crate::data_availability_checker::{ Availability, AvailabilityCheckError, AvailableBlock, AvailableBlockData, DataAvailabilityChecker, DataColumnReconstructionResult, @@ -64,7 +65,6 @@ use crate::shuffling_cache::{BlockShufflingIds, ShufflingCache}; use crate::sync_committee_verification::{ Error as SyncCommitteeError, VerifiedSyncCommitteeMessage, VerifiedSyncContribution, }; -use crate::validator_custody::CustodyContextSsz; use crate::validator_monitor::{ HISTORIC_EPOCHS as VALIDATOR_MONITOR_HISTORIC_EPOCHS, ValidatorMonitor, get_slot_delay_ms, timestamp_now, diff --git a/beacon_node/beacon_chain/src/builder.rs b/beacon_node/beacon_chain/src/builder.rs index 5564c7916fa..750cde14cac 100644 --- a/beacon_node/beacon_chain/src/builder.rs +++ b/beacon_node/beacon_chain/src/builder.rs @@ -4,6 +4,7 @@ use crate::beacon_chain::{ BEACON_CHAIN_DB_KEY, CanonicalHead, LightClientProducerEvent, OP_POOL_DB_KEY, }; use crate::beacon_proposer_cache::BeaconProposerCache; +use crate::custody_context::NodeCustodyType; use crate::data_availability_checker::DataAvailabilityChecker; use crate::fork_choice_signal::ForkChoiceSignalTx; use crate::fork_revert::{reset_fork_choice_to_finalization, revert_to_fork_boundary}; @@ -100,7 +101,7 @@ pub struct BeaconChainBuilder { kzg: Arc, task_executor: Option, validator_monitor_config: Option, - import_all_data_columns: bool, + node_custody_type: NodeCustodyType, rng: Option>, } @@ -139,7 +140,7 @@ where kzg, task_executor: None, validator_monitor_config: None, - import_all_data_columns: false, + 
node_custody_type: NodeCustodyType::Fullnode, rng: None, } } @@ -640,9 +641,9 @@ where self } - /// Sets whether to require and import all data columns when importing block. - pub fn import_all_data_columns(mut self, import_all_data_columns: bool) -> Self { - self.import_all_data_columns = import_all_data_columns; + /// Sets the node custody type for data column import. + pub fn node_custody_type(mut self, node_custody_type: NodeCustodyType) -> Self { + self.node_custody_type = node_custody_type; self } @@ -935,10 +936,11 @@ where { Arc::new(CustodyContext::new_from_persisted_custody_context( custody, - self.import_all_data_columns, + self.node_custody_type, + &self.spec, )) } else { - Arc::new(CustodyContext::new(self.import_all_data_columns)) + Arc::new(CustodyContext::new(self.node_custody_type, &self.spec)) }; debug!(?custody_context, "Loading persisted custody context"); diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index a7defa9fa2a..1f5abc4891b 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -1,3 +1,4 @@ +use crate::custody_context::NodeCustodyType; pub use proto_array::{DisallowedReOrgOffsets, ReOrgThreshold}; use serde::{Deserialize, Serialize}; use std::str::FromStr; @@ -118,6 +119,8 @@ pub struct ChainConfig { pub invalid_block_roots: HashSet, /// Disable the getBlobs optimisation to fetch blobs from the EL mempool. pub disable_get_blobs: bool, + /// The node's custody type, determining how many data columns to custody and sample. 
+ pub node_custody_type: NodeCustodyType, } impl Default for ChainConfig { @@ -158,6 +161,7 @@ impl Default for ChainConfig { data_column_publishing_delay: None, invalid_block_roots: HashSet::new(), disable_get_blobs: false, + node_custody_type: NodeCustodyType::Fullnode, } } } diff --git a/beacon_node/beacon_chain/src/validator_custody.rs b/beacon_node/beacon_chain/src/custody_context.rs similarity index 72% rename from beacon_node/beacon_chain/src/validator_custody.rs rename to beacon_node/beacon_chain/src/custody_context.rs index ea1dfdaae04..7ec13a8b519 100644 --- a/beacon_node/beacon_chain/src/validator_custody.rs +++ b/beacon_node/beacon_chain/src/custody_context.rs @@ -1,4 +1,5 @@ use parking_lot::RwLock; +use serde::{Deserialize, Serialize}; use ssz_derive::{Decode, Encode}; use std::marker::PhantomData; use std::sync::OnceLock; @@ -6,6 +7,7 @@ use std::{ collections::{BTreeMap, HashMap}, sync::atomic::{AtomicU64, Ordering}, }; +use tracing::warn; use types::data_column_custody_group::{CustodyIndex, compute_columns_for_custody_group}; use types::{ChainSpec, ColumnIndex, Epoch, EthSpec, Slot}; @@ -34,10 +36,32 @@ struct ValidatorRegistrations { /// that are then backfilled to epoch 10, the value at epoch 11 will be removed and epoch 10 /// will be added to the map instead. This should keep map size constrained to a maximum /// value of 128. + /// + /// If the node is started with a cgc override (i.e. supernode/semi-supernode flag), the cgc + /// value is inserted into this map on initialisation with epoch set to 0. For a semi-supernode, + /// this means the custody requirement can still be increased if validator custody exceeds + /// 64 columns. epoch_validator_custody_requirements: BTreeMap, } impl ValidatorRegistrations { + /// Initialise the validator registration with some default custody requirements. 
+ /// + /// If a `cgc_override` value is specified, the cgc value is inserted into the registration map + /// and is equivalent to registering validator(s) with the same custody requirement. + fn new(cgc_override: Option) -> Self { + let mut registrations = ValidatorRegistrations { + validators: Default::default(), + epoch_validator_custody_requirements: Default::default(), + }; + if let Some(custody_count) = cgc_override { + registrations + .epoch_validator_custody_requirements + .insert(Epoch::new(0), custody_count); + } + registrations + } + /// Returns the validator custody requirement at the latest epoch. fn latest_validator_custody_requirement(&self) -> Option { self.epoch_validator_custody_requirements @@ -139,6 +163,51 @@ fn get_validators_custody_requirement(validator_custody_units: u64, spec: &Chain ) } +/// Indicates the different "modes" that a node can run based on the cli +/// parameters that are relevant for computing the custody count. +/// +/// The custody count is derived from 2 values: +/// 1. The number of validators attached to the node and the spec parameters +/// that attach custody weight to attached validators. +/// 2. The cli parameters that the current node is running with. +/// +/// We always persist the validator custody units to the db across restarts +/// such that we know the validator custody units at any given epoch in the past. +/// However, knowing the cli parameter at any given epoch is a pain to maintain +/// and unnecessary. +/// +/// Therefore, the custody count at any point in time is calculated as the max of +/// the validator custody at that time and the current cli params. +/// +/// Choosing the max ensures that we always have the minimum required columns and +/// we can adjust the `status.earliest_available_slot` value to indicate to our peers +/// the columns that we can guarantee to serve. 
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Default, Deserialize, Serialize)] +pub enum NodeCustodyType { + /// The node is running with cli parameters to indicate that it + /// wants to subscribe to all columns. + Supernode, + /// The node is running with cli parameters to indicate that it + /// wants to subscribe to the minimum number of columns to enable + /// reconstruction (50%) of the full blob data on demand. + SemiSupernode, + /// The node isn't running with any explicit cli parameters + /// or is running with cli parameters to indicate that it wants + /// to only subscribe to the minimal custody requirements. + #[default] + Fullnode, +} + +impl NodeCustodyType { + pub fn get_custody_count_override(&self, spec: &ChainSpec) -> Option { + match self { + Self::Fullnode => None, + Self::SemiSupernode => Some(spec.number_of_custody_groups / 2), + Self::Supernode => Some(spec.number_of_custody_groups), + } + } +} + /// Contains all the information the node requires to calculate the /// number of columns to be custodied when checking for DA. #[derive(Debug)] @@ -150,15 +219,6 @@ pub struct CustodyContext { /// we require for data availability check, and we use to advertise to our peers in the metadata /// and enr values. validator_custody_count: AtomicU64, - /// Is the node run as a supernode based on current cli parameters. - current_is_supernode: bool, - /// The persisted value for `is_supernode` based on the previous run of this node. - /// - /// Note: We require this value because if a user restarts the node with a higher cli custody - /// count value than in the previous run, then we should continue advertising the custody - /// count based on the old value than the new one since we haven't backfilled the required - /// columns. 
- persisted_is_supernode: bool, /// Maintains all the validators that this node is connected to currently validator_registrations: RwLock, /// Stores an immutable, ordered list of all custody columns as determined by the node's NodeID @@ -171,26 +231,45 @@ impl CustodyContext { /// Create a new custody default custody context object when no persisted object /// exists. /// - /// The `is_supernode` value is based on current cli parameters. - pub fn new(is_supernode: bool) -> Self { + /// The `node_custody_type` value is based on current cli parameters. + pub fn new(node_custody_type: NodeCustodyType, spec: &ChainSpec) -> Self { + let cgc_override = node_custody_type.get_custody_count_override(spec); + // If there's no override, we initialise `validator_custody_count` to 0. This has been the + // existing behaviour and we maintain this for now to avoid a semantic schema change until + // a later release. Self { - validator_custody_count: AtomicU64::new(0), - current_is_supernode: is_supernode, - persisted_is_supernode: is_supernode, - validator_registrations: Default::default(), + validator_custody_count: AtomicU64::new(cgc_override.unwrap_or(0)), + validator_registrations: RwLock::new(ValidatorRegistrations::new(cgc_override)), all_custody_columns_ordered: OnceLock::new(), _phantom_data: PhantomData, } } + /// Restore the custody context from disk. + /// + /// * If NodeCustodyType::custody_count < validator_custody_at_head, it means the attached + /// validator stake has increased the node's CGC. We ignore the CLI input. + /// * If NodeCustodyType::custody_count > validator_custody_at_head, it means the user has + /// changed the node's custody type via either the --supernode or --semi-supernode flags, + /// and will require a resync until we implement column backfill for this scenario. 
pub fn new_from_persisted_custody_context( ssz_context: CustodyContextSsz, - is_supernode: bool, + node_custody_type: NodeCustodyType, + spec: &ChainSpec, ) -> Self { + let cgc_override = node_custody_type.get_custody_count_override(spec); + if let Some(cgc_from_cli) = cgc_override + && cgc_from_cli > ssz_context.validator_custody_at_head + { + warn!( + info = "node will continue to run with the current custody count", + current_custody_count = ssz_context.validator_custody_at_head, + node_custody_type = ?node_custody_type, + "Changing node type is currently not supported without a resync and will have no effect", + ); + } CustodyContext { validator_custody_count: AtomicU64::new(ssz_context.validator_custody_at_head), - current_is_supernode: is_supernode, - persisted_is_supernode: ssz_context.persisted_is_supernode, validator_registrations: RwLock::new(ValidatorRegistrations { validators: Default::default(), epoch_validator_custody_requirements: ssz_context @@ -249,12 +328,11 @@ impl CustodyContext { return None; }; - let current_cgc = self.custody_group_count_at_head(spec); - let validator_custody_count_at_head = self.validator_custody_count.load(Ordering::Relaxed); + let current_cgc = self.validator_custody_count.load(Ordering::Relaxed); - if new_validator_custody != validator_custody_count_at_head { + if new_validator_custody != current_cgc { tracing::debug!( - old_count = validator_custody_count_at_head, + old_count = current_cgc, new_count = new_validator_custody, "Validator count at head updated" ); @@ -285,9 +363,6 @@ impl CustodyContext { /// Do NOT use this directly for data availability check, use `self.sampling_size` instead as /// CGC can change over epochs. 
pub fn custody_group_count_at_head(&self, spec: &ChainSpec) -> u64 { - if self.current_is_supernode { - return spec.number_of_custody_groups; - } let validator_custody_count_at_head = self.validator_custody_count.load(Ordering::Relaxed); // If there are no validators, return the minimum custody_requirement @@ -305,14 +380,10 @@ impl CustodyContext { /// /// See also: [`Self::num_of_custody_groups_to_sample`]. pub fn custody_group_count_at_epoch(&self, epoch: Epoch, spec: &ChainSpec) -> u64 { - if self.current_is_supernode { - spec.number_of_custody_groups - } else { - self.validator_registrations - .read() - .custody_requirement_at_epoch(epoch) - .unwrap_or(spec.custody_requirement) - } + self.validator_registrations + .read() + .custody_requirement_at_epoch(epoch) + .unwrap_or(spec.custody_requirement) } /// Returns the count of custody groups this node must _sample_ for a block at `epoch` to import. @@ -406,6 +477,7 @@ pub struct CustodyCountChanged { #[derive(Debug, Encode, Decode, Clone)] pub struct CustodyContextSsz { pub validator_custody_at_head: u64, + /// DEPRECATED. This field is no longer in use and will be removed in a future release. 
pub persisted_is_supernode: bool, pub epoch_validator_custody_requirements: Vec<(Epoch, u64)>, } @@ -414,7 +486,8 @@ impl From<&CustodyContext> for CustodyContextSsz { fn from(context: &CustodyContext) -> Self { CustodyContextSsz { validator_custody_at_head: context.validator_custody_count.load(Ordering::Relaxed), - persisted_is_supernode: context.persisted_is_supernode, + // This field is deprecated and has no effect + persisted_is_supernode: false, epoch_validator_custody_requirements: context .validator_registrations .read() @@ -438,8 +511,8 @@ mod tests { #[test] fn no_validators_supernode_default() { - let custody_context = CustodyContext::::new(true); let spec = E::default_spec(); + let custody_context = CustodyContext::::new(NodeCustodyType::Supernode, &spec); assert_eq!( custody_context.custody_group_count_at_head(&spec), spec.number_of_custody_groups @@ -450,10 +523,24 @@ mod tests { ); } + #[test] + fn no_validators_semi_supernode_default() { + let spec = E::default_spec(); + let custody_context = CustodyContext::::new(NodeCustodyType::SemiSupernode, &spec); + assert_eq!( + custody_context.custody_group_count_at_head(&spec), + spec.number_of_custody_groups / 2 + ); + assert_eq!( + custody_context.num_of_custody_groups_to_sample(Epoch::new(0), &spec), + spec.number_of_custody_groups / 2 + ); + } + #[test] fn no_validators_fullnode_default() { - let custody_context = CustodyContext::::new(false); let spec = E::default_spec(); + let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); assert_eq!( custody_context.custody_group_count_at_head(&spec), spec.custody_requirement, @@ -467,8 +554,8 @@ mod tests { #[test] fn register_single_validator_should_update_cgc() { - let custody_context = CustodyContext::::new(false); let spec = E::default_spec(); + let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); let bal_per_additional_group = spec.balance_per_additional_custody_group; let min_val_custody_requirement = 
spec.validator_custody_requirement; // One single node increases its balance over 3 epochs. @@ -491,8 +578,8 @@ mod tests { #[test] fn register_multiple_validators_should_update_cgc() { - let custody_context = CustodyContext::::new(false); let spec = E::default_spec(); + let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); let bal_per_additional_group = spec.balance_per_additional_custody_group; let min_val_custody_requirement = spec.validator_custody_requirement; // Add 3 validators over 3 epochs. @@ -528,8 +615,8 @@ mod tests { #[test] fn register_validators_should_not_update_cgc_for_supernode() { - let custody_context = CustodyContext::::new(true); let spec = E::default_spec(); + let custody_context = CustodyContext::::new(NodeCustodyType::Supernode, &spec); let bal_per_additional_group = spec.balance_per_additional_custody_group; // Add 3 validators over 3 epochs. @@ -566,8 +653,8 @@ mod tests { #[test] fn cgc_change_should_be_effective_to_sampling_after_delay() { - let custody_context = CustodyContext::::new(false); let spec = E::default_spec(); + let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); let current_slot = Slot::new(10); let current_epoch = current_slot.epoch(E::slots_per_epoch()); let default_sampling_size = @@ -597,8 +684,8 @@ mod tests { #[test] fn validator_dropped_after_no_registrations_within_expiry_should_not_reduce_cgc() { - let custody_context = CustodyContext::::new(false); let spec = E::default_spec(); + let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); let current_slot = Slot::new(10); let val_custody_units_1 = 10; let val_custody_units_2 = 5; @@ -639,8 +726,8 @@ mod tests { #[test] fn validator_dropped_after_no_registrations_within_expiry() { - let custody_context = CustodyContext::::new(false); let spec = E::default_spec(); + let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); let current_slot = Slot::new(10); let 
val_custody_units_1 = 10; let val_custody_units_2 = 5; @@ -690,7 +777,7 @@ mod tests { #[test] fn should_init_ordered_data_columns_and_return_sampling_columns() { let spec = E::default_spec(); - let custody_context = CustodyContext::::new(false); + let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); let sampling_size = custody_context.num_of_data_columns_to_sample(Epoch::new(0), &spec); // initialise ordered columns @@ -742,8 +829,8 @@ mod tests { #[test] fn custody_columns_for_epoch_no_validators_fullnode() { - let custody_context = CustodyContext::::new(false); let spec = E::default_spec(); + let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); custody_context @@ -758,8 +845,8 @@ mod tests { #[test] fn custody_columns_for_epoch_no_validators_supernode() { - let custody_context = CustodyContext::::new(true); let spec = E::default_spec(); + let custody_context = CustodyContext::::new(NodeCustodyType::Supernode, &spec); let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); custody_context @@ -774,8 +861,8 @@ mod tests { #[test] fn custody_columns_for_epoch_with_validators_should_match_cgc() { - let custody_context = CustodyContext::::new(false); let spec = E::default_spec(); + let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); let val_custody_units = 10; @@ -800,8 +887,8 @@ mod tests { #[test] fn custody_columns_for_epoch_specific_epoch_uses_epoch_cgc() { - let custody_context = CustodyContext::::new(false); let spec = E::default_spec(); + let custody_context = CustodyContext::::new(NodeCustodyType::Fullnode, &spec); let all_custody_groups_ordered = (0..spec.number_of_custody_groups).collect::>(); let test_epoch = Epoch::new(5); @@ -817,4 +904,133 @@ mod tests { expected_cgc as usize ); } + + 
#[test] + fn restore_from_persisted_fullnode_no_validators() { + let spec = E::default_spec(); + let ssz_context = CustodyContextSsz { + validator_custody_at_head: 0, // no validators + persisted_is_supernode: false, + epoch_validator_custody_requirements: vec![], + }; + + let custody_context = CustodyContext::::new_from_persisted_custody_context( + ssz_context, + NodeCustodyType::Fullnode, + &spec, + ); + + assert_eq!( + custody_context.custody_group_count_at_head(&spec), + spec.custody_requirement, + "restored custody group count should match fullnode default" + ); + } + + #[test] + fn restore_fullnode_then_switch_to_supernode_has_no_effect() { + let spec = E::default_spec(); + let ssz_context = CustodyContextSsz { + validator_custody_at_head: 0, // no validators + persisted_is_supernode: false, + epoch_validator_custody_requirements: vec![], + }; + + // Attempt to restore as supernode (wants 128), but should use original persisted value + let custody_context = CustodyContext::::new_from_persisted_custody_context( + ssz_context, + NodeCustodyType::Supernode, + &spec, + ); + + assert_eq!( + custody_context.custody_group_count_at_head(&spec), + spec.custody_requirement, + "should use original fullnode cgc, not supernode cgc" + ); + } + + #[test] + fn restore_supernode_then_switch_to_fullnode_uses_persisted() { + let spec = E::default_spec(); + let supernode_cgc = spec.number_of_custody_groups; // supernode cgc + + let ssz_context = CustodyContextSsz { + validator_custody_at_head: supernode_cgc, + persisted_is_supernode: false, + epoch_validator_custody_requirements: vec![(Epoch::new(0), supernode_cgc)], + }; + + // Attempt to restore as fullnode (wants 8), but should keep persisted value (128) + let custody_context = CustodyContext::::new_from_persisted_custody_context( + ssz_context, + NodeCustodyType::Fullnode, + &spec, + ); + + assert_eq!( + custody_context.custody_group_count_at_head(&spec), + supernode_cgc, + "should use persisted supernode cgc, not fullnode 
cgc" + ); + } + + #[test] + fn restore_with_validator_custody_history_across_epochs() { + let spec = E::default_spec(); + let initial_cgc = 8u64; + let increased_cgc = 16u64; + let final_cgc = 32u64; + + let ssz_context = CustodyContextSsz { + validator_custody_at_head: final_cgc, + persisted_is_supernode: false, + epoch_validator_custody_requirements: vec![ + (Epoch::new(0), initial_cgc), + (Epoch::new(10), increased_cgc), + (Epoch::new(20), final_cgc), + ], + }; + + let custody_context = CustodyContext::::new_from_persisted_custody_context( + ssz_context, + NodeCustodyType::Fullnode, + &spec, + ); + + // Verify head uses latest value + assert_eq!( + custody_context.custody_group_count_at_head(&spec), + final_cgc + ); + + // Verify historical epoch lookups work correctly + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(5), &spec), + initial_cgc, + "epoch 5 should use initial cgc" + ); + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(15), &spec), + increased_cgc, + "epoch 15 should use increased cgc" + ); + assert_eq!( + custody_context.custody_group_count_at_epoch(Epoch::new(25), &spec), + final_cgc, + "epoch 25 should use final cgc" + ); + + // Verify sampling size calculation uses correct historical values + assert_eq!( + custody_context.num_of_custody_groups_to_sample(Epoch::new(5), &spec), + spec.samples_per_slot, + "sampling at epoch 5 should use spec minimum since cgc is at minimum" + ); + assert_eq!( + custody_context.num_of_custody_groups_to_sample(Epoch::new(25), &spec), + final_cgc, + "sampling at epoch 25 should match final cgc" + ); + } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index c937c32c68e..d6cc8d89477 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -868,6 +868,7 @@ impl MaybeAvailableBlock { mod test { use super::*; use 
crate::CustodyContext; + use crate::custody_context::NodeCustodyType; use crate::test_utils::{ EphemeralHarnessType, NumBlobs, generate_rand_block_and_data_columns, get_kzg, }; @@ -1201,7 +1202,7 @@ mod test { ); let kzg = get_kzg(&spec); let store = Arc::new(HotColdDB::open_ephemeral(<_>::default(), spec.clone()).unwrap()); - let custody_context = Arc::new(CustodyContext::new(false)); + let custody_context = Arc::new(CustodyContext::new(NodeCustodyType::Fullnode, &spec)); let complete_blob_backfill = false; DataAvailabilityChecker::new( complete_blob_backfill, diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 42f6dbd8567..b842a1a3f99 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -827,6 +827,7 @@ mod test { blob_verification::GossipVerifiedBlob, block_verification::PayloadVerificationOutcome, block_verification_types::{AsBlock, BlockImportData}, + custody_context::NodeCustodyType, data_availability_checker::STATE_LRU_CAPACITY, test_utils::{BaseHarnessType, BeaconChainHarness, DiskHarnessType}, }; @@ -1021,7 +1022,7 @@ mod test { let spec = harness.spec.clone(); let test_store = harness.chain.store.clone(); let capacity_non_zero = new_non_zero_usize(capacity); - let custody_context = Arc::new(CustodyContext::new(false)); + let custody_context = Arc::new(CustodyContext::new(NodeCustodyType::Fullnode, &spec)); let cache = Arc::new( DataAvailabilityCheckerInner::::new( capacity_non_zero, diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index fd2162e7d31..4ac3e54742d 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -17,6 +17,7 @@ pub mod block_verification_types; pub mod builder; pub mod canonical_head; pub mod chain_config; +pub mod 
custody_context; pub mod data_availability_checker; pub mod data_column_verification; mod early_attester_cache; @@ -55,7 +56,6 @@ pub mod summaries_dag; pub mod sync_committee_rewards; pub mod sync_committee_verification; pub mod test_utils; -pub mod validator_custody; pub mod validator_monitor; pub mod validator_pubkey_cache; @@ -84,6 +84,7 @@ pub use block_verification::{ pub use block_verification_types::AvailabilityPendingExecutedBlock; pub use block_verification_types::ExecutedBlock; pub use canonical_head::{CachedHead, CanonicalHead, CanonicalHeadRwLock}; +pub use custody_context::CustodyContext; pub use events::ServerSentEventHandler; pub use execution_layer::EngineState; pub use execution_payload::NotifyExecutionLayer; @@ -99,4 +100,3 @@ pub use state_processing::per_block_processing::errors::{ }; pub use store; pub use types; -pub use validator_custody::CustodyContext; diff --git a/beacon_node/beacon_chain/src/persisted_custody.rs b/beacon_node/beacon_chain/src/persisted_custody.rs index b685ea36b75..ba221c67b5f 100644 --- a/beacon_node/beacon_chain/src/persisted_custody.rs +++ b/beacon_node/beacon_chain/src/persisted_custody.rs @@ -1,4 +1,4 @@ -use crate::validator_custody::CustodyContextSsz; +use crate::custody_context::CustodyContextSsz; use ssz::{Decode, Encode}; use std::sync::Arc; use store::{DBColumn, Error as StoreError, HotColdDB, ItemStore, StoreItem}; diff --git a/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs b/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs index 661d015942e..38714ea0605 100644 --- a/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs +++ b/beacon_node/beacon_chain/src/schema_change/migration_schema_v26.rs @@ -1,6 +1,6 @@ use crate::BeaconChainTypes; +use crate::custody_context::CustodyContextSsz; use crate::persisted_custody::{CUSTODY_DB_KEY, PersistedCustody}; -use crate::validator_custody::CustodyContextSsz; use ssz::{Decode, Encode}; use ssz_derive::{Decode, Encode}; 
use std::sync::Arc; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 0b125efa32d..38797d0264d 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -1,5 +1,6 @@ use crate::blob_verification::GossipVerifiedBlob; use crate::block_verification_types::{AsBlock, RpcBlock}; +use crate::custody_context::NodeCustodyType; use crate::data_column_verification::CustodyDataColumn; use crate::kzg_utils::build_data_column_sidecars; use crate::observed_operations::ObservationOutcome; @@ -210,7 +211,7 @@ pub struct Builder { testing_slot_clock: Option, validator_monitor_config: Option, genesis_state_builder: Option>, - import_all_data_columns: bool, + node_custody_type: NodeCustodyType, runtime: TestRuntime, } @@ -356,7 +357,7 @@ where testing_slot_clock: None, validator_monitor_config: None, genesis_state_builder: None, - import_all_data_columns: false, + node_custody_type: NodeCustodyType::Fullnode, runtime, } } @@ -442,8 +443,8 @@ where self } - pub fn import_all_data_columns(mut self, import_all_data_columns: bool) -> Self { - self.import_all_data_columns = import_all_data_columns; + pub fn node_custody_type(mut self, node_custody_type: NodeCustodyType) -> Self { + self.node_custody_type = node_custody_type; self } @@ -565,7 +566,7 @@ where .execution_layer(self.execution_layer) .shutdown_sender(shutdown_tx) .chain_config(chain_config) - .import_all_data_columns(self.import_all_data_columns) + .node_custody_type(self.node_custody_type) .event_handler(Some(ServerSentEventHandler::new_with_capacity(5))) .validator_monitor_config(validator_monitor_config) .rng(Box::new(StdRng::seed_from_u64(42))); diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 47f5be02cb4..7dfef50ea11 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs 
@@ -4,6 +4,7 @@ use beacon_chain::block_verification_types::{AsBlock, ExecutedBlock, RpcBlock}; use beacon_chain::data_column_verification::CustodyDataColumn; use beacon_chain::{ AvailabilityProcessingStatus, BeaconChain, BeaconChainTypes, ExecutionPendingBlock, + custody_context::NodeCustodyType, test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, test_spec, }, @@ -45,7 +46,7 @@ async fn get_chain_segment() -> (Vec>, Vec (Vec>, Vec BeaconChainHarness> { let harness = BeaconChainHarness::builder(MainnetEthSpec) .default_spec() @@ -115,7 +116,7 @@ fn get_harness( ..ChainConfig::default() }) .keypairs(KEYPAIRS[0..validator_count].to_vec()) - .import_all_data_columns(supernode) + .node_custody_type(node_custody_type) .fresh_ephemeral_store() .mock_execution_layer() .build(); @@ -259,7 +260,7 @@ fn update_data_column_signed_header( #[tokio::test] async fn chain_segment_full_segment() { - let harness = get_harness(VALIDATOR_COUNT, false); + let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() @@ -297,7 +298,7 @@ async fn chain_segment_full_segment() { #[tokio::test] async fn chain_segment_varying_chunk_size() { for chunk_size in &[1, 2, 3, 5, 31, 32, 33, 42] { - let harness = get_harness(VALIDATOR_COUNT, false); + let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let blocks: Vec> = chain_segment_blocks(&chain_segment, &chain_segment_blobs) .into_iter() @@ -329,7 +330,7 @@ async fn chain_segment_varying_chunk_size() { #[tokio::test] async fn chain_segment_non_linear_parent_roots() { - let harness = get_harness(VALIDATOR_COUNT, false); + let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); let (chain_segment, chain_segment_blobs) = 
get_chain_segment().await; harness @@ -386,7 +387,7 @@ async fn chain_segment_non_linear_parent_roots() { #[tokio::test] async fn chain_segment_non_linear_slots() { - let harness = get_harness(VALIDATOR_COUNT, false); + let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; harness .chain @@ -528,7 +529,7 @@ async fn assert_invalid_signature( async fn get_invalid_sigs_harness( chain_segment: &[BeaconSnapshot], ) -> BeaconChainHarness> { - let harness = get_harness(VALIDATOR_COUNT, false); + let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); harness .chain .slot_clock @@ -986,7 +987,7 @@ fn unwrap_err(result: Result) -> U { #[tokio::test] async fn block_gossip_verification() { - let harness = get_harness(VALIDATOR_COUNT, false); + let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); let (chain_segment, chain_segment_blobs) = get_chain_segment().await; let block_index = CHAIN_SEGMENT_LENGTH - 2; @@ -1389,7 +1390,7 @@ async fn verify_block_for_gossip_slashing_detection() { #[tokio::test] async fn verify_block_for_gossip_doppelganger_detection() { - let harness = get_harness(VALIDATOR_COUNT, false); + let harness = get_harness(VALIDATOR_COUNT, NodeCustodyType::Fullnode); let state = harness.get_current_state(); let ((block, _), _) = harness.make_block(state.clone(), Slot::new(1)).await; diff --git a/beacon_node/beacon_chain/tests/column_verification.rs b/beacon_node/beacon_chain/tests/column_verification.rs index 5cd3811ea52..229ae1e1998 100644 --- a/beacon_node/beacon_chain/tests/column_verification.rs +++ b/beacon_node/beacon_chain/tests/column_verification.rs @@ -1,5 +1,6 @@ #![cfg(not(debug_assertions))] +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, EphemeralHarnessType, generate_data_column_sidecars_from_block, test_spec, @@ -24,7 +25,7 @@ static 
KEYPAIRS: LazyLock> = fn get_harness( validator_count: usize, spec: Arc, - supernode: bool, + node_custody_type: NodeCustodyType, ) -> BeaconChainHarness> { create_test_tracing_subscriber(); let harness = BeaconChainHarness::builder(MainnetEthSpec) @@ -34,7 +35,7 @@ fn get_harness( ..ChainConfig::default() }) .keypairs(KEYPAIRS[0..validator_count].to_vec()) - .import_all_data_columns(supernode) + .node_custody_type(node_custody_type) .fresh_ephemeral_store() .mock_execution_layer() .build(); @@ -54,8 +55,7 @@ async fn rpc_columns_with_invalid_header_signature() { return; } - let supernode = true; - let harness = get_harness(VALIDATOR_COUNT, spec, supernode); + let harness = get_harness(VALIDATOR_COUNT, spec, NodeCustodyType::Supernode); let num_blocks = E::slots_per_epoch() as usize; diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 69d16b30710..53e841692e6 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -3,6 +3,7 @@ use beacon_chain::attestation_verification::Error as AttnError; use beacon_chain::block_verification_types::RpcBlock; use beacon_chain::builder::BeaconChainBuilder; +use beacon_chain::custody_context::CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS; use beacon_chain::data_availability_checker::AvailableBlock; use beacon_chain::historical_data_columns::HistoricalDataColumnError; use beacon_chain::schema_change::migrate_schema; @@ -11,13 +12,13 @@ use beacon_chain::test_utils::{ AttestationStrategy, BeaconChainHarness, BlockStrategy, DiskHarnessType, get_kzg, mock_execution_layer_from_parts, test_spec, }; -use beacon_chain::validator_custody::CUSTODY_CHANGE_DA_EFFECTIVE_DELAY_SECONDS; use beacon_chain::{ BeaconChain, BeaconChainError, BeaconChainTypes, BeaconSnapshot, BlockError, ChainConfig, NotifyExecutionLayer, ServerSentEventHandler, WhenSlotSkipped, beacon_proposer_cache::{ compute_proposer_duties_from_head, 
ensure_state_can_determine_proposers_for_epoch, }, + custody_context::NodeCustodyType, data_availability_checker::MaybeAvailableBlock, historical_blocks::HistoricalBlockError, migrate::MigratorConfig, @@ -98,7 +99,12 @@ fn get_harness( reconstruct_historic_states: true, ..ChainConfig::default() }; - get_harness_generic(store, validator_count, chain_config, false) + get_harness_generic( + store, + validator_count, + chain_config, + NodeCustodyType::Fullnode, + ) } fn get_harness_import_all_data_columns( @@ -110,14 +116,19 @@ fn get_harness_import_all_data_columns( reconstruct_historic_states: true, ..ChainConfig::default() }; - get_harness_generic(store, validator_count, chain_config, true) + get_harness_generic( + store, + validator_count, + chain_config, + NodeCustodyType::Supernode, + ) } fn get_harness_generic( store: Arc, BeaconNodeBackend>>, validator_count: usize, chain_config: ChainConfig, - import_all_data_columns: bool, + node_custody_type: NodeCustodyType, ) -> TestHarness { let harness = TestHarness::builder(MinimalEthSpec) .spec(store.get_chain_spec().clone()) @@ -125,7 +136,7 @@ fn get_harness_generic( .fresh_disk_store(store) .mock_execution_layer() .chain_config(chain_config) - .import_all_data_columns(import_all_data_columns) + .node_custody_type(node_custody_type) .build(); harness.advance_slot(); harness @@ -3420,7 +3431,12 @@ async fn process_blocks_and_attestations_for_unaligned_checkpoint() { reconstruct_historic_states: false, ..ChainConfig::default() }; - let harness = get_harness_generic(store.clone(), LOW_VALIDATOR_COUNT, chain_config, false); + let harness = get_harness_generic( + store.clone(), + LOW_VALIDATOR_COUNT, + chain_config, + NodeCustodyType::Fullnode, + ); let all_validators = (0..LOW_VALIDATOR_COUNT).collect::>(); @@ -3839,14 +3855,13 @@ async fn schema_downgrade_to_min_version( reconstruct_historic_states, ..ChainConfig::default() }; - let import_all_data_columns = false; let store = get_store_generic(&db_path, 
store_config.clone(), spec.clone()); let harness = get_harness_generic( store.clone(), LOW_VALIDATOR_COUNT, chain_config.clone(), - import_all_data_columns, + NodeCustodyType::Fullnode, ); harness @@ -4862,14 +4877,13 @@ async fn ancestor_state_root_prior_to_split() { reconstruct_historic_states: false, ..ChainConfig::default() }; - let import_all_data_columns = false; let store = get_store_generic(&db_path, store_config, spec); let harness = get_harness_generic( store.clone(), LOW_VALIDATOR_COUNT, chain_config, - import_all_data_columns, + NodeCustodyType::Fullnode, ); // Produce blocks until we have passed through two full snapshot periods. This period length is @@ -4956,14 +4970,13 @@ async fn replay_from_split_state() { reconstruct_historic_states: false, ..ChainConfig::default() }; - let import_all_data_columns = false; let store = get_store_generic(&db_path, store_config.clone(), spec.clone()); let harness = get_harness_generic( store.clone(), LOW_VALIDATOR_COUNT, chain_config, - import_all_data_columns, + NodeCustodyType::Fullnode, ); // Produce blocks until we finalize epoch 3 which will not be stored as a snapshot. 
diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 02c042bf282..c3c827f0aae 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -202,7 +202,7 @@ where .beacon_graffiti(beacon_graffiti) .event_handler(event_handler) .execution_layer(execution_layer) - .import_all_data_columns(config.network.subscribe_all_data_column_subnets) + .node_custody_type(config.chain.node_custody_type) .validator_monitor_config(config.validator_monitor.clone()) .rng(Box::new( StdRng::try_from_rng(&mut OsRng) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 7c2282a488c..dc2fd4ae440 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1,3 +1,4 @@ +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::test_utils::RelativeSyncCommittee; use beacon_chain::{ BeaconChain, ChainConfig, StateSkipConfig, WhenSlotSkipped, @@ -90,7 +91,7 @@ struct ApiTester { struct ApiTesterConfig { spec: ChainSpec, retain_historic_states: bool, - import_all_data_columns: bool, + node_custody_type: NodeCustodyType, } impl Default for ApiTesterConfig { @@ -100,7 +101,7 @@ impl Default for ApiTesterConfig { Self { spec, retain_historic_states: false, - import_all_data_columns: false, + node_custody_type: NodeCustodyType::Fullnode, } } } @@ -139,7 +140,7 @@ impl ApiTester { .deterministic_withdrawal_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer() - .import_all_data_columns(config.import_all_data_columns) + .node_custody_type(config.node_custody_type) .build(); harness @@ -7842,8 +7843,7 @@ async fn get_blobs_post_fulu_supernode() { let mut config = ApiTesterConfig { retain_historic_states: false, spec: E::default_spec(), - // For supernode, we import all data columns - import_all_data_columns: true, + node_custody_type: NodeCustodyType::Supernode, }; config.spec.altair_fork_epoch = Some(Epoch::new(0)); 
config.spec.bellatrix_fork_epoch = Some(Epoch::new(0)); diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index a3aef8f8029..a9794cb5c42 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -9,6 +9,7 @@ use crate::{ sync::{SyncMessage, manager::BlockProcessType}, }; use beacon_chain::block_verification_types::RpcBlock; +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::data_column_verification::validate_data_column_sidecar_for_gossip; use beacon_chain::kzg_utils::blobs_to_data_column_sidecars; use beacon_chain::observed_data_sidecars::DoNotObserve; @@ -94,20 +95,32 @@ impl TestRig { // This allows for testing voluntary exits without building out a massive chain. let mut spec = test_spec::(); spec.shard_committee_period = 2; - Self::new_parametric(chain_length, BeaconProcessorConfig::default(), false, spec).await + Self::new_parametric( + chain_length, + BeaconProcessorConfig::default(), + NodeCustodyType::Fullnode, + spec, + ) + .await } pub async fn new_supernode(chain_length: u64) -> Self { // This allows for testing voluntary exits without building out a massive chain. 
let mut spec = test_spec::(); spec.shard_committee_period = 2; - Self::new_parametric(chain_length, BeaconProcessorConfig::default(), true, spec).await + Self::new_parametric( + chain_length, + BeaconProcessorConfig::default(), + NodeCustodyType::Supernode, + spec, + ) + .await } pub async fn new_parametric( chain_length: u64, beacon_processor_config: BeaconProcessorConfig, - import_data_columns: bool, + node_custody_type: NodeCustodyType, spec: ChainSpec, ) -> Self { let spec = Arc::new(spec); @@ -116,7 +129,7 @@ impl TestRig { .deterministic_keypairs(VALIDATOR_COUNT) .fresh_ephemeral_store() .mock_execution_layer() - .import_all_data_columns(import_data_columns) + .node_custody_type(node_custody_type) .chain_config(<_>::default()) .build(); @@ -1610,7 +1623,7 @@ async fn test_backfill_sync_processing_rate_limiting_disabled() { let mut rig = TestRig::new_parametric( SMALL_CHAIN, beacon_processor_config, - false, + NodeCustodyType::Fullnode, test_spec::(), ) .await; @@ -1692,7 +1705,13 @@ async fn test_blobs_by_range_spans_fulu_fork() { spec.fulu_fork_epoch = Some(Epoch::new(1)); spec.gloas_fork_epoch = Some(Epoch::new(2)); - let mut rig = TestRig::new_parametric(64, BeaconProcessorConfig::default(), false, spec).await; + let mut rig = TestRig::new_parametric( + 64, + BeaconProcessorConfig::default(), + NodeCustodyType::Fullnode, + spec, + ) + .await; let start_slot = 16; // This will span from epoch 0 (Electra) to epoch 1 (Fulu) diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 28f355151dc..e4c7c6ff1fe 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -59,6 +59,18 @@ pub fn cli_app() -> Command { helps network resilience by serving all data columns to syncing peers.") .display_order(0) ) + .arg( + Arg::new("semi-supernode") + .long("semi-supernode") + .action(ArgAction::SetTrue) + .help_heading(FLAG_HEADER) + .conflicts_with("supernode") + .help("Run in minimal reconstruction mode. 
This node will subscribe to and custody \ + half of the data columns (enough for reconstruction), enabling efficient \ + data availability with lower bandwidth and storage requirements compared to \ + a supernode, while still supporting full blob reconstruction.") + .display_order(0) + ) .arg( Arg::new("malicious-withhold-count") .long("malicious-withhold-count") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index acb392779fd..3b0e80e0b7d 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -4,6 +4,7 @@ use beacon_chain::chain_config::{ DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_RE_ORG_PARENT_THRESHOLD, DisallowedReOrgOffsets, INVALID_HOLESKY_BLOCK_ROOT, ReOrgThreshold, }; +use beacon_chain::custody_context::NodeCustodyType; use beacon_chain::graffiti_calculator::GraffitiOrigin; use clap::{ArgMatches, Id, parser::ValueSource}; use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG; @@ -108,6 +109,19 @@ pub fn get_config( set_network_config(&mut client_config.network, cli_args, &data_dir_ref)?; + // Parse custody mode from CLI flags + let is_supernode = parse_flag(cli_args, "supernode"); + let is_semi_supernode = parse_flag(cli_args, "semi-supernode"); + + client_config.chain.node_custody_type = if is_supernode { + client_config.network.subscribe_all_data_column_subnets = true; + NodeCustodyType::Supernode + } else if is_semi_supernode { + NodeCustodyType::SemiSupernode + } else { + NodeCustodyType::Fullnode + }; + /* * Staking flag * Note: the config values set here can be overwritten by other more specific cli params @@ -1136,10 +1150,6 @@ pub fn set_network_config( config.network_dir = data_dir.join(DEFAULT_NETWORK_DIR); }; - if parse_flag(cli_args, "supernode") { - config.subscribe_all_data_column_subnets = true; - } - if parse_flag(cli_args, "subscribe-all-subnets") { config.subscribe_all_subnets = true; } diff --git a/book/src/help_bn.md b/book/src/help_bn.md index 6680202a277..5f3c43a7e42 100644 --- 
a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -552,6 +552,12 @@ Flags: When present, Lighthouse will forget the payload statuses of any already-imported blocks. This can assist in the recovery from a consensus failure caused by the execution layer. + --semi-supernode + Run in minimal reconstruction mode. This node will subscribe to and + custody half of the data columns (enough for reconstruction), enabling + efficient data availability with lower bandwidth and storage + requirements compared to a supernode, while still supporting full blob + reconstruction. --shutdown-after-sync Shutdown beacon node as soon as sync is completed. Backfill sync will not be performed before shutdown. diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 5a057d7d7f8..8342b021738 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -4,6 +4,7 @@ use beacon_node::beacon_chain::chain_config::{ DEFAULT_RE_ORG_MAX_EPOCHS_SINCE_FINALIZATION, DEFAULT_SYNC_TOLERANCE_EPOCHS, DisallowedReOrgOffsets, }; +use beacon_node::beacon_chain::custody_context::NodeCustodyType; use beacon_node::{ ClientConfig as Config, beacon_chain::graffiti_calculator::GraffitiOrigin, beacon_chain::store::config::DatabaseBackend as BeaconNodeBackend, @@ -782,20 +783,38 @@ fn network_subscribe_all_data_column_subnets_flag() { CommandLineTest::new() .flag("subscribe-all-data-column-subnets", None) .run_with_zero_port() - .with_config(|config| assert!(config.network.subscribe_all_data_column_subnets)); + .with_config(|config| { + assert_eq!(config.chain.node_custody_type, NodeCustodyType::Supernode) + }); } #[test] fn network_supernode_flag() { CommandLineTest::new() .flag("supernode", None) .run_with_zero_port() - .with_config(|config| assert!(config.network.subscribe_all_data_column_subnets)); + .with_config(|config| { + assert_eq!(config.chain.node_custody_type, NodeCustodyType::Supernode) + }); +} +#[test] +fn network_semi_supernode_flag() { + 
CommandLineTest::new() + .flag("semi-supernode", None) + .run_with_zero_port() + .with_config(|config| { + assert_eq!( + config.chain.node_custody_type, + NodeCustodyType::SemiSupernode + ) + }); } #[test] -fn network_subscribe_all_data_column_subnets_default() { +fn network_node_custody_type_default() { CommandLineTest::new() .run_with_zero_port() - .with_config(|config| assert!(!config.network.subscribe_all_data_column_subnets)); + .with_config(|config| { + assert_eq!(config.chain.node_custody_type, NodeCustodyType::Fullnode) + }); } #[test] fn blob_publication_batches() { diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 1380e44acdd..47b99023455 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -16,6 +16,7 @@ use beacon_chain::{ VerifiedAttestation, obtain_indexed_attestation_and_committees_per_slot, }, blob_verification::GossipVerifiedBlob, + custody_context::NodeCustodyType, test_utils::{BeaconChainHarness, EphemeralHarnessType}, }; use execution_layer::{PayloadStatusV1, json_structures::JsonPayloadStatusV1Status}; @@ -436,7 +437,7 @@ impl Tester { .genesis_state_ephemeral_store(case.anchor_state.clone()) .mock_execution_layer() .recalculate_fork_times_with_genesis(0) - .import_all_data_columns(true) + .node_custody_type(NodeCustodyType::Supernode) .mock_execution_layer_all_payloads_valid() .build();