
Commit 9acb1cf

Fix one schema test and add a new one that's broken 😎
1 parent 78d0496 commit 9acb1cf

4 files changed: +149 −20 lines

beacon_node/beacon_chain/src/beacon_chain.rs

Lines changed: 9 additions & 1 deletion
@@ -6778,13 +6778,21 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
     #[allow(clippy::type_complexity)]
     pub fn chain_dump(
         &self,
+    ) -> Result<Vec<BeaconSnapshot<T::EthSpec, BlindedPayload<T::EthSpec>>>, Error> {
+        self.chain_dump_from_slot(Slot::new(0))
+    }
+
+    /// As for `chain_dump` but dumping only the portion of the chain newer than `from_slot`.
+    pub fn chain_dump_from_slot(
+        &self,
+        from_slot: Slot,
     ) -> Result<Vec<BeaconSnapshot<T::EthSpec, BlindedPayload<T::EthSpec>>>, Error> {
         let mut dump = vec![];

         let mut prev_block_root = None;
         let mut prev_beacon_state = None;

-        for res in self.forwards_iter_block_roots(Slot::new(0))? {
+        for res in self.forwards_iter_block_roots(from_slot)? {
             let (beacon_block_root, _) = res?;

             // Do not include snapshots at skipped slots.
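
For context on the new API: a minimal sketch of how `chain_dump_from_slot` might be used to dump only the hot portion of the chain. This is not part of the commit; the `dump_hot_chain` helper is hypothetical, and the import paths and the public `chain.store` / `get_split_slot` accessors are assumptions based on how the tests below use them.

// Import paths are assumptions based on this repo's crate layout.
use beacon_chain::{BeaconChain, BeaconChainError as Error, BeaconChainTypes, BeaconSnapshot};
use types::BlindedPayload;

/// Hypothetical helper: dump only the portion of the chain the hot DB still
/// covers, i.e. everything from the freezer/hot split point onwards.
fn dump_hot_chain<T: BeaconChainTypes>(
    chain: &BeaconChain<T>,
) -> Result<Vec<BeaconSnapshot<T::EthSpec, BlindedPayload<T::EthSpec>>>, Error> {
    // On a non-archive node, states older than the split slot are not readily
    // available, so start the dump at the split rather than at genesis.
    let split_slot = chain.store.get_split_slot();
    chain.chain_dump_from_slot(split_slot)
}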

beacon_node/beacon_chain/src/schema_change/migration_schema_v24.rs

Lines changed: 21 additions & 8 deletions
@@ -177,6 +177,14 @@ pub fn upgrade_to_v24<T: BeaconChainTypes>(
     // If the node is already an archive node, we can set the anchor slot to 0 and copy
     // snapshots and diffs from the freezer DB to the hot DB in order to establish an initial
     // hot grid that is aligned/"perfect" (no `start_slot`/`anchor_slot` to worry about).
+    //
+    // This only works if all of the following are true:
+    //
+    // - We have the previous snapshot for the split state stored in the freezer DB, i.e.
+    //   if `previous_snapshot_slot >= state_upper_limit`.
+    // - The split state itself will be stored as a diff or snapshot in the new grid. We choose
+    //   not to support a split state that requires block replay, because computing its previous
+    //   state root from the DAG is not straight-forward.
     let dummy_start_slot = Slot::new(0);
     let closest_layer_points = db
         .hierarchy
@@ -191,9 +199,12 @@ pub fn upgrade_to_v24<T: BeaconChainTypes>(
             "closest_layer_points must not be empty".to_string(),
         ))?;

-    // If we have the previous snapshot stored in the freezer DB, then we can use this
-    // optimisation.
-    if previous_snapshot_slot >= anchor_info.state_upper_limit {
+    if previous_snapshot_slot >= anchor_info.state_upper_limit
+        && db
+            .hierarchy
+            .storage_strategy(split.slot, dummy_start_slot)
+            .is_ok_and(|strategy| !strategy.is_replay_from())
+    {
         info!(
             %previous_snapshot_slot,
             split_slot = %split.slot,
@@ -310,11 +321,13 @@ pub fn upgrade_to_v24<T: BeaconChainTypes>(
             StorageStrategy::DiffFrom(_) | StorageStrategy::Snapshot => {
                 // We choose to not compute states during the epoch with block replay for
                 // simplicity. Therefore we can only support a hot hierarchy config where the
-                // lowest layer is >= 5.
+                // lowest layer is >= log2(SLOTS_PER_EPOCH).
+                // FIXME(tree-states): we need to remove this restriction.
                 if slot % T::EthSpec::slots_per_epoch() != 0 {
-                    return Err(Error::MigrationError(
-                        "Hot hierarchy config lowest value must be >= 5".to_owned(),
-                    ));
+                    return Err(Error::MigrationError(format!(
+                        "Hot hierarchy config lowest value must be >= {}",
+                        T::EthSpec::slots_per_epoch().trailing_zeros()
+                    )));
                 }

                 // Load the full state and re-store it as a snapshot or diff.
@@ -344,7 +357,7 @@ pub fn upgrade_to_v24<T: BeaconChainTypes>(
             // 2. Convert the summary to the new format.
             if state_root == split.state_root {
                 return Err(Error::MigrationError(
-                    "unreachable: split state should be stored as a snapshot"
+                    "unreachable: split state should be stored as a snapshot or diff"
                         .to_string(),
                 ));
             }
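
The reworked error message derives the bound from the spec instead of hard-coding 5: since `slots_per_epoch` is always a power of two, `trailing_zeros()` is exactly its log2. That is why the old message said ">= 5" (mainnet has 2^5 = 32 slots per epoch) while the minimal spec only needs 3 (2^3 = 8). A standalone sketch of the arithmetic; `min_hierarchy_exponent` is an illustrative helper, not part of the commit:

fn min_hierarchy_exponent(slots_per_epoch: u64) -> u32 {
    // For a power of two, trailing_zeros() equals the base-2 logarithm.
    debug_assert!(slots_per_epoch.is_power_of_two());
    slots_per_epoch.trailing_zeros()
}

fn main() {
    assert_eq!(min_hierarchy_exponent(8), 3); // minimal spec
    assert_eq!(min_hierarchy_exponent(32), 5); // mainnet
}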

beacon_node/beacon_chain/tests/store_tests.rs

Lines changed: 107 additions & 11 deletions
@@ -3056,12 +3056,27 @@ async fn revert_minority_fork_on_resume() {
 // version is correct. This is the easiest schema test to write without historic versions of
 // Lighthouse on-hand, but has the disadvantage that the min version needs to be adjusted manually
 // as old downgrades are deprecated.
-#[tokio::test]
-async fn schema_downgrade_to_min_version() {
+async fn schema_downgrade_to_min_version(
+    store_config: StoreConfig,
+    reconstruct_historic_states: bool,
+) {
     let num_blocks_produced = E::slots_per_epoch() * 4;
     let db_path = tempdir().unwrap();
-    let store = get_store(&db_path);
-    let harness = get_harness(store.clone(), LOW_VALIDATOR_COUNT);
+    let spec = test_spec::<E>();
+
+    let chain_config = ChainConfig {
+        reconstruct_historic_states,
+        ..ChainConfig::default()
+    };
+    let import_all_data_columns = false;
+
+    let store = get_store_generic(&db_path, store_config.clone(), spec.clone());
+    let harness = get_harness_generic(
+        store.clone(),
+        LOW_VALIDATOR_COUNT,
+        chain_config.clone(),
+        import_all_data_columns,
+    );

     harness
         .extend_chain(
@@ -3082,7 +3097,7 @@ async fn schema_downgrade_to_min_version() {
     drop(harness);

     // Re-open the store.
-    let store = get_store(&db_path);
+    let store = get_store_generic(&db_path, store_config, spec);

     // Downgrade.
     migrate_schema::<DiskHarnessType<E>>(
@@ -3105,16 +3120,28 @@ async fn schema_downgrade_to_min_version() {
     // Recreate the harness.
     let harness = BeaconChainHarness::builder(MinimalEthSpec)
         .default_spec()
+        .chain_config(chain_config)
         .keypairs(KEYPAIRS[0..LOW_VALIDATOR_COUNT].to_vec())
         .testing_slot_clock(slot_clock)
         .resumed_disk_store(store.clone())
         .mock_execution_layer()
         .build();

+    // Check chain dump for appropriate range depending on whether this is an archive node.
+    let chain_dump_start_slot = if reconstruct_historic_states {
+        Slot::new(0)
+    } else {
+        store.get_split_slot()
+    };
+
     check_finalization(&harness, num_blocks_produced);
     check_split_slot(&harness, store.clone());
-    check_chain_dump(&harness, num_blocks_produced + 1);
-    check_iterators(&harness);
+    check_chain_dump_from_slot(
+        &harness,
+        chain_dump_start_slot,
+        num_blocks_produced + 1 - chain_dump_start_slot.as_u64(),
+    );
+    check_iterators_from_slot(&harness, chain_dump_start_slot);

     // Check that downgrading beyond the minimum version fails (bound is *tight*).
     let min_version_sub_1 = SchemaVersion(min_version.as_u64().checked_sub(1).unwrap());
@@ -3127,6 +3154,67 @@ async fn schema_downgrade_to_min_version() {
         .expect_err("should not downgrade below minimum version");
 }

+// Schema upgrade/downgrade on an archive node where the optimised migration does apply due
+// to the split state being aligned to a diff layer.
+#[tokio::test]
+async fn schema_downgrade_to_min_version_archive_node_grid_aligned() {
+    // Need to use 3 as the hierarchy exponent to get diffs on every epoch boundary with minimal
+    // spec.
+    schema_downgrade_to_min_version(
+        StoreConfig {
+            hierarchy_config: HierarchyConfig::from_str("3,4,5").unwrap(),
+            prune_payloads: false,
+            ..StoreConfig::default()
+        },
+        true,
+    )
+    .await
+}
+
+// Schema upgrade/downgrade on an archive node where the optimised migration DOES NOT apply
+// due to the split state NOT being aligned to a diff layer.
+#[tokio::test]
+async fn schema_downgrade_to_min_version_archive_node_grid_unaligned() {
+    schema_downgrade_to_min_version(
+        StoreConfig {
+            hierarchy_config: HierarchyConfig::from_str("7").unwrap(),
+            prune_payloads: false,
+            ..StoreConfig::default()
+        },
+        true,
+    )
+    .await
+}
+
+// Schema upgrade/downgrade on a full node with a fairly normal per-epoch diff config.
+#[tokio::test]
+async fn schema_downgrade_to_min_version_full_node_per_epoch_diffs() {
+    schema_downgrade_to_min_version(
+        StoreConfig {
+            hierarchy_config: HierarchyConfig::from_str("3,4,5").unwrap(),
+            prune_payloads: false,
+            ..StoreConfig::default()
+        },
+        false,
+    )
+    .await
+}
+
+// Schema upgrade/downgrade on a full node with dense per-slot diffs.
+// FIXME(tree-states): this will panic
+#[tokio::test]
+async fn schema_downgrade_to_min_version_full_node_dense_diffs() {
+    schema_downgrade_to_min_version(
+        StoreConfig {
+            hierarchy_config: HierarchyConfig::from_str("0,3,4,5").unwrap(),
+            prune_payloads: false,
+            ..StoreConfig::default()
+        },
+        true,
+    )
+    .await
+}
+
 /// Check that blob pruning prunes blobs older than the data availability boundary.
 #[tokio::test]
 async fn deneb_prune_blobs_happy_case() {
@@ -3700,7 +3788,11 @@ fn check_split_slot(

 /// Check that all the states in a chain dump have the correct tree hash.
 fn check_chain_dump(harness: &TestHarness, expected_len: u64) {
-    let mut chain_dump = harness.chain.chain_dump().unwrap();
+    check_chain_dump_from_slot(harness, Slot::new(0), expected_len)
+}
+
+fn check_chain_dump_from_slot(harness: &TestHarness, from_slot: Slot, expected_len: u64) {
+    let mut chain_dump = harness.chain.chain_dump_from_slot(from_slot).unwrap();

     assert_eq!(chain_dump.len() as u64, expected_len);

@@ -3748,7 +3840,7 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) {

     let mut forward_block_roots = harness
         .chain
-        .forwards_iter_block_roots(Slot::new(0))
+        .forwards_iter_block_roots(from_slot)
         .expect("should get iter")
         .map(Result::unwrap)
         .collect::<Vec<_>>();
@@ -3769,10 +3861,14 @@ fn check_chain_dump(harness: &TestHarness, expected_len: u64) {
 /// Check that every state from the canonical chain is in the database, and that the
 /// reverse state and block root iterators reach genesis.
 fn check_iterators(harness: &TestHarness) {
+    check_iterators_from_slot(harness, Slot::new(0))
+}
+
+fn check_iterators_from_slot(harness: &TestHarness, slot: Slot) {
     let mut max_slot = None;
     for (state_root, slot) in harness
         .chain
-        .forwards_iter_state_roots(Slot::new(0))
+        .forwards_iter_state_roots(slot)
         .expect("should get iter")
         .map(Result::unwrap)
     {
@@ -3794,7 +3890,7 @@ fn check_iterators(harness: &TestHarness) {
     assert_eq!(
         harness
             .chain
-            .forwards_iter_block_roots(Slot::new(0))
+            .forwards_iter_block_roots(slot)
             .expect("should get iter")
             .last()
             .map(Result::unwrap)
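
The hierarchy exponents chosen above determine which migration path each test exercises. Assuming each exponent denotes a power-of-two diff spacing (as with Lighthouse's hierarchy-exponents option), "3,4,5" puts a diff every 2^3 = 8 slots, i.e. on every minimal-spec epoch boundary, so the split slot lands on a diff point and the optimised migration applies; "7" spaces diffs 2^7 = 128 slots apart, leaving most epoch-aligned split slots reachable only by block replay; "0,3,4,5" adds per-slot diffs. A sketch of that reading; `densest_diff_spacing` is illustrative only:

fn densest_diff_spacing(exponents: &[u32]) -> u64 {
    // The smallest exponent gives the densest diff layer.
    1u64 << exponents.iter().min().copied().unwrap_or(0)
}

fn main() {
    assert_eq!(densest_diff_spacing(&[3, 4, 5]), 8); // one diff per minimal-spec epoch
    assert_eq!(densest_diff_spacing(&[7]), 128); // most epoch boundaries unaligned
    assert_eq!(densest_diff_spacing(&[0, 3, 4, 5]), 1); // dense per-slot diffs
}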

beacon_node/store/src/hdiff.rs

Lines changed: 12 additions & 0 deletions
@@ -788,6 +788,18 @@ impl StorageStrategy {
             Self::Snapshot => None,
         }
     }
+
+    pub fn is_replay_from(&self) -> bool {
+        matches!(self, Self::ReplayFrom(_))
+    }
+
+    pub fn is_diff_from(&self) -> bool {
+        matches!(self, Self::DiffFrom(_))
+    }
+
+    pub fn is_snapshot(&self) -> bool {
+        matches!(self, Self::Snapshot)
+    }
 }

 #[cfg(test)]
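
The new predicates let callers branch on a `StorageStrategy` without matching its variants, as the v24 migration above does with `is_replay_from`. A sketch of that gate; the import path and the `split_state_supported` helper are assumptions:

use store::hdiff::StorageStrategy; // assumed path: hdiff lives in the store crate

/// Hypothetical helper mirroring the migration's check: the optimised copy is
/// only safe when the split state will be stored as a snapshot or diff.
fn split_state_supported(strategy: &StorageStrategy) -> bool {
    // Equivalent to !strategy.is_replay_from() for the three variants.
    strategy.is_snapshot() || strategy.is_diff_from()
}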
