Skip to content

Commit 7c39d17

Browse files
committed
Fix rebase
1 parent 7dcbac0 commit 7c39d17

File tree

3 files changed

+59
-39
lines changed

3 files changed

+59
-39
lines changed

beacon_node/network/src/sync/range_sync/chain.rs

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -412,14 +412,15 @@ impl<T: BeaconChainTypes> SyncingChain<T> {
412412
BatchState::AwaitingProcessing(..) => {
413413
return self.process_batch(network, self.processing_target);
414414
}
415-
BatchState::Downloading(..) | BatchState::AwaitingDownload => {
415+
BatchState::Downloading(..) => {
416416
// Batch is not ready, nothing to process
417-
// A batch may remain in AwaitingDownload if it doesn't have enough peers yet
418417
}
419418
BatchState::Poisoned => unreachable!("Poisoned batch"),
420-
BatchState::Failed | BatchState::Processing(_) => {
419+
BatchState::Failed | BatchState::AwaitingDownload | BatchState::Processing(_) => {
421420
// these are all inconsistent states:
422421
// - Failed -> non recoverable batch. Chain should have been removed
422+
// - AwaitingDownload -> A recoverable failed batch should have been
423+
// re-requested.
423424
// - Processing -> `self.current_processing_batch` is None
424425
return Err(RemoveChain::WrongChainState(format!(
425426
"Robust target batch indicates inconsistent chain state: {:?}",

beacon_node/network/src/sync/tests/lookups.rs

Lines changed: 1 addition & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,6 @@ use types::{
4949
const D: Duration = Duration::new(0, 0);
5050
const PARENT_FAIL_TOLERANCE: u8 = SINGLE_BLOCK_LOOKUP_MAX_ATTEMPTS;
5151
const SAMPLING_REQUIRED_SUCCESSES: usize = 2;
52-
5352
type DCByRootIds = Vec<DCByRootId>;
5453
type DCByRootId = (SyncRequestId, Vec<ColumnIndex>);
5554

@@ -390,15 +389,6 @@ impl TestRig {
390389
self.new_connected_supernode_peer();
391390
}
392391

393-
pub fn connected_peers(&self) -> Vec<PeerId> {
394-
self.network_globals
395-
.peers
396-
.read()
397-
.connected_peers()
398-
.map(|(peer, _)| *peer)
399-
.collect()
400-
}
401-
402392
fn parent_chain_processed_success(
403393
&mut self,
404394
chain_hash: Hash256,
@@ -1121,7 +1111,7 @@ impl TestRig {
11211111
}
11221112

11231113
#[track_caller]
1124-
fn expect_empty_network(&mut self) {
1114+
pub fn expect_empty_network(&mut self) {
11251115
self.drain_network_rx();
11261116
if !self.network_rx_queue.is_empty() {
11271117
let n = self.network_rx_queue.len();

beacon_node/network/src/sync/tests/range.rs

Lines changed: 54 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -74,7 +74,7 @@ impl TestRig {
7474
/// Produce a head peer with an advanced head
7575
fn add_head_peer_with_root(&mut self, head_root: Hash256) -> PeerId {
7676
let local_info = self.local_info();
77-
self.add_peer(SyncInfo {
77+
self.add_random_peer(SyncInfo {
7878
head_root,
7979
head_slot: local_info.head_slot + 1 + Slot::new(SLOT_IMPORT_TOLERANCE as u64),
8080
..local_info
@@ -90,18 +90,14 @@ impl TestRig {
9090
fn add_finalized_peer_with_root(&mut self, finalized_root: Hash256) -> PeerId {
9191
let local_info = self.local_info();
9292
let finalized_epoch = local_info.finalized_epoch + 2;
93-
self.add_peer(SyncInfo {
93+
self.add_random_peer(SyncInfo {
9494
finalized_epoch,
9595
finalized_root,
9696
head_slot: finalized_epoch.start_slot(E::slots_per_epoch()),
9797
head_root: Hash256::random(),
9898
})
9999
}
100100

101-
fn add_finalized_peer_advanced_by(&mut self, advanced_epochs: Epoch) -> PeerId {
102-
self.add_peer(self.finalized_remote_info_advanced_by(advanced_epochs))
103-
}
104-
105101
fn finalized_remote_info_advanced_by(&self, advanced_epochs: Epoch) -> SyncInfo {
106102
let local_info = self.local_info();
107103
let finalized_epoch = local_info.finalized_epoch + advanced_epochs;
@@ -129,7 +125,13 @@ impl TestRig {
129125
}
130126
}
131127

132-
fn add_peer(&mut self, remote_info: SyncInfo) -> PeerId {
128+
fn add_random_peer_not_supernode(&mut self, remote_info: SyncInfo) -> PeerId {
129+
let peer_id = self.new_connected_peer();
130+
self.send_sync_message(SyncMessage::AddPeer(peer_id, remote_info));
131+
peer_id
132+
}
133+
134+
fn add_random_peer(&mut self, remote_info: SyncInfo) -> PeerId {
133135
// Create valid peer known to network globals
134136
// TODO(fulu): Using supernode peers to ensure we have peer across all column
135137
// subnets for syncing. Should add tests connecting to full node peers.
@@ -139,6 +141,17 @@ impl TestRig {
139141
peer_id
140142
}
141143

144+
fn add_random_peers(&mut self, remote_info: SyncInfo, count: usize) {
145+
for _ in 0..count {
146+
let peer = self.new_connected_peer();
147+
self.add_peer(peer, remote_info.clone());
148+
}
149+
}
150+
151+
fn add_peer(&mut self, peer: PeerId, remote_info: SyncInfo) {
152+
self.send_sync_message(SyncMessage::AddPeer(peer, remote_info));
153+
}
154+
142155
fn assert_state(&self, state: RangeSyncType) {
143156
assert_eq!(
144157
self.sync_manager
@@ -544,43 +557,59 @@ fn pause_and_resume_on_ee_offline() {
544557
const EXTRA_SYNCED_EPOCHS: u64 = 2 + 1;
545558

546559
#[test]
547-
fn finalized_sync_single_peer_happy_case() {
560+
fn finalized_sync_enough_global_custody_peers_few_chain_peers() {
548561
// Run for all forks
549562
let mut r = TestRig::test_setup();
563+
// This test creates enough global custody peers to satisfy column queries but only adds few
564+
// peers to the chain
550565
r.new_connected_peers_for_peerdas();
551566

552567
let advanced_epochs: u64 = 2;
553-
let block_peer = r.add_finalized_peer_advanced_by(advanced_epochs.into());
568+
let remote_info = r.finalized_remote_info_advanced_by(advanced_epochs.into());
569+
570+
// Current prioritization only sends batches to idle peers, so we need enough peers for each batch
571+
// TODO: Test this with a single peer in the chain, it should still work
572+
r.add_random_peers(
573+
remote_info,
574+
(advanced_epochs + EXTRA_SYNCED_EPOCHS) as usize,
575+
);
554576
r.assert_state(RangeSyncType::Finalized);
555577

556-
let last_epoch = 2 + EXTRA_SYNCED_EPOCHS;
557-
r.complete_and_process_range_sync_until(last_epoch, filter().peer(block_peer));
578+
let last_epoch = advanced_epochs + EXTRA_SYNCED_EPOCHS;
579+
r.complete_and_process_range_sync_until(last_epoch, filter());
558580
}
559581

560582
#[test]
561-
fn finalized_sync_initially_no_peers() {
562-
let Some(mut r) = TestRig::test_setup_after_fulu() else {
583+
fn finalized_sync_not_enough_custody_peers_on_start() {
584+
let mut r = TestRig::test_setup();
585+
// Only run post-PeerDAS
586+
if !r.fork_name.fulu_enabled() {
563587
return;
564-
};
565-
r.new_connected_peers_for_peerdas();
588+
}
566589

567590
let advanced_epochs: u64 = 2;
568591
let remote_info = r.finalized_remote_info_advanced_by(advanced_epochs.into());
569592

570593
// Unlikely that the single peer we added has enough columns for us. Find a way to make this test
571594
// deterministic.
572-
let block_peer = r.new_connected_peer();
573-
r.send_sync_message(SyncMessage::AddPeer(block_peer, remote_info.clone()));
595+
r.add_random_peer_not_supernode(remote_info.clone());
574596
r.assert_state(RangeSyncType::Finalized);
575-
// Here all batches should be queued but stuck in AwaitingDownload state after not being able to
576-
// find custody peer on some specific column
577597

578-
// Now add the rest of connected peers to the chain, which includes a supernode, and so all
579-
// columns should be requested.
580-
for peer in r.connected_peers() {
581-
r.send_sync_message(SyncMessage::AddPeer(peer, remote_info.clone()));
582-
}
598+
// Because we don't have enough peers on all columns we haven't sent any request.
599+
// NOTE: There's a small chance that this single peer happens to custody exactly the set we
600+
// expect, in that case the test will fail. Find a way to make the test deterministic.
601+
r.expect_empty_network();
583602

584-
let last_epoch = 2 + EXTRA_SYNCED_EPOCHS;
603+
// Generate enough peers and supernodes to cover all custody columns
604+
r.new_connected_peers_for_peerdas();
605+
// Note: not necessary to add these peers to the chain, as we draw from the global pool
606+
// We still need to add enough peers to trigger batch downloads with idle peers. Same issue as
607+
// the test above.
608+
r.add_random_peers(
609+
remote_info,
610+
(advanced_epochs + EXTRA_SYNCED_EPOCHS - 1) as usize,
611+
);
612+
613+
let last_epoch = advanced_epochs + EXTRA_SYNCED_EPOCHS;
585614
r.complete_and_process_range_sync_until(last_epoch, filter());
586615
}

0 commit comments

Comments
 (0)