
Track occupied depth in backing per parachain (#5778)
slumber authored Jul 13, 2022
1 parent 88be445 commit 2c9296d
Showing 3 changed files with 214 additions and 56 deletions.
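
In short, each active leaf previously kept a single map of occupied depths shared by all parachains; after this commit it keeps one such map per ParaId. A minimal, runnable sketch of the new shape — the field name and map types follow the diff below, while ParaId and CandidateHash are simplified stand-ins:

use std::collections::{BTreeMap, HashMap};

// Stand-ins for the real polkadot_primitives types, for illustration only.
type ParaId = u32;
type CandidateHash = [u8; 32];

struct ActiveLeafState {
	// Before this commit: `BTreeMap<usize, CandidateHash>` — a single occupied-depth map
	// shared by every parachain under the leaf.
	// After: one occupied-depth map per parachain, so a para A candidate at depth 0 no
	// longer blocks seconding a para B candidate at depth 0 under the same leaf.
	seconded_at_depth: HashMap<ParaId, BTreeMap<usize, CandidateHash>>,
}

fn main() {
	let mut leaf = ActiveLeafState { seconded_at_depth: HashMap::new() };

	// Para 1 seconds a candidate at depth 0 under this leaf.
	leaf.seconded_at_depth.entry(1).or_default().insert(0, [0xAA; 32]);

	// Depth 0 is occupied for para 1 only; para 2 has no entry at all.
	assert!(leaf.seconded_at_depth[&1].contains_key(&0));
	assert!(!leaf.seconded_at_depth.contains_key(&2));
	println!("occupied: {:?}", leaf.seconded_at_depth.keys().collect::<Vec<_>>());
}
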
39 changes: 28 additions & 11 deletions node/core/backing/src/lib.rs
@@ -244,12 +244,13 @@ impl ProspectiveParachainsMode {
 struct ActiveLeafState {
 	prospective_parachains_mode: ProspectiveParachainsMode,
 	/// The candidates seconded at various depths under this active
-	/// leaf. A candidate can only be seconded when its hypothetical
-	/// depth under every active leaf has an empty entry in this map.
+	/// leaf with respect to parachain id. A candidate can only be
+	/// seconded when its hypothetical depth under every active leaf
+	/// has an empty entry in this map.
 	///
 	/// When prospective parachains are disabled, the only depth
 	/// which is allowed is 0.
-	seconded_at_depth: BTreeMap<usize, CandidateHash>,
+	seconded_at_depth: HashMap<ParaId, BTreeMap<usize, CandidateHash>>,
 }
 
 /// The state of the subsystem.
@@ -869,7 +870,7 @@ async fn handle_active_leaves_update<Context>(
 					// when prospective parachains are disabled is the leaf hash and 0,
 					// respectively. We've just learned about the leaf hash, so we cannot
 					// have any candidates seconded with it as a relay-parent yet.
-					seconded_at_depth: BTreeMap::new(),
+					seconded_at_depth: HashMap::new(),
 				},
 			);
 
@@ -895,7 +896,8 @@
 
 	for (candidate_hash, para_id) in remaining_seconded {
 		let (tx, rx) = oneshot::channel();
-		membership_answers.push(rx.map_ok(move |membership| (candidate_hash, membership)));
+		membership_answers
+			.push(rx.map_ok(move |membership| (para_id, candidate_hash, membership)));
 
 		ctx.send_message(ProspectiveParachainsMessage::GetTreeMembership(
 			para_id,
@@ -905,7 +907,7 @@
 		.await;
 	}
 
-	let mut seconded_at_depth = BTreeMap::new();
+	let mut seconded_at_depth = HashMap::new();
 	for response in membership_answers.next().await {
 		match response {
 			Err(oneshot::Canceled) => {
@@ -916,15 +918,17 @@
 
 					continue
 				},
-				Ok((candidate_hash, membership)) => {
+				Ok((para_id, candidate_hash, membership)) => {
 					// This request gives membership in all fragment trees. We have some
 					// wasted data here, and it can be optimized if it proves
 					// relevant to performance.
 					if let Some((_, depths)) =
 						membership.into_iter().find(|(leaf_hash, _)| leaf_hash == &leaf.hash)
 					{
+						let para_entry: &mut BTreeMap<usize, CandidateHash> =
+							seconded_at_depth.entry(para_id).or_default();
 						for depth in depths {
-							seconded_at_depth.insert(depth, candidate_hash);
+							para_entry.insert(depth, candidate_hash);
 						}
 					}
 				},
@@ -1163,7 +1167,11 @@ async fn seconding_sanity_check<Context>(
 			responses.push(rx.map_ok(move |depths| (depths, head, leaf_state)).boxed());
 		} else {
 			if head == &candidate_relay_parent {
-				if leaf_state.seconded_at_depth.contains_key(&0) {
+				if leaf_state
+					.seconded_at_depth
+					.get(&candidate_para)
+					.map_or(false, |occupied| occupied.contains_key(&0))
+				{
 					// The leaf is already occupied.
 					return SecondingAllowed::No
 				}
@@ -1188,7 +1196,11 @@
 			},
 			Ok((depths, head, leaf_state)) => {
 				for depth in &depths {
-					if leaf_state.seconded_at_depth.contains_key(&depth) {
+					if leaf_state
+						.seconded_at_depth
+						.get(&candidate_para)
+						.map_or(false, |occupied| occupied.contains_key(&depth))
+					{
 						gum::debug!(
 							target: LOG_TARGET,
 							?candidate_hash,
@@ -1323,8 +1335,13 @@ async fn handle_validated_candidate_command<Context>(
 					Some(d) => d,
 				};
 
+				let seconded_at_depth = leaf_data
+					.seconded_at_depth
+					.entry(candidate.descriptor().para_id)
+					.or_default();
+
 				for depth in depths {
-					leaf_data.seconded_at_depth.insert(depth, candidate_hash);
+					seconded_at_depth.insert(depth, candidate_hash);
 				}
 			}
 
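The lib.rs hunks above boil down to two operations on the new per-para map: recording the depths reported by the fragment tree (entry().or_default() plus insert, as in the membership loop and handle_validated_candidate_command) and testing occupancy per parachain (get() plus map_or, as in seconding_sanity_check). A self-contained sketch of both paths, with plain integers standing in for ParaId and CandidateHash:

use std::collections::{BTreeMap, HashMap};

fn main() {
	// Illustrative stand-ins: para ids and candidate hashes are plain integers here.
	let mut seconded_at_depth: HashMap<u32, BTreeMap<usize, u64>> = HashMap::new();

	// Mirrors the membership loop above: the fragment tree reports the depths a
	// seconded candidate occupies under the leaf, and each depth is recorded in
	// that parachain's map, created lazily with entry().or_default().
	let (para_id, candidate_hash, depths) = (7u32, 0xAA_u64, vec![0usize, 1]);
	let para_entry: &mut BTreeMap<usize, u64> = seconded_at_depth.entry(para_id).or_default();
	for depth in depths {
		para_entry.insert(depth, candidate_hash);
	}

	// Mirrors the seconding sanity check: depth 0 is now occupied for para 7,
	// while any other para still has no entry and is free to second at depth 0.
	assert!(seconded_at_depth
		.get(&7)
		.map_or(false, |occupied| occupied.contains_key(&0)));
	assert!(!seconded_at_depth
		.get(&8)
		.map_or(false, |occupied| occupied.contains_key(&0)));

	println!("occupied depths for para 7: {:?}", seconded_at_depth[&7].keys().collect::<Vec<_>>());
}
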
14 changes: 4 additions & 10 deletions node/core/backing/src/tests/mod.rs
@@ -32,8 +32,7 @@ use polkadot_node_subsystem::{
 };
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_primitives::v2::{
-	CandidateDescriptor, CollatorId, GroupRotationInfo, HeadData, PersistedValidationData,
-	ScheduledCore,
+	CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, ScheduledCore,
 };
 use sp_application_crypto::AppKey;
 use sp_keyring::Sr25519Keyring;
@@ -90,9 +89,8 @@ impl Default for TestState {
 	fn default() -> Self {
 		let chain_a = ParaId::from(1);
 		let chain_b = ParaId::from(2);
-		let thread_a = ParaId::from(3);
 
-		let chain_ids = vec![chain_a, chain_b, thread_a];
+		let chain_ids = vec![chain_a, chain_b];
 
 		let validators = vec![
 			Sr25519Keyring::Alice,
@@ -114,25 +112,21 @@
 
 		let validator_public = validator_pubkeys(&validators);
 
-		let validator_groups = vec![vec![2, 0, 3, 5], vec![1], vec![4]]
+		let validator_groups = vec![vec![2, 0, 3, 5], vec![1]]
 			.into_iter()
 			.map(|g| g.into_iter().map(ValidatorIndex).collect())
 			.collect();
 		let group_rotation_info =
 			GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 100, now: 1 };
 
-		let thread_collator: CollatorId = Sr25519Keyring::Two.public().into();
 		let availability_cores = vec![
 			CoreState::Scheduled(ScheduledCore { para_id: chain_a, collator: None }),
 			CoreState::Scheduled(ScheduledCore { para_id: chain_b, collator: None }),
-			CoreState::Scheduled(ScheduledCore {
-				para_id: thread_a,
-				collator: Some(thread_collator.clone()),
-			}),
 		];
 
 		let mut head_data = HashMap::new();
 		head_data.insert(chain_a, HeadData(vec![4, 5, 6]));
+		head_data.insert(chain_b, HeadData(vec![5, 6, 7]));
 
 		let relay_parent = Hash::repeat_byte(5);
 
