From 559a46c790e702ba23b02e42b3cc4682802faa51 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 21 Feb 2022 17:46:46 -0600 Subject: [PATCH 01/76] inclusion emulator logic for asynchronous backing (#4790) * initial stab at candidate_context * fmt * docs & more TODOs * some cleanups * reframe as inclusion_emulator * documentations yes * update types * add constraint modifications * watermark * produce modifications * v2 primitives: re-export all v1 for consistency * vstaging primitives * emulator constraints: handle code upgrades * produce outbound HRMP modifications * stack. * method for applying modifications * method just for sanity-checking modifications * fragments produce modifications, not prospectives * make linear * add some TODOs * remove stacking; handle code upgrades * take `fragment` private * reintroduce stacking. * fragment constructor * add TODO * allow validating fragments against future constraints * docs * relay-parent number and min code size checks * check code upgrade restriction * check max hrmp per candidate * fmt * remove GoAhead logic because it wasn't helpful * docs on code upgrade failure * test stacking * test modifications against constraints * fmt * test fragments * descending or duplicate test * fmt * remove unused imports in vstaging * wrong primitives * spellcheck --- node/subsystem-util/Cargo.toml | 1 - .../src/inclusion_emulator/mod.rs | 14 + .../src/inclusion_emulator/staging.rs | 1226 +++++++++++++++++ node/subsystem-util/src/lib.rs | 3 + node/subsystem-util/src/runtime/mod.rs | 2 + primitives/src/lib.rs | 13 + primitives/src/v2/mod.rs | 78 +- primitives/src/vstaging/mod.rs | 28 + 8 files changed, 1325 insertions(+), 40 deletions(-) create mode 100644 node/subsystem-util/src/inclusion_emulator/mod.rs create mode 100644 node/subsystem-util/src/inclusion_emulator/staging.rs create mode 100644 primitives/src/vstaging/mod.rs diff --git a/node/subsystem-util/Cargo.toml b/node/subsystem-util/Cargo.toml index cf22f7916132..2d1688ee0dcf 100644 --- a/node/subsystem-util/Cargo.toml +++ b/node/subsystem-util/Cargo.toml @@ -38,4 +38,3 @@ log = "0.4.13" polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } lazy_static = "1.4.0" polkadot-primitives-test-helpers = { path = "../../primitives/test-helpers" } - diff --git a/node/subsystem-util/src/inclusion_emulator/mod.rs b/node/subsystem-util/src/inclusion_emulator/mod.rs new file mode 100644 index 000000000000..6ab19fa660bd --- /dev/null +++ b/node/subsystem-util/src/inclusion_emulator/mod.rs @@ -0,0 +1,14 @@ +// Copyright 2017-2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +pub mod staging; diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs new file mode 100644 index 000000000000..e886a9a0ff22 --- /dev/null +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -0,0 +1,1226 @@ +// Copyright 2017-2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+//! The implementation of the inclusion emulator for the 'staging' runtime version.
+//!
+//! This is currently `v1` (`v2`?), but will evolve to `v3`.
+// TODO https://github.com/paritytech/polkadot/issues/4803
+//!
+//! A set of utilities for node-side code to emulate the logic the runtime uses for checking
+//! parachain blocks in order to build prospective parachains that are produced ahead of the
+//! relay chain. These utilities allow the node-side to predict, with high accuracy, what
+//! the relay-chain will accept in the near future.
+//!
+//! This module has two key data types: [`Constraints`] and [`Fragment`]s. [`Constraints`]
+//! exhaustively define the set of valid inputs and outputs to parachain execution. A [`Fragment`]
+//! represents a parachain block, anchored to the relay-chain at a particular relay-chain block,
+//! known as the relay-parent.
+//!
+//! Every relay-parent is implicitly associated with a unique set of [`Constraints`] that describes
+//! the properties that must be true for a block to be included in a direct child of that block,
+//! assuming there is no intermediate parachain block pending availability.
+//!
+//! However, the key factor that makes asynchronously-grown prospective chains
+//! possible is the fact that the relay-chain accepts candidate blocks based on whether they
+//! are valid under the constraints of the present moment, not based on whether they were
+//! valid at the time of construction.
+//!
+//! As such, [`Fragment`]s are often, but not always, constructed in such a way that they are
+//! invalid at first and become valid later on, as the relay chain grows.
+//!
+//! # Usage
+//!
+//! It's expected that the users of this module will be building up trees of
+//! [`Fragment`]s and consistently pruning and adding to those trees.
+//!
+//! ## Operating Constraints
+//!
+//! The *operating constraints* of a `Fragment` are the constraints with which that fragment
+//! was intended to comply. The operating constraints are defined as the base constraints
+//! of the relay-parent of the fragment modified by the cumulative modifications of all
+//! fragments between the relay-parent and the current fragment.
+//!
+//! What the operating constraints are, in practice, is a prediction about the state of the
+//! relay-chain in the future. The relay-chain is aware of some current state, and we want to
+//! make an intelligent prediction about what might be accepted in the future based on
+//! prior fragments that also exist off-chain.
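+//!
+//! For example, a sketch of how the operating constraints for a third fragment in a chain
+//! might be computed (illustrative only; `base_constraints`, `frag_a`, and `frag_b` are
+//! assumed to already exist):
+//!
+//! ```ignore
+//! // Stack the modifications of the ancestors, oldest first.
+//! let mut cumulative = frag_a.constraint_modifications().clone();
+//! cumulative.stack(frag_b.constraint_modifications());
+//!
+//! // The operating constraints of the next fragment are the base constraints
+//! // of the relay-parent with those modifications applied.
+//! let operating_constraints = base_constraints.apply_modifications(&cumulative)?;
+//! ```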
+//!
+//! ## Fragment Trees
+//!
+//! As the relay-chain grows, some predictions come true and others come false.
+//! And new predictions get made. These three changes correspond distinctly to the
+//! three primary operations on fragment trees.
+//!
+//! A fragment tree is a mental model for thinking about a forking series of predictions
+//! about a single parachain. There may be one or more fragment trees per parachain.
+//!
+//! In expectation, most parachains will have a plausibly-unique authorship method,
+//! which means that they should really be much closer to fragment-chains,
+//! maybe with an occasional fork.
+//!
+//! Avoiding fragment-tree blowup is beyond the scope of this module.
+//!
+//! ### Pruning Fragment Trees
+//!
+//! When the relay-chain advances, we want to compare the new constraints
+//! of that relay-parent to the roots of the fragment trees we have. There are three cases.
+//!
+//! 1. The root fragment is still valid under the new constraints. In this case, we do nothing.
+//!    This is the "prediction still uncertain" case.
+//! 2. The root fragment is invalid under the new constraints because it has been subsumed by the
+//!    relay-chain. In this case, we can discard the root and split & re-root the fragment tree
+//!    under its descendants and compare to the new constraints again.
+//!    This is the "prediction came true" case.
+//! 3. The root fragment is invalid under the new constraints because a competing parachain block
+//!    has been included or it would never be accepted for some other reason. In this case we can
+//!    discard the entire fragment tree.
+//!    This is the "prediction came false" case.
+//!
+//! This is all a bit of a simplification because it assumes that the relay-chain advances without
+//! forks and is finalized instantly. In practice, the set of fragment-trees needs to be observable
+//! from the perspective of a few different possible forks of the relay-chain and not pruned
+//! too eagerly.
+//!
+//! Note that the fragments themselves don't need to change and the only thing we care about
+//! is whether the predictions they represent are still valid.
+//!
+//! ### Extending Fragment Trees
+//!
+//! As predictions fade into the past, new ones should be stacked on top.
+//!
+//! Every new relay-chain block is an opportunity to make a new prediction about the future.
+//! Higher-level logic should select the leaves of the fragment-trees to build upon, or decide
+//! whether to create a new fragment-tree.
+//!
+//! ### Code Upgrades
+//!
+//! Code upgrades are the main place where this emulation fails. The on-chain PVF upgrade
+//! scheduling logic is very path-dependent and intricate, so we just assume that code upgrades
+//! can't be initiated and applied within a single fragment-tree. Fragment-trees aren't deep
+//! in practice, and code upgrades are fairly rare. So what's likely to happen around code
+//! upgrades is that the entire fragment-tree has to get discarded at some point.
+//!
+//! That means a few blocks of execution time lost, which is not a big deal for code upgrades
+//! that, in practice, happen at most once every few weeks.
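+//!
+//! # Example
+//!
+//! A minimal sketch of the expected flow (illustrative only; building real `Constraints`,
+//! `RelayChainBlockInfo`, and `ProspectiveCandidate` values is elided):
+//!
+//! ```ignore
+//! // Constructing the fragment checks the candidate against the operating constraints.
+//! let fragment = Fragment::new(relay_parent, operating_constraints, candidate)?;
+//!
+//! // Later, as the relay-chain grows, re-check the same fragment against the
+//! // constraints of a newer relay-parent to see whether the prediction still holds.
+//! match fragment.validate_against_constraints(&new_constraints) {
+//!     Ok(()) => { /* prediction still plausible: keep the fragment */ },
+//!     Err(_) => { /* prediction came false: prune it */ },
+//! }
+//! ```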
+
+use polkadot_primitives::vstaging::{
+	BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId,
+	PersistedValidationData, UpgradeRestriction, ValidationCodeHash,
+};
+use std::collections::HashMap;
+
+/// Constraints on inbound HRMP channels.
+#[derive(Debug, Clone, PartialEq)]
+pub struct InboundHrmpLimitations {
+	/// An exhaustive set of all valid watermarks, sorted ascending.
+	pub valid_watermarks: Vec<BlockNumber>,
+}
+
+/// Constraints on outbound HRMP channels.
+#[derive(Debug, Clone, PartialEq)]
+pub struct OutboundHrmpChannelLimitations {
+	/// The maximum bytes that can be written to the channel.
+	pub bytes_remaining: usize,
+	/// The maximum messages that can be written to the channel.
+	pub messages_remaining: usize,
+}
+
+/// Constraints on the actions that can be taken by a new parachain
+/// block. These limitations are implicitly associated with some particular
+/// parachain, which should be apparent from usage.
+#[derive(Debug, Clone, PartialEq)]
+pub struct Constraints {
+	/// The minimum relay-parent number accepted under these constraints.
+	pub min_relay_parent_number: BlockNumber,
+	/// The maximum Proof-of-Validity size allowed, in bytes.
+	pub max_pov_size: usize,
+	/// The maximum new validation code size allowed, in bytes.
+	pub max_code_size: usize,
+	/// The number of UMP messages remaining.
+	pub ump_remaining: usize,
+	/// The number of UMP bytes remaining.
+	pub ump_remaining_bytes: usize,
+	/// The number of remaining DMP messages.
+	pub dmp_remaining_messages: usize,
+	/// The limitations of all registered inbound HRMP channels.
+	pub hrmp_inbound: InboundHrmpLimitations,
+	/// The limitations of all registered outbound HRMP channels.
+	pub hrmp_channels_out: HashMap<ParaId, OutboundHrmpChannelLimitations>,
+	/// The maximum number of HRMP messages allowed per candidate.
+	pub max_hrmp_num_per_candidate: usize,
+	/// The required parent head-data of the parachain.
+	pub required_parent: HeadData,
+	/// The expected validation-code-hash of this parachain.
+	pub validation_code_hash: ValidationCodeHash,
+	/// The code upgrade restriction signal as-of this parachain.
+	pub upgrade_restriction: Option<UpgradeRestriction>,
+	/// The future validation code hash, if any, and at what relay-parent
+	/// number the upgrade would be minimally applied.
+	pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>,
+}
+
+/// Kinds of errors that can occur when modifying constraints.
+#[derive(Debug, Clone, PartialEq)]
+pub enum ModificationError {
+	/// The HRMP watermark is not allowed.
+	DisallowedHrmpWatermark(BlockNumber),
+	/// No such HRMP outbound channel.
+	NoSuchHrmpChannel(ParaId),
+	/// Too many messages submitted to HRMP channel.
+	HrmpMessagesOverflow {
+		/// The ID of the recipient.
+		para_id: ParaId,
+		/// The number of remaining messages in the capacity of the channel.
+		messages_remaining: usize,
+		/// The number of messages submitted to the channel.
+		messages_submitted: usize,
+	},
+	/// Too many bytes submitted to HRMP channel.
+	HrmpBytesOverflow {
+		/// The ID of the recipient.
+		para_id: ParaId,
+		/// The number of remaining bytes in the capacity of the channel.
+		bytes_remaining: usize,
+		/// The number of bytes submitted to the channel.
+		bytes_submitted: usize,
+	},
+	/// Too many messages submitted to UMP.
+	UmpMessagesOverflow {
+		/// The number of remaining messages in the capacity of UMP.
+		messages_remaining: usize,
+		/// The number of messages submitted to UMP.
+		messages_submitted: usize,
+	},
+	/// Too many bytes submitted to UMP.
+	UmpBytesOverflow {
+		/// The number of remaining bytes in the capacity of UMP.
+		bytes_remaining: usize,
+		/// The number of bytes submitted to UMP.
+		bytes_submitted: usize,
+	},
+	/// Too many messages processed from DMP.
+	DmpMessagesUnderflow {
+		/// The number of messages waiting to be processed from DMP.
+		messages_remaining: usize,
+		/// The number of messages processed.
+		messages_processed: usize,
+	},
+	/// No validation code upgrade to apply.
+	AppliedNonexistentCodeUpgrade,
+}
+
+impl Constraints {
+	/// Check modifications against constraints.
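+	///
+	/// For example (illustrative; `constraints` is assumed to be obtained elsewhere):
+	///
+	/// ```ignore
+	/// let mut mods = ConstraintModifications::identity();
+	/// mods.ump_messages_sent = 3;
+	///
+	/// // `Ok(())` if at least 3 UMP messages remain in the constraints,
+	/// // `Err(ModificationError::UmpMessagesOverflow { .. })` otherwise.
+	/// constraints.check_modifications(&mods)?;
+	/// ```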
+ pub fn check_modifications( + &self, + modifications: &ConstraintModifications, + ) -> Result<(), ModificationError> { + if let Some(hrmp_watermark) = modifications.hrmp_watermark { + if self + .hrmp_inbound + .valid_watermarks + .iter() + .position(|w| w == &hrmp_watermark) + .is_none() + { + return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)) + } + } + + for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp { + if let Some(outbound) = self.hrmp_channels_out.get(&id) { + outbound.bytes_remaining.checked_sub(outbound_hrmp_mod.bytes_submitted).ok_or( + ModificationError::HrmpBytesOverflow { + para_id: *id, + bytes_remaining: outbound.bytes_remaining, + bytes_submitted: outbound_hrmp_mod.bytes_submitted, + }, + )?; + + outbound + .messages_remaining + .checked_sub(outbound_hrmp_mod.messages_submitted) + .ok_or(ModificationError::HrmpMessagesOverflow { + para_id: *id, + messages_remaining: outbound.messages_remaining, + messages_submitted: outbound_hrmp_mod.messages_submitted, + })?; + } else { + return Err(ModificationError::NoSuchHrmpChannel(*id)) + } + } + + self.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or( + ModificationError::UmpMessagesOverflow { + messages_remaining: self.ump_remaining, + messages_submitted: modifications.ump_messages_sent, + }, + )?; + + self.ump_remaining_bytes.checked_sub(modifications.ump_bytes_sent).ok_or( + ModificationError::UmpBytesOverflow { + bytes_remaining: self.ump_remaining_bytes, + bytes_submitted: modifications.ump_bytes_sent, + }, + )?; + + self.dmp_remaining_messages + .checked_sub(modifications.dmp_messages_processed) + .ok_or(ModificationError::DmpMessagesUnderflow { + messages_remaining: self.dmp_remaining_messages, + messages_processed: modifications.dmp_messages_processed, + })?; + + if self.future_validation_code.is_none() && modifications.code_upgrade_applied { + return Err(ModificationError::AppliedNonexistentCodeUpgrade) + } + + Ok(()) + } + + /// Apply modifications to these constraints. If this succeeds, it passes + /// all sanity-checks. 
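+	///
+	/// A usage sketch (illustrative; `constraints` and `mods` are assumed to exist):
+	///
+	/// ```ignore
+	/// // Unlike `check_modifications`, this returns the updated constraints, e.g.
+	/// // with the remaining UMP capacity reduced by the amount consumed.
+	/// let new_constraints = constraints.apply_modifications(&mods)?;
+	/// ```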
+	pub fn apply_modifications(
+		&self,
+		modifications: &ConstraintModifications,
+	) -> Result<Self, ModificationError> {
+		let mut new = self.clone();
+
+		if let Some(required_parent) = modifications.required_parent.as_ref() {
+			new.required_parent = required_parent.clone();
+		}
+
+		if let Some(hrmp_watermark) = modifications.hrmp_watermark {
+			match new.hrmp_inbound.valid_watermarks.iter().position(|w| w == &hrmp_watermark) {
+				Some(pos) => {
+					let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1);
+				},
+				None => return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)),
+			}
+		}
+
+		for (id, outbound_hrmp_mod) in &modifications.outbound_hrmp {
+			if let Some(outbound) = new.hrmp_channels_out.get_mut(&id) {
+				outbound.bytes_remaining = outbound
+					.bytes_remaining
+					.checked_sub(outbound_hrmp_mod.bytes_submitted)
+					.ok_or(ModificationError::HrmpBytesOverflow {
+						para_id: *id,
+						bytes_remaining: outbound.bytes_remaining,
+						bytes_submitted: outbound_hrmp_mod.bytes_submitted,
+					})?;
+
+				outbound.messages_remaining = outbound
+					.messages_remaining
+					.checked_sub(outbound_hrmp_mod.messages_submitted)
+					.ok_or(ModificationError::HrmpMessagesOverflow {
+						para_id: *id,
+						messages_remaining: outbound.messages_remaining,
+						messages_submitted: outbound_hrmp_mod.messages_submitted,
+					})?;
+			} else {
+				return Err(ModificationError::NoSuchHrmpChannel(*id))
+			}
+		}
+
+		new.ump_remaining = new.ump_remaining.checked_sub(modifications.ump_messages_sent).ok_or(
+			ModificationError::UmpMessagesOverflow {
+				messages_remaining: new.ump_remaining,
+				messages_submitted: modifications.ump_messages_sent,
+			},
+		)?;
+
+		new.ump_remaining_bytes = new
+			.ump_remaining_bytes
+			.checked_sub(modifications.ump_bytes_sent)
+			.ok_or(ModificationError::UmpBytesOverflow {
+				bytes_remaining: new.ump_remaining_bytes,
+				bytes_submitted: modifications.ump_bytes_sent,
+			})?;
+
+		new.dmp_remaining_messages = new
+			.dmp_remaining_messages
+			.checked_sub(modifications.dmp_messages_processed)
+			.ok_or(ModificationError::DmpMessagesUnderflow {
+				messages_remaining: new.dmp_remaining_messages,
+				messages_processed: modifications.dmp_messages_processed,
+			})?;
+
+		if modifications.code_upgrade_applied {
+			new.validation_code_hash = new
+				.future_validation_code
+				.take()
+				.ok_or(ModificationError::AppliedNonexistentCodeUpgrade)?
+				.1;
+		}
+
+		Ok(new)
+	}
+}
+
+/// Information about a relay-chain block.
+#[derive(Debug, Clone, PartialEq)]
+pub struct RelayChainBlockInfo {
+	/// The hash of the relay-chain block.
+	pub hash: Hash,
+	/// The number of the relay-chain block.
+	pub number: BlockNumber,
+	/// The storage-root of the relay-chain block.
+	pub storage_root: Hash,
+}
+
+/// An update to outbound HRMP channels.
+#[derive(Debug, Clone, PartialEq, Default)]
+pub struct OutboundHrmpChannelModification {
+	/// The number of bytes submitted to the channel.
+	pub bytes_submitted: usize,
+	/// The number of messages submitted to the channel.
+	pub messages_submitted: usize,
+}
+
+/// Modifications to constraints as a result of prospective candidates.
+#[derive(Debug, Clone, PartialEq)]
+pub struct ConstraintModifications {
+	/// The required parent head to build upon.
+	pub required_parent: Option<HeadData>,
+	/// The new HRMP watermark.
+	pub hrmp_watermark: Option<BlockNumber>,
+	/// Outbound HRMP channel modifications.
+	pub outbound_hrmp: HashMap<ParaId, OutboundHrmpChannelModification>,
+	/// The number of UMP messages sent.
+	pub ump_messages_sent: usize,
+	/// The number of UMP bytes sent.
+	pub ump_bytes_sent: usize,
+	/// The number of DMP messages processed.
+	pub dmp_messages_processed: usize,
+	/// Whether a pending code upgrade has been applied.
+	pub code_upgrade_applied: bool,
+}
+
+impl ConstraintModifications {
+	/// The 'identity' modifications: these can be applied to
+	/// any constraints and yield the exact same result.
+	pub fn identity() -> Self {
+		ConstraintModifications {
+			required_parent: None,
+			hrmp_watermark: None,
+			outbound_hrmp: HashMap::new(),
+			ump_messages_sent: 0,
+			ump_bytes_sent: 0,
+			dmp_messages_processed: 0,
+			code_upgrade_applied: false,
+		}
+	}
+
+	/// Stack other modifications on top of these.
+	///
+	/// This does no sanity-checking, so if `other` is garbage relative
+	/// to `self`, then the new value will be garbage as well.
+	///
+	/// This is an addition which is not commutative.
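+	///
+	/// A sketch of the intended use (illustrative):
+	///
+	/// ```ignore
+	/// let mut cumulative = ConstraintModifications::identity();
+	/// cumulative.stack(&mods_a); // modifications of the first fragment,
+	/// cumulative.stack(&mods_b); // then those of its child.
+	/// ```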
+	pub fn stack(&mut self, other: &Self) {
+		if let Some(ref new_parent) = other.required_parent {
+			self.required_parent = Some(new_parent.clone());
+		}
+		if let Some(ref new_hrmp_watermark) = other.hrmp_watermark {
+			self.hrmp_watermark = Some(new_hrmp_watermark.clone());
+		}
+
+		for (id, mods) in &other.outbound_hrmp {
+			let record = self.outbound_hrmp.entry(id.clone()).or_default();
+			record.messages_submitted += mods.messages_submitted;
+			record.bytes_submitted += mods.bytes_submitted;
+		}
+
+		self.ump_messages_sent += other.ump_messages_sent;
+		self.ump_bytes_sent += other.ump_bytes_sent;
+		self.dmp_messages_processed += other.dmp_messages_processed;
+		self.code_upgrade_applied |= other.code_upgrade_applied;
+	}
+}
+
+/// The prospective candidate.
+///
+/// This comprises the key information that represents a candidate
+/// without pinning it to a particular session. For example, everything
+/// to do with the collator's signature and commitments is represented
+/// here. But the erasure-root is not. This means that prospective candidates
+/// are not correlated to any session in particular.
+#[derive(Debug, Clone, PartialEq)]
+pub struct ProspectiveCandidate {
+	/// The commitments to the output of the execution.
+	pub commitments: CandidateCommitments,
+	/// The collator that created the candidate.
+	pub collator: CollatorId,
+	/// The signature of the collator on the payload.
+	pub collator_signature: CollatorSignature,
+	/// The persisted validation data used to create the candidate.
+	pub persisted_validation_data: PersistedValidationData,
+	/// The hash of the PoV.
+	pub pov_hash: Hash,
+	/// The validation code hash used by the candidate.
+	pub validation_code_hash: ValidationCodeHash,
+}
+
+/// Kinds of errors with the validity of a fragment.
+#[derive(Debug, Clone, PartialEq)]
+pub enum FragmentValidityError {
+	/// The validation code of the candidate doesn't match the
+	/// operating constraints.
+	///
+	/// Expected, Got
+	ValidationCodeMismatch(ValidationCodeHash, ValidationCodeHash),
+	/// The persisted-validation-data doesn't match.
+	///
+	/// Expected, Got
+	PersistedValidationDataMismatch(PersistedValidationData, PersistedValidationData),
+	/// The outputs of the candidate are invalid under the operating
+	/// constraints.
+	OutputsInvalid(ModificationError),
+	/// New validation code size too big.
+	///
+	/// Max allowed, new.
+	CodeSizeTooLarge(usize, usize),
+	/// Relay parent too old.
+	///
+	/// Min allowed, current.
+	RelayParentTooOld(BlockNumber, BlockNumber),
+	/// Too many messages submitted to all HRMP channels.
+	HrmpMessagesPerCandidateOverflow {
+		/// The number of messages a single candidate can submit.
+		messages_allowed: usize,
+		/// The number of messages sent to all HRMP channels.
+		messages_submitted: usize,
+	},
+	/// Code upgrade not allowed.
+	CodeUpgradeRestricted,
+	/// HRMP messages are not ascending or are duplicate.
+	///
+	/// The `usize` is the index into the outbound HRMP messages of
+	/// the candidate.
+	HrmpMessagesDescendingOrDuplicate(usize),
+}
+
+/// A parachain fragment, representing another prospective parachain block.
+///
+/// This is a type which guarantees that the candidate is valid under the
+/// operating constraints.
+#[derive(Debug, Clone, PartialEq)]
+pub struct Fragment {
+	/// The new relay-parent.
+	relay_parent: RelayChainBlockInfo,
+	/// The constraints this fragment is operating under.
+	operating_constraints: Constraints,
+	/// The core information about the prospective candidate.
+	candidate: ProspectiveCandidate,
+	/// Modifications to the constraints based on the outputs of
+	/// the candidate.
+	modifications: ConstraintModifications,
+}
+
+impl Fragment {
+	/// Create a new fragment.
+	///
+	/// This fails if the fragment isn't in line with the operating
+	/// constraints. That is, either its inputs or its outputs fail
+	/// checks against the constraints.
+	///
+	/// This doesn't check that the collator signature is valid or
+	/// whether the PoV is small enough.
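+	///
+	/// A construction sketch (illustrative; the inputs are assumed to exist):
+	///
+	/// ```ignore
+	/// match Fragment::new(relay_parent, operating_constraints, candidate) {
+	///     Ok(fragment) => { /* the candidate fits; add it to the fragment tree */ },
+	///     Err(FragmentValidityError::RelayParentTooOld(min, got)) => { /* reject it */ },
+	///     Err(_) => { /* reject for another validity reason */ },
+	/// }
+	/// ```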
+	pub fn new(
+		relay_parent: RelayChainBlockInfo,
+		operating_constraints: Constraints,
+		candidate: ProspectiveCandidate,
+	) -> Result<Self, FragmentValidityError> {
+		let modifications = {
+			let commitments = &candidate.commitments;
+			ConstraintModifications {
+				required_parent: Some(commitments.head_data.clone()),
+				hrmp_watermark: Some(commitments.hrmp_watermark),
+				outbound_hrmp: {
+					let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new();
+
+					let mut last_recipient = None::<ParaId>;
+					for (i, message) in commitments.horizontal_messages.iter().enumerate() {
+						if let Some(last) = last_recipient {
+							if last >= message.recipient {
+								return Err(
+									FragmentValidityError::HrmpMessagesDescendingOrDuplicate(i),
+								)
+							}
+						}
+
+						last_recipient = Some(message.recipient);
+						let record = outbound_hrmp.entry(message.recipient.clone()).or_default();
+
+						record.bytes_submitted += message.data.len();
+						record.messages_submitted += 1;
+					}
+
+					outbound_hrmp
+				},
+				ump_messages_sent: commitments.upward_messages.len(),
+				ump_bytes_sent: commitments.upward_messages.iter().map(|msg| msg.len()).sum(),
+				dmp_messages_processed: commitments.processed_downward_messages as _,
+				code_upgrade_applied: operating_constraints
+					.future_validation_code
+					.map_or(false, |(at, _)| relay_parent.number >= at),
+			}
+		};
+
+		validate_against_constraints(
+			&operating_constraints,
+			&relay_parent,
+			&candidate,
+			&modifications,
+		)?;
+
+		Ok(Fragment { relay_parent, operating_constraints, candidate, modifications })
+	}
+
+	/// Access the relay parent information.
+	pub fn relay_parent(&self) -> &RelayChainBlockInfo {
+		&self.relay_parent
+	}
+
+	/// Access the operating constraints.
+	pub fn operating_constraints(&self) -> &Constraints {
+		&self.operating_constraints
+	}
+
+	/// Access the underlying prospective candidate.
+	pub fn candidate(&self) -> &ProspectiveCandidate {
+		&self.candidate
+	}
+
+	/// Modifications to constraints based on the outputs of the candidate.
+	pub fn constraint_modifications(&self) -> &ConstraintModifications {
+		&self.modifications
+	}
+
+	/// Validate this fragment against some set of constraints
+	/// instead of the operating constraints.
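+	///
+	/// For example, when the relay-chain advances (illustrative; `future_constraints`
+	/// would be derived from a newer relay-parent):
+	///
+	/// ```ignore
+	/// if fragment.validate_against_constraints(&future_constraints).is_err() {
+	///     // The prediction this fragment represents no longer holds; prune it.
+	/// }
+	/// ```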
+ pub fn validate_against_constraints( + &self, + constraints: &Constraints, + ) -> Result<(), FragmentValidityError> { + validate_against_constraints( + constraints, + &self.relay_parent, + &self.candidate, + &self.modifications, + ) + } +} + +fn validate_against_constraints( + constraints: &Constraints, + relay_parent: &RelayChainBlockInfo, + candidate: &ProspectiveCandidate, + modifications: &ConstraintModifications, +) -> Result<(), FragmentValidityError> { + let expected_pvd = PersistedValidationData { + parent_head: constraints.required_parent.clone(), + relay_parent_number: relay_parent.number, + relay_parent_storage_root: relay_parent.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }; + + if expected_pvd != candidate.persisted_validation_data { + return Err(FragmentValidityError::PersistedValidationDataMismatch( + expected_pvd, + candidate.persisted_validation_data.clone(), + )) + } + + if constraints.validation_code_hash != candidate.validation_code_hash { + return Err(FragmentValidityError::ValidationCodeMismatch( + constraints.validation_code_hash, + candidate.validation_code_hash, + )) + } + + if relay_parent.number < constraints.min_relay_parent_number { + return Err(FragmentValidityError::RelayParentTooOld( + constraints.min_relay_parent_number, + relay_parent.number, + )) + } + + if candidate.commitments.new_validation_code.is_some() { + match constraints.upgrade_restriction { + None => {}, + Some(UpgradeRestriction::Present) => + return Err(FragmentValidityError::CodeUpgradeRestricted), + } + } + + let announced_code_size = candidate + .commitments + .new_validation_code + .as_ref() + .map_or(0, |code| code.0.len()); + + if announced_code_size > constraints.max_code_size { + return Err(FragmentValidityError::CodeSizeTooLarge( + constraints.max_code_size, + announced_code_size, + )) + } + + if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate { + return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { + messages_allowed: constraints.max_hrmp_num_per_candidate, + messages_submitted: candidate.commitments.horizontal_messages.len(), + }) + } + + constraints + .check_modifications(&modifications) + .map_err(FragmentValidityError::OutputsInvalid) +} + +#[cfg(test)] +mod tests { + use super::*; + use polkadot_primitives::vstaging::{CollatorPair, OutboundHrmpMessage, ValidationCode}; + use sp_application_crypto::Pair; + + #[test] + fn stack_modifications() { + let para_a = ParaId::from(1u32); + let para_b = ParaId::from(2u32); + let para_c = ParaId::from(3u32); + + let a = ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert( + para_a, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map.insert( + para_b, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map + }, + ump_messages_sent: 6, + ump_bytes_sent: 1000, + dmp_messages_processed: 5, + code_upgrade_applied: true, + }; + + let b = ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert( + para_b, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map.insert( + para_c, + OutboundHrmpChannelModification { bytes_submitted: 100, messages_submitted: 5 }, + ); + + map + }, + ump_messages_sent: 6, + ump_bytes_sent: 1000, + dmp_messages_processed: 5, + code_upgrade_applied: 
true, + }; + + let mut c = a.clone(); + c.stack(&b); + + assert_eq!( + c, + ConstraintModifications { + required_parent: None, + hrmp_watermark: None, + outbound_hrmp: { + let mut map = HashMap::new(); + map.insert( + para_a, + OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }, + ); + + map.insert( + para_b, + OutboundHrmpChannelModification { + bytes_submitted: 200, + messages_submitted: 10, + }, + ); + + map.insert( + para_c, + OutboundHrmpChannelModification { + bytes_submitted: 100, + messages_submitted: 5, + }, + ); + + map + }, + ump_messages_sent: 12, + ump_bytes_sent: 2000, + dmp_messages_processed: 10, + code_upgrade_applied: true, + }, + ); + + let mut d = ConstraintModifications::identity(); + d.stack(&a); + d.stack(&b); + + assert_eq!(c, d); + } + + fn make_constraints() -> Constraints { + let para_a = ParaId::from(1u32); + let para_b = ParaId::from(2u32); + let para_c = ParaId::from(3u32); + + Constraints { + min_relay_parent_number: 5, + max_pov_size: 1000, + max_code_size: 1000, + ump_remaining: 10, + ump_remaining_bytes: 1024, + dmp_remaining_messages: 5, + hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![6, 8] }, + hrmp_channels_out: { + let mut map = HashMap::new(); + + map.insert( + para_a, + OutboundHrmpChannelLimitations { messages_remaining: 5, bytes_remaining: 512 }, + ); + + map.insert( + para_b, + OutboundHrmpChannelLimitations { + messages_remaining: 10, + bytes_remaining: 1024, + }, + ); + + map.insert( + para_c, + OutboundHrmpChannelLimitations { messages_remaining: 1, bytes_remaining: 128 }, + ); + + map + }, + max_hrmp_num_per_candidate: 5, + required_parent: HeadData::from(vec![1, 2, 3]), + validation_code_hash: ValidationCode(vec![4, 5, 6]).hash(), + upgrade_restriction: None, + future_validation_code: None, + } + } + + #[test] + fn constraints_disallowed_watermark() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.hrmp_watermark = Some(7); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::DisallowedHrmpWatermark(7)), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::DisallowedHrmpWatermark(7)), + ); + } + + #[test] + fn constraints_no_such_hrmp_channel() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let bad_para = ParaId::from(100u32); + modifications.outbound_hrmp.insert( + bad_para, + OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 0 }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::NoSuchHrmpChannel(bad_para)), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::NoSuchHrmpChannel(bad_para)), + ); + } + + #[test] + fn constraints_hrmp_messages_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let para_a = ParaId::from(1u32); + modifications.outbound_hrmp.insert( + para_a, + OutboundHrmpChannelModification { bytes_submitted: 0, messages_submitted: 6 }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::HrmpMessagesOverflow { + para_id: para_a, + messages_remaining: 5, + messages_submitted: 6, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::HrmpMessagesOverflow { + para_id: para_a, + messages_remaining: 5, 
+ messages_submitted: 6, + }), + ); + } + + #[test] + fn constraints_hrmp_bytes_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + let para_a = ParaId::from(1u32); + modifications.outbound_hrmp.insert( + para_a, + OutboundHrmpChannelModification { bytes_submitted: 513, messages_submitted: 1 }, + ); + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::HrmpBytesOverflow { + para_id: para_a, + bytes_remaining: 512, + bytes_submitted: 513, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::HrmpBytesOverflow { + para_id: para_a, + bytes_remaining: 512, + bytes_submitted: 513, + }), + ); + } + + #[test] + fn constraints_ump_messages_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.ump_messages_sent = 11; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::UmpMessagesOverflow { + messages_remaining: 10, + messages_submitted: 11, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::UmpMessagesOverflow { + messages_remaining: 10, + messages_submitted: 11, + }), + ); + } + + #[test] + fn constraints_ump_bytes_overflow() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.ump_bytes_sent = 1025; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::UmpBytesOverflow { + bytes_remaining: 1024, + bytes_submitted: 1025, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::UmpBytesOverflow { + bytes_remaining: 1024, + bytes_submitted: 1025, + }), + ); + } + + #[test] + fn constraints_dmp_messages() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.dmp_messages_processed = 6; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::DmpMessagesUnderflow { + messages_remaining: 5, + messages_processed: 6, + }), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::DmpMessagesUnderflow { + messages_remaining: 5, + messages_processed: 6, + }), + ); + } + + #[test] + fn constraints_nonexistent_code_upgrade() { + let constraints = make_constraints(); + let mut modifications = ConstraintModifications::identity(); + modifications.code_upgrade_applied = true; + + assert_eq!( + constraints.check_modifications(&modifications), + Err(ModificationError::AppliedNonexistentCodeUpgrade), + ); + + assert_eq!( + constraints.apply_modifications(&modifications), + Err(ModificationError::AppliedNonexistentCodeUpgrade), + ); + } + + fn make_candidate( + constraints: &Constraints, + relay_parent: &RelayChainBlockInfo, + ) -> ProspectiveCandidate { + let collator_pair = CollatorPair::generate().0; + let collator = collator_pair.public(); + + let sig = collator_pair.sign(b"blabla".as_slice()); + + ProspectiveCandidate { + commitments: CandidateCommitments { + upward_messages: Vec::new(), + horizontal_messages: Vec::new(), + new_validation_code: None, + head_data: HeadData::from(vec![1, 2, 3, 4, 5]), + processed_downward_messages: 0, + hrmp_watermark: relay_parent.number, + }, + collator, + collator_signature: sig, + persisted_validation_data: PersistedValidationData { + parent_head: 
constraints.required_parent.clone(), + relay_parent_number: relay_parent.number, + relay_parent_storage_root: relay_parent.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }, + pov_hash: Hash::repeat_byte(1), + validation_code_hash: constraints.validation_code_hash, + } + } + + #[test] + fn fragment_validation_code_mismatch() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let expected_code = constraints.validation_code_hash.clone(); + let got_code = ValidationCode(vec![9, 9, 9]).hash(); + + candidate.validation_code_hash = got_code; + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::ValidationCodeMismatch(expected_code, got_code,)), + ) + } + + #[test] + fn fragment_pvd_mismatch() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let relay_parent_b = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0b), + storage_root: Hash::repeat_byte(0xee), + }; + + let constraints = make_constraints(); + let candidate = make_candidate(&constraints, &relay_parent); + + let expected_pvd = PersistedValidationData { + parent_head: constraints.required_parent.clone(), + relay_parent_number: relay_parent_b.number, + relay_parent_storage_root: relay_parent_b.storage_root, + max_pov_size: constraints.max_pov_size as u32, + }; + + let got_pvd = candidate.persisted_validation_data.clone(); + + assert_eq!( + Fragment::new(relay_parent_b, constraints, candidate), + Err(FragmentValidityError::PersistedValidationDataMismatch(expected_pvd, got_pvd,)), + ); + } + + #[test] + fn fragment_code_size_too_large() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let max_code_size = constraints.max_code_size; + candidate.commitments.new_validation_code = Some(vec![0; max_code_size + 1].into()); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::CodeSizeTooLarge(max_code_size, max_code_size + 1,)), + ); + } + + #[test] + fn fragment_relay_parent_too_old() { + let relay_parent = RelayChainBlockInfo { + number: 3, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let candidate = make_candidate(&constraints, &relay_parent); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::RelayParentTooOld(5, 3,)), + ); + } + + #[test] + fn fragment_hrmp_messages_overflow() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + let max_hrmp = constraints.max_hrmp_num_per_candidate; + + candidate.commitments.horizontal_messages.extend((0..max_hrmp + 1).map(|i| { + OutboundHrmpMessage { recipient: ParaId::from(i as u32), data: vec![1, 2, 3] } + })); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { + messages_allowed: max_hrmp, + 
messages_submitted: max_hrmp + 1, + }), + ); + } + + #[test] + fn fragment_code_upgrade_restricted() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let mut constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + constraints.upgrade_restriction = Some(UpgradeRestriction::Present); + candidate.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3])); + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::CodeUpgradeRestricted), + ); + } + + #[test] + fn fragment_hrmp_messages_descending_or_duplicate() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + candidate.commitments.horizontal_messages = vec![ + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] }, + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, + ]; + + assert_eq!( + Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), + Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), + ); + + candidate.commitments.horizontal_messages = vec![ + OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] }, + OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, + ]; + + assert_eq!( + Fragment::new(relay_parent, constraints, candidate), + Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), + ); + } +} diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index bf120c945f02..adb77a331f55 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -82,6 +82,9 @@ pub mod reexports { pub use polkadot_overseer::gen::{SpawnNamed, SpawnedSubsystem, Subsystem, SubsystemContext}; } +/// An emulator for node-side code to predict the results of on-chain parachain inclusion +/// and predict future constraints. +pub mod inclusion_emulator; /// A rolling session window cache. pub mod rolling_session_window; /// Convenient and efficient runtime info access. diff --git a/node/subsystem-util/src/runtime/mod.rs b/node/subsystem-util/src/runtime/mod.rs index d7afac0b58c2..7031d234705f 100644 --- a/node/subsystem-util/src/runtime/mod.rs +++ b/node/subsystem-util/src/runtime/mod.rs @@ -329,3 +329,5 @@ where recv_runtime(request_validation_code_by_hash(relay_parent, validation_code_hash, sender).await) .await } + +// TODO [now] : a way of getting all [`ContextLimitations`] from runtime. diff --git a/primitives/src/lib.rs b/primitives/src/lib.rs index febcb175d0c9..dbff50917237 100644 --- a/primitives/src/lib.rs +++ b/primitives/src/lib.rs @@ -19,6 +19,19 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] +/// The minimum supported version of the primitives by this implementation. +pub const MIN_SUPPORTED_VERSION: u32 = 1; +/// The maximum supported version of the primitives by this implementation. +pub const MAX_SUPPORTED_VERSION: u32 = 2; + +/// The STAGING version. +pub const STAGING_VERSION: u32 = u32::MAX; + pub mod v0; pub mod v1; pub mod v2; + +// The 'staging' version is special - while other versions are set in stone, +// the staging version is malleable. Once it's released, it gets the next +// version number. 
+pub mod vstaging;
diff --git a/primitives/src/v2/mod.rs b/primitives/src/v2/mod.rs
index 065d5cc3c057..fe3aef940d5d 100644
--- a/primitives/src/v2/mod.rs
+++ b/primitives/src/v2/mod.rs
@@ -16,8 +16,6 @@
 
 //! `V2` Primitives.
 
-use crate::v1;
-
 use parity_scale_codec::{Decode, Encode};
 use primitives::RuntimeDebug;
 use scale_info::TypeInfo;
@@ -26,6 +24,8 @@ use sp_std::{collections::btree_map::BTreeMap, prelude::*};
 
 #[cfg(feature = "std")]
 use parity_util_mem::MallocSizeOf;
 
+pub use crate::v1::*;
+
 /// Information about validator sets of a session.
 #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)]
 #[cfg_attr(feature = "std", derive(PartialEq, MallocSizeOf))]
@@ -33,11 +33,11 @@ pub struct SessionInfo {
 	/****** New in v2 *******/
 	/// All the validators actively participating in parachain consensus.
 	/// Indices are into the broader validator set.
-	pub active_validator_indices: Vec<v1::ValidatorIndex>,
+	pub active_validator_indices: Vec<ValidatorIndex>,
 	/// A secure random seed for the session, gathered from BABE.
 	pub random_seed: [u8; 32],
 	/// The number of sessions to keep for disputes.
-	pub dispute_period: v1::SessionIndex,
+	pub dispute_period: SessionIndex,
 
 	/****** Old fields ******/
 	/// Validators in canonical ordering.
@@ -47,7 +47,7 @@ pub struct SessionInfo {
 	/// [`max_validators`](https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148).
 	///
 	/// `SessionInfo::validators` will be limited to `max_validators` when set.
-	pub validators: Vec<v1::ValidatorId>,
+	pub validators: Vec<ValidatorId>,
 	/// Validators' authority discovery keys for the session in canonical ordering.
 	///
 	/// NOTE: The first `validators.len()` entries will match the corresponding validators in
@@ -55,7 +55,7 @@ pub struct SessionInfo {
 	/// participating in parachain consensus - see
 	/// [`max_validators`](https://github.com/paritytech/polkadot/blob/a52dca2be7840b23c19c153cf7e110b1e3e475f8/runtime/parachains/src/configuration.rs#L148)
 	#[cfg_attr(feature = "std", ignore_malloc_size_of = "outside type")]
-	pub discovery_keys: Vec<v1::AuthorityDiscoveryId>,
+	pub discovery_keys: Vec<AuthorityDiscoveryId>,
 	/// The assignment keys for validators.
 	///
 	/// NOTE: There might be more authorities in the current session than validators participating
@@ -66,11 +66,11 @@ pub struct SessionInfo {
 	/// ```ignore
 	/// assignment_keys.len() == validators.len() && validators.len() <= discovery_keys.len()
 	/// ```
-	pub assignment_keys: Vec<v1::AssignmentId>,
+	pub assignment_keys: Vec<AssignmentId>,
 	/// Validators in shuffled ordering - these are the validator groups as produced
 	/// by the `Scheduler` module for the session and are typically referred to by
 	/// `GroupIndex`.
-	pub validator_groups: Vec<Vec<v1::ValidatorIndex>>,
+	pub validator_groups: Vec<Vec<ValidatorIndex>>,
 	/// The number of availability cores used by the protocol during this session.
 	pub n_cores: u32,
 	/// The zeroth delay tranche width.
@@ -86,8 +86,8 @@ pub struct SessionInfo {
 	pub needed_approvals: u32,
 }
 
-impl From<v1::SessionInfo> for SessionInfo {
-	fn from(old: v1::SessionInfo) -> SessionInfo {
+impl From<crate::v1::SessionInfo> for SessionInfo {
+	fn from(old: crate::v1::SessionInfo) -> SessionInfo {
 		SessionInfo {
 			// new fields
 			active_validator_indices: Vec::new(),
@@ -115,11 +115,11 @@ pub struct PvfCheckStatement {
 	/// `true` if the subject passed pre-checking and `false` otherwise.
 	pub accept: bool,
 	/// The validation code hash that was checked.
-	pub subject: v1::ValidationCodeHash,
+	pub subject: ValidationCodeHash,
 	/// The index of a session during which this statement is considered valid.
-	pub session_index: v1::SessionIndex,
+	pub session_index: SessionIndex,
 	/// The index of the validator from which this statement originates.
-	pub validator_index: v1::ValidatorIndex,
+	pub validator_index: ValidatorIndex,
 }
 
 impl PvfCheckStatement {
@@ -136,97 +136,97 @@ impl PvfCheckStatement {
 
 sp_api::decl_runtime_apis! {
 	/// The API for querying the state of parachains on-chain.
 	#[api_version(2)]
-	pub trait ParachainHost<H: Encode + Decode = v1::Hash, N: Encode + Decode = v1::BlockNumber> {
+	pub trait ParachainHost<H: Encode + Decode = Hash, N: Encode + Decode = BlockNumber> {
 		/// Get the current validators.
-		fn validators() -> Vec<v1::ValidatorId>;
+		fn validators() -> Vec<ValidatorId>;
 
 		/// Returns the validator groups and rotation info localized based on the hypothetical child
 		/// of a block whose state this is invoked on. Note that `now` in the `GroupRotationInfo`
 		/// should be the successor of the number of the block.
-		fn validator_groups() -> (Vec<Vec<v1::ValidatorIndex>>, v1::GroupRotationInfo<N>);
+		fn validator_groups() -> (Vec<Vec<ValidatorIndex>>, GroupRotationInfo<N>);
 
 		/// Yields information on all availability cores as relevant to the child block.
 		/// Cores are either free or occupied. Free cores can have paras assigned to them.
-		fn availability_cores() -> Vec<v1::CoreState<H, N>>;
+		fn availability_cores() -> Vec<CoreState<H, N>>;
 
 		/// Yields the persisted validation data for the given `ParaId` along with an assumption that
 		/// should be used if the para currently occupies a core.
 		///
 		/// Returns `None` if either the para is not registered or the assumption is `Freed`
 		/// and the para already occupies a core.
-		fn persisted_validation_data(para_id: v1::Id, assumption: v1::OccupiedCoreAssumption)
-			-> Option<v1::PersistedValidationData<H, N>>;
+		fn persisted_validation_data(para_id: Id, assumption: OccupiedCoreAssumption)
+			-> Option<PersistedValidationData<H, N>>;
 
 		/// Returns the persisted validation data for the given `ParaId` along with the corresponding
 		/// validation code hash. Instead of accepting an assumption about the para, matches the validation
 		/// data hash against an expected one and yields `None` if they're not equal.
 		fn assumed_validation_data(
-			para_id: v1::Id,
-			expected_persisted_validation_data_hash: v1::Hash,
-		) -> Option<(v1::PersistedValidationData<H, N>, v1::ValidationCodeHash)>;
+			para_id: Id,
+			expected_persisted_validation_data_hash: Hash,
+		) -> Option<(PersistedValidationData<H, N>, ValidationCodeHash)>;
 
 		/// Checks if the given validation outputs pass the acceptance criteria.
-		fn check_validation_outputs(para_id: v1::Id, outputs: v1::CandidateCommitments) -> bool;
+		fn check_validation_outputs(para_id: Id, outputs: CandidateCommitments) -> bool;
 
 		/// Returns the session index expected at a child of the block.
 		///
 		/// This can be used to instantiate a `SigningContext`.
-		fn session_index_for_child() -> v1::SessionIndex;
+		fn session_index_for_child() -> SessionIndex;
 
 		/// Old method to fetch v1 session info.
 		#[changed_in(2)]
-		fn session_info(index: v1::SessionIndex) -> Option<v1::SessionInfo>;
+		fn session_info(index: SessionIndex) -> Option<crate::v1::SessionInfo>;
 
 		/// Fetch the validation code used by a para, making the given `OccupiedCoreAssumption`.
 		///
 		/// Returns `None` if either the para is not registered or the assumption is `Freed`
 		/// and the para already occupies a core.
-		fn validation_code(para_id: v1::Id, assumption: v1::OccupiedCoreAssumption)
-			-> Option<v1::ValidationCode>;
+		fn validation_code(para_id: Id, assumption: OccupiedCoreAssumption)
+			-> Option<ValidationCode>;
 
 		/// Get the receipt of a candidate pending availability. This returns `Some` for any paras
 		/// assigned to occupied cores in `availability_cores` and `None` otherwise.
-		fn candidate_pending_availability(para_id: v1::Id) -> Option<v1::CommittedCandidateReceipt<H>>;
+		fn candidate_pending_availability(para_id: Id) -> Option<CommittedCandidateReceipt<H>>;
 
 		/// Get a vector of events concerning candidates that occurred within a block.
-		fn candidate_events() -> Vec<v1::CandidateEvent<H>>;
+		fn candidate_events() -> Vec<CandidateEvent<H>>;
 
 		/// Get all the pending inbound messages in the downward message queue for a para.
 		fn dmq_contents(
-			recipient: v1::Id,
-		) -> Vec<v1::InboundDownwardMessage<N>>;
+			recipient: Id,
+		) -> Vec<InboundDownwardMessage<N>>;
 
 		/// Get the contents of all channels addressed to the given recipient. Channels that have no
 		/// messages in them are also included.
-		fn inbound_hrmp_channels_contents(recipient: v1::Id) -> BTreeMap<v1::Id, Vec<v1::InboundHrmpMessage<N>>>;
+		fn inbound_hrmp_channels_contents(recipient: Id) -> BTreeMap<Id, Vec<InboundHrmpMessage<N>>>;
 
 		/// Get the validation code from its hash.
-		fn validation_code_by_hash(hash: v1::ValidationCodeHash) -> Option<v1::ValidationCode>;
+		fn validation_code_by_hash(hash: ValidationCodeHash) -> Option<ValidationCode>;
 
 		/// Scrape dispute-relevant data from on-chain: backing votes and resolved disputes.
-		fn on_chain_votes() -> Option<v1::ScrapedOnChainVotes<H>>;
+		fn on_chain_votes() -> Option<ScrapedOnChainVotes<H>>;
 
 		/***** Added in v2 *****/
 
 		/// Get the session info for the given session, if stored.
 		///
 		/// NOTE: This function is only available since parachain host version 2.
-		fn session_info(index: v1::SessionIndex) -> Option<SessionInfo>;
+		fn session_info(index: SessionIndex) -> Option<SessionInfo>;
 
 		/// Submits a PVF pre-checking statement into the transaction pool.
 		///
 		/// NOTE: This function is only available since parachain host version 2.
-		fn submit_pvf_check_statement(stmt: PvfCheckStatement, signature: v1::ValidatorSignature);
+		fn submit_pvf_check_statement(stmt: PvfCheckStatement, signature: ValidatorSignature);
 
 		/// Returns code hashes of PVFs that require pre-checking by validators in the active set.
 		///
 		/// NOTE: This function is only available since parachain host version 2.
-		fn pvfs_require_precheck() -> Vec<v1::ValidationCodeHash>;
+		fn pvfs_require_precheck() -> Vec<ValidationCodeHash>;
 
 		/// Fetch the hash of the validation code used by a para, making the given `OccupiedCoreAssumption`.
 		///
 		/// NOTE: This function is only available since parachain host version 2.
-		fn validation_code_hash(para_id: v1::Id, assumption: v1::OccupiedCoreAssumption)
-			-> Option<v1::ValidationCodeHash>;
+		fn validation_code_hash(para_id: Id, assumption: OccupiedCoreAssumption)
+			-> Option<ValidationCodeHash>;
 	}
 }
diff --git a/primitives/src/vstaging/mod.rs b/primitives/src/vstaging/mod.rs
new file mode 100644
index 000000000000..b26f78ff502e
--- /dev/null
+++ b/primitives/src/vstaging/mod.rs
@@ -0,0 +1,28 @@
+// Copyright 2017-2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Staging Primitives.
+
+use parity_scale_codec::{Decode, Encode};
+
+pub use crate::v2::*;
+
+sp_api::decl_runtime_apis! {
+	/// The API for querying the state of parachains on-chain.
+	// In the staging API, this is u32::MAX.
+	#[api_version(4294967295)]
+	pub trait ParachainHost<H: Encode + Decode = Hash, N: Encode + Decode = BlockNumber> {}
+}

From 37735ee26aae9ab0bc221b8549a2192911ecba48 Mon Sep 17 00:00:00 2001
From: Robert Habermeier
Date: Wed, 2 Mar 2022 11:32:49 -0600
Subject: [PATCH 02/76] Runtime changes for Asynchronous Backing (#4786)

* inclusion: utility for allowed relay-parents

* inclusion: use prev number instead of prev hash

* track most recent context of paras

* inclusion: accept previous relay-parents

* update dmp advancement rule for async backing

* fmt

* add a comment about validation outputs

* clean up a couple of TODOs

* weights

* fix weights

* fmt

* Resolve dmp todo

* Restore inclusion tests

* Restore paras_inherent tests

* MostRecentContext test

* Benchmark for new paras dispatchable

* Prepare check_validation_outputs for upgrade

* cargo run --quiet --profile=production  --features=runtime-benchmarks -- benchmark --chain=kusama-dev --steps=50 --repeat=20 --pallet=runtime_parachains::paras --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --header=./file_header.txt --output=./runtime/kusama/src/weights/runtime_parachains_paras.rs

* cargo run --quiet --profile=production  --features=runtime-benchmarks -- benchmark --chain=westend-dev --steps=50 --repeat=20 --pallet=runtime_parachains::paras --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --header=./file_header.txt --output=./runtime/westend/src/weights/runtime_parachains_paras.rs

* cargo run --quiet --profile=production  --features=runtime-benchmarks -- benchmark --chain=polkadot-dev --steps=50 --repeat=20 --pallet=runtime_parachains::paras --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --header=./file_header.txt --output=./runtime/polkadot/src/weights/runtime_parachains_paras.rs

* cargo run --quiet --profile=production  --features=runtime-benchmarks -- benchmark --chain=rococo-dev --steps=50 --repeat=20 --pallet=runtime_parachains::paras --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --header=./file_header.txt --output=./runtime/rococo/src/weights/runtime_parachains_paras.rs

* Implementers guide changes

* More tests for allowed relay parents

* Add a github issue link

* Compute group index based on relay parent

* Storage migration

* Move allowed parents tracker to shared

* Compile error

* Get group assigned to core at the next block

* Test group assignment

* fmt

* Error instead of panic

* Update guide

* Extend doc-comment

* Update runtime/parachains/src/shared.rs

Co-authored-by: Robert Habermeier
Co-authored-by: Chris Sosnin
Co-authored-by: Parity Bot
Co-authored-by: Chris Sosnin <48099298+slumber@users.noreply.github.com>
---
 roadmap/implementers-guide/src/runtime/dmp.md |   4 +-
 .../src/runtime/inclusion.md                  |  14 +-
 .../src/runtime/parainherent.md               |   9 +-
 .../implementers-guide/src/runtime/paras.md   |  13 +-
 .../src/runtime/scheduler.md                  |   1 -
 .../implementers-guide/src/runtime/shared.md  |  25 ++
 .../src/weights/runtime_parachains_paras.rs   |  36 +-
 runtime/parachains/src/dmp.rs                 |  16 +-
 runtime/parachains/src/dmp/tests.rs           |  34 +-
 runtime/parachains/src/inclusion/mod.rs       | 146 +++++---
 runtime/parachains/src/inclusion/tests.rs     | 315 ++++++++++++++++--
 runtime/parachains/src/paras/benchmarking.rs  |   4 +
 runtime/parachains/src/paras/mod.rs           |  25 ++
 runtime/parachains/src/paras/tests.rs         |  41 +++
 runtime/parachains/src/paras_inherent/mod.rs  |  76 +++--
 .../parachains/src/paras_inherent/tests.rs    |  16 -
 runtime/parachains/src/runtime_api_impl/v1.rs |   9 +-
 runtime/parachains/src/scheduler.rs           |  31 +-
 runtime/parachains/src/scheduler/migration.rs |  76 +++++
 runtime/parachains/src/scheduler/tests.rs     |  61 ++--
 runtime/parachains/src/shared.rs              |  91 ++++-
 .../src/weights/runtime_parachains_paras.rs   |  36 +-
 .../src/weights/runtime_parachains_paras.rs   |  36 +-
 .../src/weights/runtime_parachains_paras.rs   |  22 +-
 24 files changed, 885 insertions(+), 252 deletions(-)
 create mode 100644 runtime/parachains/src/scheduler/migration.rs

diff --git a/roadmap/implementers-guide/src/runtime/dmp.md b/roadmap/implementers-guide/src/runtime/dmp.md
index df261db94576..bade5ad4b8c4 100644
--- a/roadmap/implementers-guide/src/runtime/dmp.md
+++ b/roadmap/implementers-guide/src/runtime/dmp.md
@@ -27,9 +27,9 @@ No initialization routine runs for this module.

 Candidate Acceptance Function:

-* `check_processed_downward_messages(P: ParaId, processed_downward_messages: u32)`:
+* `check_processed_downward_messages(P: ParaId, relay_parent_number: BlockNumber, processed_downward_messages: u32)`:
+  1. Checks that `processed_downward_messages` is at least 1 if `DownwardMessageQueues` for `P` is not empty at the given `relay_parent_number`.
   1. Checks that `DownwardMessageQueues` for `P` is at least `processed_downward_messages` long.
-  1. Checks that `processed_downward_messages` is at least 1 if `DownwardMessageQueues` for `P` is not empty.

 Candidate Enactment:

diff --git a/roadmap/implementers-guide/src/runtime/inclusion.md b/roadmap/implementers-guide/src/runtime/inclusion.md
index 6df34ae4ddc1..183ed25d2edc 100644
--- a/roadmap/implementers-guide/src/runtime/inclusion.md
+++ b/roadmap/implementers-guide/src/runtime/inclusion.md
@@ -68,26 +68,26 @@ All failed checks should lead to an unrecoverable error making the block invalid
   1. check that the validator bit index is not out of bounds.
   1. check the validators signature, iff `full_check=FullCheck::Yes`.

-* `sanitize_backed_candidates<F: Fn(CandidateHash) -> bool>(
-    relay_parent: T::Hash,
+* `sanitize_backed_candidates<F: FnMut(usize, &BackedCandidate<T>) -> bool>(
     mut backed_candidates: Vec<BackedCandidate<T>>,
     candidate_has_concluded_invalid_dispute: F,
     scheduled: &[CoreAssignment],
  ) `
   1. filter out any backed candidates that have concluded invalid.
-  1. filter out backed candidates that don't have a matching `relay_parent`.
   1. filter backed candidates whose `ParaId` was scheduled by means of the provided `scheduled` parameter.
+  1. sort the remaining candidates with respect to the core index assigned to them.

-* `process_candidates(parent_storage_root, BackedCandidates, scheduled: Vec<CoreAssignment>, group_validators: Fn(GroupIndex) -> Option<Vec<ValidatorIndex>>)`:
+* `process_candidates(allowed_relay_parents, BackedCandidates, scheduled: Vec<CoreAssignment>, group_validators: Fn(GroupIndex) -> Option<Vec<ValidatorIndex>>)`:
+  > For details on `AllowedRelayParentsTracker` see documentation for the [Shared](./shared.md) module.
   1. check that each candidate corresponds to a scheduled core and that they are ordered in the same order the cores appear in assignments in `scheduled`.
   1. check that `scheduled` is sorted ascending by `CoreIndex`, without duplicates.
+  1. check that the relay-parent from each candidate receipt is one of the allowed relay-parents.
   1. check that there is no candidate pending availability for any scheduled `ParaId`.
-  1. check that each candidate's `validation_data_hash` corresponds to a `PersistedValidationData` computed from the current state.
-     > NOTE: With contextual execution in place, validation data will be obtained as of the state of the context block. However, only the state of the current block can be used for such a query.
+  1. check that each candidate's `validation_data_hash` corresponds to a `PersistedValidationData` computed from the state of the context block.
   1. If the core assignment includes a specific collator, ensure the backed candidate is issued by that collator.
   1. Ensure that any code upgrade scheduled by the candidate does not happen within `config.validation_upgrade_cooldown` of `Paras::last_code_upgrade(para_id, true)`, if any, comparing against the value of `Paras::FutureCodeUpgrades` for the given para ID.
   1. Check the collator's signature on the candidate data.
-  1. check the backing of the candidate using the signatures and the bitfields, comparing against the validators assigned to the groups, fetched with the `group_validators` lookup.
+  1. check the backing of the candidate using the signatures and the bitfields, comparing against the validators assigned to the groups, fetched with the `group_validators` lookup, where group indices are computed by `Scheduler` according to the group rotation info.
   1. call `Ump::check_upward_messages(para, commitments.upward_messages)` to check that the upward messages are valid.
   1. call `Dmp::check_processed_downward_messages(para, commitments.processed_downward_messages)` to check that the DMQ is properly drained.
   1. call `Hrmp::check_hrmp_watermark(para, commitments.hrmp_watermark)` for each candidate to check rules of processing the HRMP watermark.

diff --git a/roadmap/implementers-guide/src/runtime/parainherent.md b/roadmap/implementers-guide/src/runtime/parainherent.md
index dd67f9f108f8..5f8e5a6d1ad1 100644
--- a/roadmap/implementers-guide/src/runtime/parainherent.md
+++ b/roadmap/implementers-guide/src/runtime/parainherent.md
@@ -35,6 +35,7 @@ OnChainVotes: Option<ScrapedOnChainVotes>,
   1. Set `Included` as `Some`.
   1. Unpack `ParachainsInherentData` into `signed_bitfields`, `backed_candidates`, `parent_header`, and `disputes`.
   1. Hash the parent header and make sure that it corresponds to the block hash of the parent (tracked by the `frame_system` FRAME module).
+  1. Before anything else, add the previous block to `AllowedRelayParents` and read the resulting value back from `shared` storage.
   1. Calculate the `candidate_weight`, `bitfields_weight`, and `disputes_weight`.
   1. If the sum of `candidate_weight`, `bitfields_weight`, and `disputes_weight` is greater than the max block weight we do the following with the goal of prioritizing the inclusion of disputes without making it game-able by block authors:
     1. clear `bitfields` and set `bitfields_weight` equal to 0.
@@ -48,10 +49,9 @@ OnChainVotes: Option<ScrapedOnChainVotes>,
   1. If `Scheduler::availability_timeout_predicate` is `Some`, invoke `Inclusion::collect_pending` using it and annotate each of those freed cores with `FreedReason::TimedOut`.
   1. Combine and sort the bitfield-freed cores and the timed-out cores.
   1. Invoke `Scheduler::clear`
-  1. Invoke `Scheduler::schedule(freed_cores, System::current_block())`
-  1. Extract `parent_storage_root` from the parent header,
+  1. Invoke `Scheduler::schedule(freed_cores, System::current_block())`
   1. If `Disputes::concluded_invalid(current_session, candidate)` is true for any of the `backed_candidates`, fail.
-  1. Invoke the `Inclusion::process_candidates` routine with the parameters `(parent_storage_root, backed_candidates, Scheduler::scheduled(), Scheduler::group_validators)`.
+  1. Invoke the `Inclusion::process_candidates` routine with the parameters `(allowed_relay_parents, backed_candidates, Scheduler::scheduled(), Scheduler::group_validators)`.
   1. Deconstruct the returned `ProcessedCandidates` value into the `occupied` core indices and the backing validators per candidate, `backing_validators_per_candidate`, represented by `Vec<(CandidateReceipt, Vec<(ValidatorIndex, ValidityAttestation)>)>`.
   1. Set `OnChainVotes` to `ScrapedOnChainVotes`, based on the `current_session`, concluded `disputes`, and `backing_validators_per_candidate`.
   1. Call `Scheduler::occupied` using the `occupied` core indices returned above, first sorting the list of assigned core indices.
@@ -68,6 +68,7 @@ OnChainVotes: Option<ScrapedOnChainVotes>,
 * `create_inherent_inner(data: &InherentData) -> Option<ParachainsInherentData>`
   1. Unpack `InherentData` into its parts, `bitfields`, `backed_candidates`, `disputes` and the `parent_header`. If data cannot be unpacked return `None`.
   1. Hash the `parent_header` and make sure that it corresponds to the block hash of the parent (tracked by the `frame_system` FRAME module).
+  1. Read `AllowedRelayParents` from `shared` storage and add the previous block to this value so that we operate with the same look-back as in `enter`.
   1. Invoke `Disputes::filter_multi_dispute_data` to remove duplicates et al from `disputes`.
   1. Run the following within a `with_transaction` closure to avoid side effects (we are essentially replicating the logic that would otherwise happen within `enter` so we can get the filtered bitfields and the `concluded_invalid_disputes` + `scheduled` to use in filtering the `backed_candidates`.):
     1. Invoke `Disputes::provide_multi_dispute_data`.
@@ -81,7 +82,7 @@ OnChainVotes: Option<ScrapedOnChainVotes>,
     1. Invoke `<scheduler::Pallet<T>>::schedule` with `freed` and the current block number to create the same schedule of the cores that `enter` will create.
     1. Read the new `<scheduler::Pallet<T>>::scheduled()` into `schedule`.
     1. From the `with_transaction` closure return `concluded_invalid_disputes`, `bitfields`, and `scheduled`.
-  1. Invoke `sanitize_backed_candidates` using the `scheduled` return from the `with_transaction` and pass the closure `|candidate_hash: CandidateHash| -> bool { DisputesHandler::concluded_invalid(current_session, candidate_hash) }` for the param `candidate_has_concluded_invalid_dispute`.
+  1. Invoke `sanitize_backed_candidates` using the `scheduled` return from the `with_transaction` and pass the closure `|candidate_idx: usize, candidate_hash: CandidateHash| -> bool`, which returns `true` either if the candidate was concluded invalid in a dispute or if it doesn't pass verification in the context of the most recent parachain head, e.g. because its relay-parent is out of bounds or its commitments hashes mismatch.
   1. create a `rng` from `rand_chacha::ChaChaRng::from_seed(compute_entropy::<T>(parent_hash))`.
   1. Invoke `limit_disputes` with the max block weight and `rng`, storing the returned weight in `remaining_weight`.
   1. Fill the remainder of the block weight with backed candidates and bitfields by invoking `apply_weight_limit` with `remaining_weight` and `rng`.

diff --git a/roadmap/implementers-guide/src/runtime/paras.md b/roadmap/implementers-guide/src/runtime/paras.md
index af2e7add54e5..2d6024ba23b2 100644
--- a/roadmap/implementers-guide/src/runtime/paras.md
+++ b/roadmap/implementers-guide/src/runtime/paras.md
@@ -153,6 +153,8 @@ Parachains: Vec<ParaId>,
 ParaLifecycle: map ParaId => Option<ParaLifecycle>,
 /// The head-data of every registered para.
 Heads: map ParaId => Option<HeadData>;
+/// The context (relay-chain block number) of the most recent parachain head.
+MostRecentContext: map ParaId => BlockNumber;
 /// The validation code hash of every live para.
CurrentCodeHash: map ParaId => Option; /// Actual past code hash, indicated by the para id as well as the block number at which it became outdated. @@ -220,14 +222,12 @@ CodeByHash: map ValidationCodeHash => Option 1. Execute all queued actions for paralifecycle changes: 1. Clean up outgoing paras. - 1. This means removing the entries under `Heads`, `CurrentCode`, `FutureCodeUpgrades`, and - `FutureCode`. An according entry should be added to `PastCode`, `PastCodeMeta`, and - `PastCodePruning` using the outgoing `ParaId` and removed `CurrentCode` value. This is - because any outdated validation code must remain available on-chain for a determined amount + 1. This means removing the entries under `Heads`, `CurrentCode`, `FutureCodeUpgrades`, + `FutureCode` and `MostRecentContext`. An according entry should be added to `PastCode`, `PastCodeMeta`, and `PastCodePruning` using the outgoing `ParaId` and removed `CurrentCode` value. This is because any outdated validation code must remain available on-chain for a determined amount of blocks, and validation code outdated by de-registering the para is still subject to that invariant. 1. Apply all incoming paras by initializing the `Heads` and `CurrentCode` using the genesis - parameters. + parameters as well as `MostRecentContext` to `0`. 1. Amend the `Parachains` list and `ParaLifecycle` to reflect changes in registered parachains. 1. Amend the `ParaLifecycle` set to reflect changes in registered parathreads. 1. Upgrade all parathreads that should become parachains, updating the `Parachains` list and @@ -261,8 +261,7 @@ CodeByHash: map ValidationCodeHash => Option executed in the context of a relay-chain block with number >= `relay_parent + config.validation_upgrade_delay`. If the upgrade is scheduled `UpgradeRestrictionSignal` is set and it will remain set until `relay_parent + config.validation_upgrade_cooldown`. In case the PVF pre-checking is enabled, or the new code is not already present in the storage, then the PVF pre-checking run will be scheduled for that validation code. If the pre-checking concludes with rejection, then the upgrade is canceled. Otherwise, after pre-checking is concluded the upgrade will be scheduled and be enacted as described above. * `note_new_head(ParaId, HeadData, BlockNumber)`: note that a para has progressed to a new head, - where the new head was executed in the context of a relay-chain block with given number. This will - apply pending code upgrades based on the block number provided. If an upgrade took place it will clear the `UpgradeGoAheadSignal`. + where the new head was executed in the context of a relay-chain block with given number, the latter value is inserted into the `MostRecentContext` mapping. This will apply pending code upgrades based on the block number provided. If an upgrade took place it will clear the `UpgradeGoAheadSignal`. * `lifecycle(ParaId) -> Option`: Return the `ParaLifecycle` of a para. * `is_parachain(ParaId) -> bool`: Returns true if the para ID references any live parachain, including those which may be transitioning to a parathread in the future. diff --git a/roadmap/implementers-guide/src/runtime/scheduler.md b/roadmap/implementers-guide/src/runtime/scheduler.md index 16c3280d1808..7383177aa1cb 100644 --- a/roadmap/implementers-guide/src/runtime/scheduler.md +++ b/roadmap/implementers-guide/src/runtime/scheduler.md @@ -137,7 +137,6 @@ struct CoreAssignment { core: CoreIndex, para_id: ParaId, kind: AssignmentKind, - group_idx: GroupIndex, } // reasons a core might be freed. 
 enum FreedReason {

diff --git a/roadmap/implementers-guide/src/runtime/shared.md b/roadmap/implementers-guide/src/runtime/shared.md
index ae538928d5fe..58845e19a0dc 100644
--- a/roadmap/implementers-guide/src/runtime/shared.md
+++ b/roadmap/implementers-guide/src/runtime/shared.md
@@ -19,6 +19,27 @@ pub(crate) const SESSION_DELAY: SessionIndex = 2;

 ## Storage

+Helper structs:
+
+```rust
+struct AllowedRelayParentsTracker {
+    // The past relay parents, paired with state roots, that are viable to build upon.
+    //
+    // They are in ascending chronological order, so the newest relay parents are at
+    // the back of the deque.
+    //
+    // (relay_parent, state_root)
+    //
+    // NOTE: the size limit of look-back is currently defined as a constant in the Runtime.
+    buffer: VecDeque<(Hash, Hash)>,
+
+    // The number of the most recent relay-parent, if any.
+    latest_number: BlockNumber,
+}
+```
+
+Storage Layout:
+
 ```rust
 /// The current session index within the Parachains Runtime system.
 CurrentSessionIndex: SessionIndex;
@@ -28,6 +49,8 @@ ActiveValidatorIndices: Vec<ValidatorIndex>,
 /// The parachain attestation keys of the validators actively participating in parachain consensus.
 /// This should be the same length as `ActiveValidatorIndices`.
 ActiveValidatorKeys: Vec<ValidatorId>
+/// Relay-parents allowed to build candidates upon.
+AllowedRelayParents: AllowedRelayParentsTracker,
 ```

 ## Initialization
@@ -51,6 +74,8 @@ This information is used in the:
   passed.
 * Paras Module: For delaying updates to paras until at least one full session has passed.

+The allowed relay parents buffer, which is maintained by the [ParaInherent](./parainherent.md) module, is cleared on every session change.

 ## Finalization

 The Shared Module currently has no finalization routines.

diff --git a/runtime/kusama/src/weights/runtime_parachains_paras.rs b/runtime/kusama/src/weights/runtime_parachains_paras.rs
index 36ec52fcf5ba..7405c0232a13 100644
--- a/runtime/kusama/src/weights/runtime_parachains_paras.rs
+++ b/runtime/kusama/src/weights/runtime_parachains_paras.rs
@@ -16,11 +16,11 @@
 //! Autogenerated weights for `runtime_parachains::paras`
 //!
 //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev
-//! DATE: 2021-12-28, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
-//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 128
+//! DATE: 2022-02-01, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]`
+//!
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("kusama-dev"), DB CACHE: 1024 // Executed Command: -// target/release/polkadot +// target/production/polkadot // benchmark // --chain=kusama-dev // --steps=50 @@ -45,21 +45,29 @@ pub struct WeightInfo(PhantomData); impl runtime_parachains::paras::WeightInfo for WeightInfo { // Storage: Paras CurrentCodeHash (r:1 w:1) // Storage: Paras CodeByHashRefs (r:1 w:1) + // Storage: Paras PastCodeMeta (r:1 w:1) + // Storage: Paras PastCodePruning (r:1 w:1) + // Storage: Paras PastCodeHash (r:0 w:1) // Storage: Paras CodeByHash (r:0 w:1) fn force_set_current_code(c: u32, ) -> Weight { (0 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Paras Heads (r:0 w:1) fn force_set_current_head(s: u32, ) -> Weight { - (11_803_000 as Weight) + (9_718_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: Paras MostRecentContext (r:0 w:1) + fn force_set_most_recent_context() -> Weight { + (1_170_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } // Storage: Configuration ActiveConfig (r:1 w:0) // Storage: Paras FutureCodeHash (r:1 w:1) // Storage: Paras CurrentCodeHash (r:1 w:0) @@ -74,23 +82,25 @@ impl runtime_parachains::paras::WeightInfo for WeightIn fn force_schedule_code_upgrade(c: u32, ) -> Weight { (0 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(9 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) } // Storage: Paras FutureCodeUpgrades (r:1 w:0) // Storage: Paras Heads (r:0 w:1) + // Storage: Paras UpgradeGoAheadSignal (r:0 w:1) + // Storage: Paras MostRecentContext (r:0 w:1) fn force_note_new_head(s: u32, ) -> Weight { - (18_655_000 as Weight) + (12_348_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: ParasShared CurrentSessionIndex (r:1 w:0) // Storage: Paras ActionsQueue (r:1 w:1) fn force_queue_action() -> Weight { - (23_208_000 as Weight) + (16_559_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -99,14 +109,14 @@ impl runtime_parachains::paras::WeightInfo for WeightIn fn add_trusted_validation_code(c: u32, ) -> Weight { (0 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Paras CodeByHashRefs (r:1 w:0) // Storage: Paras CodeByHash (r:0 w:1) fn poke_unused_validation_code() -> Weight { - (4_639_000 as Weight) + (2_811_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as 
Weight))
	}

diff --git a/runtime/parachains/src/dmp.rs b/runtime/parachains/src/dmp.rs
index 4a11796af0ce..1890cd7bf4f7 100644
--- a/runtime/parachains/src/dmp.rs
+++ b/runtime/parachains/src/dmp.rs
@@ -175,13 +175,27 @@ impl<T: Config> Pallet<T> {
 	/// Checks if the number of processed downward messages is valid.
 	pub(crate) fn check_processed_downward_messages(
 		para: ParaId,
+		relay_parent_number: T::BlockNumber,
 		processed_downward_messages: u32,
 	) -> Result<(), ProcessedDownwardMessagesAcceptanceErr> {
 		let dmq_length = Self::dmq_length(para);

 		if dmq_length > 0 && processed_downward_messages == 0 {
-			return Err(ProcessedDownwardMessagesAcceptanceErr::AdvancementRule)
+			// The advancement rule is for at least one downward message to be processed
+			// if the queue is non-empty at the relay-parent. Downward messages are annotated
+			// with the block number, so we compare the earliest (first) against the relay parent.
+			let contents = Self::dmq_contents(para);
+
+			// sanity: if dmq_length is > 0 this should always be `Some`.
+			if contents.get(0).map_or(false, |msg| msg.sent_at <= relay_parent_number) {
+				return Err(ProcessedDownwardMessagesAcceptanceErr::AdvancementRule)
+			}
 		}
+
+		// Note that we might be allowing a parachain to signal that it's processed
+		// messages that hadn't been placed in the queue at the relay_parent.
+		// Only 'stupid' parachains would do it, and we don't (and can't) force anyone
+		// to act on messages, so the lenient approach is fine here.
 		if dmq_length < processed_downward_messages {
 			return Err(ProcessedDownwardMessagesAcceptanceErr::Underflow {
 				processed_downward_messages,

diff --git a/runtime/parachains/src/dmp/tests.rs b/runtime/parachains/src/dmp/tests.rs
index 46c497dde904..7c3b63b539b6 100644
--- a/runtime/parachains/src/dmp/tests.rs
+++ b/runtime/parachains/src/dmp/tests.rs
@@ -121,21 +121,43 @@ fn check_processed_downward_messages() {
 	let a = ParaId::from(1312);

 	new_test_ext(default_genesis_config()).execute_with(|| {
+		let block_number = System::block_number();
+
 		// processed_downward_messages=0 is allowed when the DMQ is empty.
-		assert!(Dmp::check_processed_downward_messages(a, 0).is_ok());
+		assert!(Dmp::check_processed_downward_messages(a, block_number, 0).is_ok());

 		queue_downward_message(a, vec![1, 2, 3]).unwrap();
 		queue_downward_message(a, vec![4, 5, 6]).unwrap();
 		queue_downward_message(a, vec![7, 8, 9]).unwrap();

 		// 0 doesn't pass if the DMQ has msgs.
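 		// Illustrative note on the advancement rule above: rejection of 0 hinges on
 		// the earliest queued message satisfying `sent_at <= relay_parent_number`;
 		// messages queued only after the relay-parent never trigger the rule (see
 		// `check_processed_downward_messages_advancement_rule` below).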
-		assert!(!Dmp::check_processed_downward_messages(a, 0).is_ok());
+		assert!(Dmp::check_processed_downward_messages(a, block_number, 0).is_err());

 		// a candidate can consume up to 3 messages
-		assert!(Dmp::check_processed_downward_messages(a, 1).is_ok());
-		assert!(Dmp::check_processed_downward_messages(a, 2).is_ok());
-		assert!(Dmp::check_processed_downward_messages(a, 3).is_ok());
+		assert!(Dmp::check_processed_downward_messages(a, block_number, 1).is_ok());
+		assert!(Dmp::check_processed_downward_messages(a, block_number, 2).is_ok());
+		assert!(Dmp::check_processed_downward_messages(a, block_number, 3).is_ok());

 		// there are not 4 messages in the queue
-		assert!(!Dmp::check_processed_downward_messages(a, 4).is_ok());
+		assert!(Dmp::check_processed_downward_messages(a, block_number, 4).is_err());
	});
}

+#[test]
+fn check_processed_downward_messages_advancement_rule() {
+	let a = ParaId::from(1312);
+
+	new_test_ext(default_genesis_config()).execute_with(|| {
+		let block_number = System::block_number();
+
+		run_to_block(block_number + 1, None);
+		let advanced_block_number = System::block_number();
+
+		queue_downward_message(a, vec![1, 2, 3]).unwrap();
+		queue_downward_message(a, vec![4, 5, 6]).unwrap();
+
+		// The queue was empty at genesis, so 0 is OK despite the queue being non-empty
+		// at the later block.
+		assert!(Dmp::check_processed_downward_messages(a, block_number, 0).is_ok());
+		// For the advanced block number, however, the rule is broken in case of 0.
+		assert!(Dmp::check_processed_downward_messages(a, advanced_block_number, 0).is_err());
+	});
+}

diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs
index ecf5b7c94af8..a5b3ac165a80 100644
--- a/runtime/parachains/src/inclusion/mod.rs
+++ b/runtime/parachains/src/inclusion/mod.rs
@@ -21,8 +21,11 @@
 //! to included.

 use crate::{
-	configuration, disputes, dmp, hrmp, paras, paras_inherent::DisputedBitfield,
-	scheduler::CoreAssignment, shared, ump,
+	configuration, disputes, dmp, hrmp, paras,
+	paras_inherent::DisputedBitfield,
+	scheduler::{self, CoreAssignment},
+	shared::{self, AllowedRelayParentsTracker},
+	ump,
 };
 use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec};
 use frame_support::pallet_prelude::*;
@@ -194,6 +197,7 @@ pub mod pallet {
 		+ ump::Config
 		+ hrmp::Config
 		+ configuration::Config
+		+ scheduler::Config
 	{
 		type Event: From<Event<T>> + IsType<<Self as frame_system::Config>::Event>;
 		type DisputesHandler: disputes::DisputesHandler<Self::BlockNumber>;
@@ -245,8 +249,12 @@ pub mod pallet {
 		PrematureCodeUpgrade,
 		/// Output code is too large
 		NewCodeTooLarge,
-		/// Candidate not in parent context.
-		CandidateNotInParentContext,
+		/// The candidate's relay-parent was not allowed. Either it was
+		/// not recent enough or it didn't advance based on the last parachain block.
+		DisallowedRelayParent,
+		/// Failed to compute group index for the core: either it's out of bounds
+		/// or the relay parent doesn't belong to the current session.
+		InvalidAssignment,
 		/// Invalid group index in core assignment.
 		InvalidGroupIndex,
 		/// Insufficient (non-majority) backing.
@@ -463,7 +471,7 @@ impl<T: Config> Pallet<T> {
	/// Both should be sorted ascending by core index, and the candidates should be a subset of
	/// scheduled cores. If these conditions are not met, the execution of the function fails.
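	/// For example (illustrative): with `scheduled` covering cores `[0, 1, 2]` for
	/// paras `[A, B, C]`, the candidate lists `[A, B, C]` or `[A, C]` pass, while
	/// `[C, A]` is rejected as unscheduled ("out-of-order manifests as unscheduled"
	/// in the tests below).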
pub(crate) fn process_candidates( - parent_storage_root: T::Hash, + allowed_relay_parents: &AllowedRelayParentsTracker, candidates: Vec>, scheduled: Vec, group_validators: GV, @@ -471,6 +479,8 @@ impl Pallet { where GV: Fn(GroupIndex) -> Option>, { + let now = >::block_number(); + ensure!(candidates.len() <= scheduled.len(), Error::::UnscheduledCandidate); if scheduled.is_empty() { @@ -478,13 +488,6 @@ impl Pallet { } let validators = shared::Pallet::::active_validator_keys(); - let parent_hash = >::parent_hash(); - - // At the moment we assume (and in fact enforce, below) that the relay-parent is always one - // before of the block where we include a candidate (i.e. this code path). - let now = >::block_number(); - let relay_parent_number = now - One::one(); - let check_ctx = CandidateCheckContext::::new(now, relay_parent_number); // Collect candidate receipts with backers. let mut candidate_receipt_with_backing_validator_indices = @@ -506,9 +509,6 @@ impl Pallet { Ok(()) }; - let signing_context = - SigningContext { parent_hash, session_index: shared::Pallet::::session_index() }; - // We combine an outer loop over candidates with an inner loop over the scheduled, // where each iteration of the outer loop picks up at the position // in scheduled just after the past iteration left off. @@ -522,18 +522,27 @@ impl Pallet { 'next_backed_candidate: for (candidate_idx, backed_candidate) in candidates.iter().enumerate() { - match check_ctx.verify_backed_candidate( - parent_hash, - parent_storage_root, + let relay_parent_hash = backed_candidate.descriptor().relay_parent; + let para_id = backed_candidate.descriptor().para_id; + + let prev_context = >::para_most_recent_context(para_id); + + let check_ctx = CandidateCheckContext::::new(prev_context); + let signing_context = SigningContext { + parent_hash: relay_parent_hash, + session_index: shared::Pallet::::session_index(), + }; + + let relay_parent_number = match check_ctx.verify_backed_candidate( + &allowed_relay_parents, candidate_idx, backed_candidate, )? { Err(FailedToCreatePVD) => { log::debug!( target: LOG_TARGET, - "Failed to create PVD for candidate {} on relay parent {:?}", + "Failed to create PVD for candidate {}", candidate_idx, - parent_hash, ); // We don't want to error out here because it will // brick the relay-chain. So we return early without @@ -541,7 +550,7 @@ impl Pallet { return Ok(ProcessedCandidates::default()) }, Ok(rpn) => rpn, - } + }; let para_id = backed_candidate.descriptor().para_id; let mut backers = bitvec::bitvec![u8, BitOrderLsb0; 0; validators.len()]; @@ -566,7 +575,22 @@ impl Pallet { // account for already skipped, and then skip this one. skip = i + skip + 1; - let group_vals = group_validators(assignment.group_idx) + // The candidate based upon relay parent `N` should be backed by a group + // assigned to core at block `N + 1`. Thus, `relay_parent_number + 1` + // will always land in the current session. + let group_idx = >::group_assigned_to_core( + assignment.core, + relay_parent_number + One::one(), + ) + .ok_or_else(|| { + log::warn!( + target: LOG_TARGET, + "Failed to compute group index for candidate {}", + candidate_idx + ); + Error::::InvalidAssignment + })?; + let group_vals = group_validators(group_idx) .ok_or_else(|| Error::::InvalidGroupIndex)?; // check the signatures in the backing and that it is a majority. 
@@ -620,7 +644,8 @@ impl Pallet { core_indices_and_backers.push(( assignment.core, backers, - assignment.group_idx, + group_idx, + relay_parent_number, )); continue 'next_backed_candidate } @@ -642,8 +667,8 @@ impl Pallet { // one more sweep for actually writing to storage. let core_indices = - core_indices_and_backers.iter().map(|&(ref c, _, _)| c.clone()).collect(); - for (candidate, (core, backers, group)) in + core_indices_and_backers.iter().map(|&(ref c, _, _, _)| c.clone()).collect(); + for (candidate, (core, backers, group, relay_parent_number)) in candidates.into_iter().zip(core_indices_and_backers) { let para_id = candidate.descriptor().para_id; @@ -673,7 +698,7 @@ impl Pallet { availability_votes, relay_parent_number, backers: backers.to_bitvec(), - backed_in_number: check_ctx.now, + backed_in_number: now, backing_group: group, }, ); @@ -689,16 +714,15 @@ impl Pallet { /// Run the acceptance criteria checks on the given candidate commitments. pub(crate) fn check_validation_outputs_for_runtime_api( para_id: ParaId, + relay_parent_number: T::BlockNumber, validation_outputs: primitives::v1::CandidateCommitments, ) -> bool { - // This function is meant to be called from the runtime APIs against the relay-parent, hence - // `relay_parent_number` is equal to `now`. - let now = >::block_number(); - let relay_parent_number = now; - let check_ctx = CandidateCheckContext::::new(now, relay_parent_number); + let prev_context = >::para_most_recent_context(para_id); + let check_ctx = CandidateCheckContext::::new(prev_context); if let Err(err) = check_ctx.check_validation_outputs( para_id, + relay_parent_number, &validation_outputs.head_data, &validation_outputs.new_validation_code, validation_outputs.processed_downward_messages, @@ -934,8 +958,7 @@ impl AcceptanceCheckErr { /// A collection of data required for checking a candidate. pub(crate) struct CandidateCheckContext { config: configuration::HostConfiguration, - now: T::BlockNumber, - relay_parent_number: T::BlockNumber, + prev_context: Option, } /// An error indicating that creating Persisted Validation Data failed @@ -943,34 +966,42 @@ pub(crate) struct CandidateCheckContext { pub(crate) struct FailedToCreatePVD; impl CandidateCheckContext { - pub(crate) fn new(now: T::BlockNumber, relay_parent_number: T::BlockNumber) -> Self { - Self { config: >::config(), now, relay_parent_number } + pub(crate) fn new(prev_context: Option) -> Self { + Self { config: >::config(), prev_context } } /// Execute verification of the candidate. /// /// Assures: - /// * correct expected relay parent reference + /// * relay-parent in-bounds /// * collator signature check passes /// * code hash of commitments matches current code hash /// * para head in the descriptor and commitments match + /// + /// Returns the relay-parent block number. pub(crate) fn verify_backed_candidate( &self, - parent_hash: ::Hash, - parent_storage_root: T::Hash, + allowed_relay_parents: &AllowedRelayParentsTracker, candidate_idx: usize, backed_candidate: &BackedCandidate<::Hash>, - ) -> Result, Error> { + ) -> Result, Error> { let para_id = backed_candidate.descriptor().para_id; - let now = >::block_number(); - let relay_parent_number = now - One::one(); + let relay_parent = backed_candidate.descriptor().relay_parent; + + // Check that the relay-parent is one of the allowed relay-parents. 
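+		// Illustrative note: `acquire_info` is expected to yield
+		// `Some((state_root, number))` only if `relay_parent` is still within the
+		// tracker's look-back buffer and its number does not precede the para's
+		// previous context, so a para can never build on a relay-parent older
+		// than the one its last block was built on.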
+ let (relay_parent_storage_root, relay_parent_number) = { + match allowed_relay_parents.acquire_info(relay_parent, self.prev_context) { + None => return Err(Error::::DisallowedRelayParent), + Some(info) => info, + } + }; { // this should never fail because the para is registered let persisted_validation_data = match crate::util::make_persisted_validation_data::( para_id, relay_parent_number, - parent_storage_root, + relay_parent_storage_root, ) { Some(l) => l, None => return Ok(Err(FailedToCreatePVD)), @@ -984,11 +1015,6 @@ impl CandidateCheckContext { ); } - // we require that the candidate is in the context of the parent block. - ensure!( - backed_candidate.descriptor().relay_parent == parent_hash, - Error::::CandidateNotInParentContext, - ); ensure!( backed_candidate.descriptor().check_collator_signature().is_ok(), Error::::NotCollatorSigned, @@ -1010,6 +1036,7 @@ impl CandidateCheckContext { if let Err(err) = self.check_validation_outputs( para_id, + relay_parent_number, &backed_candidate.candidate.commitments.head_data, &backed_candidate.candidate.commitments.new_validation_code, backed_candidate.candidate.commitments.processed_downward_messages, @@ -1026,14 +1053,29 @@ impl CandidateCheckContext { ); Err(err.strip_into_dispatch_err::())?; }; - Ok(Ok(())) + Ok(Ok(relay_parent_number)) } /// Check the given outputs after candidate validation on whether it passes the acceptance /// criteria. + /// + /// The things that are checked can be roughly divided into limits and minimums. + /// + /// Limits are things like max message queue sizes and max head data size. + /// + /// Minimums are things like the minimum amount of messages that must be processed + /// by the parachain block. + /// + /// Limits are checked against the current state. The parachain block must be acceptable + /// by the current relay-chain state regardless of whether it was acceptable at some relay-chain + /// state in the past. + /// + /// Minimums are checked against the current state but modulated by + /// considering the information available at the relay-parent of the parachain block. 
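+	/// As an illustrative sketch: `max_head_data_size` (a limit) is enforced
+	/// against the configuration as of now, while `processed_downward_messages`
+	/// (a minimum) is judged against the queue as it looked at the candidate's
+	/// relay-parent.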
fn check_validation_outputs( &self, para_id: ParaId, + relay_parent_number: T::BlockNumber, head_data: &HeadData, new_validation_code: &Option, processed_downward_messages: u32, @@ -1059,9 +1101,13 @@ impl CandidateCheckContext { } // check if the candidate passes the messaging acceptance criteria - >::check_processed_downward_messages(para_id, processed_downward_messages)?; + >::check_processed_downward_messages( + para_id, + relay_parent_number, + processed_downward_messages, + )?; >::check_upward_messages(&self.config, para_id, upward_messages)?; - >::check_hrmp_watermark(para_id, self.relay_parent_number, hrmp_watermark)?; + >::check_hrmp_watermark(para_id, relay_parent_number, hrmp_watermark)?; >::check_outbound_hrmp(&self.config, para_id, horizontal_messages)?; Ok(()) diff --git a/runtime/parachains/src/inclusion/tests.rs b/runtime/parachains/src/inclusion/tests.rs index cd6b968ddcd1..b29a28e2bb8c 100644 --- a/runtime/parachains/src/inclusion/tests.rs +++ b/runtime/parachains/src/inclusion/tests.rs @@ -19,12 +19,13 @@ use crate::{ configuration::HostConfiguration, initializer::SessionChangeNotification, mock::{ - new_test_ext, Configuration, MockGenesisConfig, ParaInclusion, Paras, ParasShared, System, - Test, + new_test_ext, Configuration, MockGenesisConfig, ParaInclusion, Paras, ParasShared, + Scheduler, System, Test, }, paras::ParaGenesisArgs, paras_inherent::DisputedBitfield, scheduler::AssignmentKind, + shared::AllowedRelayParentsTracker, }; use assert_matches::assert_matches; use frame_support::assert_noop; @@ -50,6 +51,7 @@ fn default_config() -> HostConfiguration { let mut config = HostConfiguration::default(); config.parathread_cores = 1; config.max_code_size = 3; + config.group_rotation_frequency = u32::MAX; config } @@ -79,6 +81,16 @@ pub(crate) fn genesis_config(paras: Vec<(ParaId, bool)>) -> MockGenesisConfig { } } +fn default_allowed_relay_parent_tracker() -> AllowedRelayParentsTracker { + let mut allowed = AllowedRelayParentsTracker::default(); + + let relay_parent = System::parent_hash(); + let parent_number = System::block_number().saturating_sub(1); + + allowed.update(relay_parent, Hash::zero(), parent_number, 1); + allowed +} + #[derive(Debug, Clone, Copy, PartialEq)] pub(crate) enum BackingKind { #[allow(unused)] @@ -298,6 +310,13 @@ impl TestCandidateBuilder { pub(crate) fn make_vdata_hash(para_id: ParaId) -> Option { let relay_parent_number = >::block_number() - 1; + make_vdata_hash_with_block_number(para_id, relay_parent_number) +} + +fn make_vdata_hash_with_block_number( + para_id: ParaId, + relay_parent_number: BlockNumber, +) -> Option { let persisted_validation_data = crate::util::make_persisted_validation_data::( para_id, relay_parent_number, @@ -937,29 +956,36 @@ fn candidate_checks() { .map(|m| m.into_iter().map(ValidatorIndex).collect::>()) }; + // When processing candidates, we compute the group index from scheduler. 
+ let validator_groups = vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2), ValidatorIndex(3)], + vec![ValidatorIndex(4)], + ]; + Scheduler::set_validator_groups(validator_groups); + let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); let chain_a_assignment = CoreAssignment { core: CoreIndex::from(0), para_id: chain_a, kind: AssignmentKind::Parachain, - group_idx: GroupIndex::from(0), }; let chain_b_assignment = CoreAssignment { core: CoreIndex::from(1), para_id: chain_b, kind: AssignmentKind::Parachain, - group_idx: GroupIndex::from(1), }; let thread_a_assignment = CoreAssignment { core: CoreIndex::from(2), para_id: thread_a, kind: AssignmentKind::Parathread(thread_collator.clone(), 0), - group_idx: GroupIndex::from(2), }; + let allowed_relay_parents = default_allowed_relay_parent_tracker(); + // unscheduled candidate. { let mut candidate = TestCandidateBuilder { @@ -984,7 +1010,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( - Default::default(), + &allowed_relay_parents, vec![backed], vec![chain_b_assignment.clone()], &group_validators, @@ -1039,7 +1065,7 @@ fn candidate_checks() { // out-of-order manifests as unscheduled. assert_noop!( ParaInclusion::process_candidates( - Default::default(), + &allowed_relay_parents, vec![backed_b, backed_a], vec![chain_a_assignment.clone(), chain_b_assignment.clone()], &group_validators, @@ -1072,7 +1098,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( - Default::default(), + &allowed_relay_parents, vec![backed], vec![chain_a_assignment.clone()], &group_validators, @@ -1081,12 +1107,12 @@ fn candidate_checks() { ); } - // candidate not in parent context. + // one of candidates is not based on allowed relay parent. 
{ let wrong_parent_hash = Hash::repeat_byte(222); assert!(System::parent_hash() != wrong_parent_hash); - let mut candidate = TestCandidateBuilder { + let mut candidate_a = TestCandidateBuilder { para_id: chain_a, relay_parent: wrong_parent_hash, pov_hash: Hash::repeat_byte(1), @@ -1094,10 +1120,23 @@ fn candidate_checks() { ..Default::default() } .build(); - collator_sign_candidate(Sr25519Keyring::One, &mut candidate); - let backed = block_on(back_candidate( - candidate, + let mut candidate_b = TestCandidateBuilder { + para_id: chain_b, + relay_parent: System::parent_hash(), + pov_hash: Hash::repeat_byte(2), + persisted_validation_data_hash: make_vdata_hash(chain_b).unwrap(), + hrmp_watermark: RELAY_PARENT_NUM, + ..Default::default() + } + .build(); + + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_a); + + collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_b); + + let backed_a = block_on(back_candidate( + candidate_a, &validators, group_validators(GroupIndex::from(0)).unwrap().as_ref(), &keystore, @@ -1105,14 +1144,23 @@ fn candidate_checks() { BackingKind::Threshold, )); + let backed_b = block_on(back_candidate( + candidate_b, + &validators, + group_validators(GroupIndex::from(1)).unwrap().as_ref(), + &keystore, + &signing_context, + BackingKind::Threshold, + )); + assert_noop!( ParaInclusion::process_candidates( - Default::default(), - vec![backed], - vec![chain_a_assignment.clone()], + &allowed_relay_parents, + vec![backed_b, backed_a], + vec![chain_a_assignment.clone(), chain_b_assignment.clone()], &group_validators, ), - Error::::CandidateNotInParentContext + Error::::DisallowedRelayParent ); } @@ -1142,7 +1190,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( - Default::default(), + &allowed_relay_parents, vec![backed], vec![ chain_a_assignment.clone(), @@ -1184,7 +1232,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( - Default::default(), + &allowed_relay_parents, vec![backed], vec![thread_a_assignment.clone()], &group_validators, @@ -1234,7 +1282,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( - Default::default(), + &allowed_relay_parents, vec![backed], vec![chain_a_assignment.clone()], &group_validators, @@ -1274,7 +1322,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( - Default::default(), + &allowed_relay_parents, vec![backed], vec![chain_a_assignment.clone()], &group_validators, @@ -1318,7 +1366,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( - Default::default(), + &allowed_relay_parents, vec![backed], vec![chain_a_assignment.clone()], &group_validators, @@ -1352,7 +1400,7 @@ fn candidate_checks() { assert_eq!( ParaInclusion::process_candidates( - Default::default(), + &allowed_relay_parents, vec![backed], vec![chain_a_assignment.clone()], &group_validators, @@ -1387,7 +1435,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( - Default::default(), + &allowed_relay_parents, vec![backed], vec![chain_a_assignment.clone()], &group_validators, @@ -1422,7 +1470,7 @@ fn candidate_checks() { assert_noop!( ParaInclusion::process_candidates( - Default::default(), + &allowed_relay_parents, vec![backed], vec![chain_a_assignment.clone()], &group_validators, @@ -1480,27 +1528,34 @@ fn backing_works() { .map(|vs| vs.into_iter().map(ValidatorIndex).collect::>()) }; + // When processing candidates, we compute the group index from scheduler. 
+ let validator_groups = vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2), ValidatorIndex(3)], + vec![ValidatorIndex(4)], + ]; + Scheduler::set_validator_groups(validator_groups); + + let allowed_relay_parents = default_allowed_relay_parent_tracker(); + let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); let chain_a_assignment = CoreAssignment { core: CoreIndex::from(0), para_id: chain_a, kind: AssignmentKind::Parachain, - group_idx: GroupIndex::from(0), }; let chain_b_assignment = CoreAssignment { core: CoreIndex::from(1), para_id: chain_b, kind: AssignmentKind::Parachain, - group_idx: GroupIndex::from(1), }; let thread_a_assignment = CoreAssignment { core: CoreIndex::from(2), para_id: thread_a, kind: AssignmentKind::Parathread(thread_collator.clone(), 0), - group_idx: GroupIndex::from(2), }; let mut candidate_a = TestCandidateBuilder { @@ -1587,7 +1642,7 @@ fn backing_works() { core_indices: occupied_cores, candidate_receipt_with_backing_validator_indices, } = ParaInclusion::process_candidates( - Default::default(), + &allowed_relay_parents, backed_candidates.clone(), vec![ chain_a_assignment.clone(), @@ -1758,11 +1813,22 @@ fn can_include_candidate_with_ok_code_upgrade() { .map(|vs| vs.into_iter().map(ValidatorIndex).collect::>()) }; + // When processing candidates, we compute the group index from scheduler. + let validator_groups = vec![vec![ + ValidatorIndex(0), + ValidatorIndex(1), + ValidatorIndex(2), + ValidatorIndex(3), + ValidatorIndex(4), + ]]; + Scheduler::set_validator_groups(validator_groups); + + let allowed_relay_parents = default_allowed_relay_parent_tracker(); + let chain_a_assignment = CoreAssignment { core: CoreIndex::from(0), para_id: chain_a, kind: AssignmentKind::Parachain, - group_idx: GroupIndex::from(0), }; let mut candidate_a = TestCandidateBuilder { @@ -1788,7 +1854,7 @@ fn can_include_candidate_with_ok_code_upgrade() { let ProcessedCandidates { core_indices: occupied_cores, .. } = ParaInclusion::process_candidates( - Default::default(), + &allowed_relay_parents, vec![backed_a], vec![chain_a_assignment.clone()], &group_validators, @@ -1821,6 +1887,193 @@ fn can_include_candidate_with_ok_code_upgrade() { }); } +#[test] +fn check_allowed_relay_parents() { + let chain_a = ParaId::from(1); + let chain_b = ParaId::from(2); + let thread_a = ParaId::from(3); + + // The block number of the relay-parent for testing. 
+ const RELAY_PARENT_NUM: BlockNumber = 4; + + let paras = vec![(chain_a, true), (chain_b, true), (thread_a, false)]; + let validators = vec![ + Sr25519Keyring::Alice, + Sr25519Keyring::Bob, + Sr25519Keyring::Charlie, + Sr25519Keyring::Dave, + Sr25519Keyring::Ferdie, + ]; + let keystore: SyncCryptoStorePtr = Arc::new(LocalKeystore::in_memory()); + for validator in validators.iter() { + SyncCryptoStore::sr25519_generate_new( + &*keystore, + PARACHAIN_KEY_TYPE_ID, + Some(&validator.to_seed()), + ) + .unwrap(); + } + let validator_public = validator_pubkeys(&validators); + let mut config = genesis_config(paras); + config.configuration.config.group_rotation_frequency = 1; + + new_test_ext(config).execute_with(|| { + shared::Pallet::::set_active_validators_ascending(validator_public.clone()); + shared::Pallet::::set_session_index(5); + + run_to_block(5, |_| None); + + let group_validators = |group_index: GroupIndex| { + match group_index { + group_index if group_index == GroupIndex::from(0) => Some(vec![0, 1]), + group_index if group_index == GroupIndex::from(1) => Some(vec![2, 3]), + group_index if group_index == GroupIndex::from(2) => Some(vec![4]), + _ => panic!("Group index out of bounds for 2 parachains and 1 parathread core"), + } + .map(|vs| vs.into_iter().map(ValidatorIndex).collect::>()) + }; + + // When processing candidates, we compute the group index from scheduler. + let validator_groups = vec![ + vec![ValidatorIndex(0), ValidatorIndex(1)], + vec![ValidatorIndex(2), ValidatorIndex(3)], + vec![ValidatorIndex(4)], + ]; + Scheduler::set_validator_groups(validator_groups); + + let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); + + // Base each candidate on one of allowed relay parents. + // + // Note that the group rotation frequency is set to 1 above, + // which means groups shift at each relay parent. + // + // For example, candidate `a` is based on block 1, + // thus it will be included in block 2, its group index is + // core = 0 shifted 2 times: one for group rotation and one for + // fetching the group assigned to the next block. + // + // Candidates `b` and `c` are constructed accordingly. 
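+		// Illustrative arithmetic: with these parameters the group index reduces
+		// to `(core + inclusion_block) % 3`, i.e. `a`: (0 + 2) % 3 = 2,
+		// `b`: (1 + 3) % 3 = 1, `c`: (2 + 4) % 3 = 0, matching the
+		// `group_validators(GroupIndex::from(..))` calls below.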
+ + let relay_parent_a = (1, Hash::repeat_byte(0x1)); + let relay_parent_b = (2, Hash::repeat_byte(0x2)); + let relay_parent_c = (3, Hash::repeat_byte(0x3)); + + let mut allowed_relay_parents = AllowedRelayParentsTracker::default(); + let max_len = RELAY_PARENT_NUM as usize; + allowed_relay_parents.update(relay_parent_a.1, Hash::zero(), relay_parent_a.0, max_len); + allowed_relay_parents.update(relay_parent_b.1, Hash::zero(), relay_parent_b.0, max_len); + allowed_relay_parents.update(relay_parent_c.1, Hash::zero(), relay_parent_c.0, max_len); + + let chain_a_assignment = CoreAssignment { + core: CoreIndex::from(0), + para_id: chain_a, + kind: AssignmentKind::Parachain, + }; + + let chain_b_assignment = CoreAssignment { + core: CoreIndex::from(1), + para_id: chain_b, + kind: AssignmentKind::Parachain, + }; + + let thread_a_assignment = CoreAssignment { + core: CoreIndex::from(2), + para_id: thread_a, + kind: AssignmentKind::Parathread(thread_collator.clone(), 0), + }; + + let mut candidate_a = TestCandidateBuilder { + para_id: chain_a, + relay_parent: relay_parent_a.1, + pov_hash: Hash::repeat_byte(1), + persisted_validation_data_hash: make_vdata_hash_with_block_number( + chain_a, + relay_parent_a.0, + ) + .unwrap(), + hrmp_watermark: relay_parent_a.0, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_a); + let signing_context_a = SigningContext { parent_hash: relay_parent_a.1, session_index: 5 }; + + let mut candidate_b = TestCandidateBuilder { + para_id: chain_b, + relay_parent: relay_parent_b.1, + pov_hash: Hash::repeat_byte(2), + persisted_validation_data_hash: make_vdata_hash_with_block_number( + chain_b, + relay_parent_b.0, + ) + .unwrap(), + hrmp_watermark: relay_parent_b.0, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::One, &mut candidate_b); + let signing_context_b = SigningContext { parent_hash: relay_parent_b.1, session_index: 5 }; + + let mut candidate_c = TestCandidateBuilder { + para_id: thread_a, + relay_parent: relay_parent_c.1, + pov_hash: Hash::repeat_byte(3), + persisted_validation_data_hash: make_vdata_hash_with_block_number( + thread_a, + relay_parent_c.0, + ) + .unwrap(), + hrmp_watermark: relay_parent_c.0, + ..Default::default() + } + .build(); + collator_sign_candidate(Sr25519Keyring::Two, &mut candidate_c); + let signing_context_c = SigningContext { parent_hash: relay_parent_c.1, session_index: 5 }; + + let backed_a = block_on(back_candidate( + candidate_a.clone(), + &validators, + group_validators(GroupIndex::from(2)).unwrap().as_ref(), + &keystore, + &signing_context_a, + BackingKind::Threshold, + )); + + let backed_b = block_on(back_candidate( + candidate_b.clone(), + &validators, + group_validators(GroupIndex::from(1)).unwrap().as_ref(), + &keystore, + &signing_context_b, + BackingKind::Threshold, + )); + + let backed_c = block_on(back_candidate( + candidate_c.clone(), + &validators, + group_validators(GroupIndex::from(0)).unwrap().as_ref(), + &keystore, + &signing_context_c, + BackingKind::Threshold, + )); + + let backed_candidates = vec![backed_a, backed_b, backed_c]; + + ParaInclusion::process_candidates( + &allowed_relay_parents, + backed_candidates.clone(), + vec![ + chain_a_assignment.clone(), + chain_b_assignment.clone(), + thread_a_assignment.clone(), + ], + &group_validators, + ) + .expect("candidates scheduled, in order, and backed"); + }); +} + #[test] fn session_change_wipes() { let chain_a = ParaId::from(1); diff --git a/runtime/parachains/src/paras/benchmarking.rs 
b/runtime/parachains/src/paras/benchmarking.rs
index bd9106422d90..c7d2fbcd036e 100644
--- a/runtime/parachains/src/paras/benchmarking.rs
+++ b/runtime/parachains/src/paras/benchmarking.rs
@@ -95,6 +95,10 @@ benchmarks! {
 	verify {
 		assert_last_event::<T>(Event::CurrentHeadUpdated(para_id).into());
 	}
+	force_set_most_recent_context {
+		let para_id = ParaId::from(1000);
+		let context = T::BlockNumber::from(1000u32);
+	}: _(RawOrigin::Root, para_id, context)
 	force_schedule_code_upgrade {
 		let c in 1 .. MAX_CODE_SIZE;
 		let new_code = ValidationCode(vec![0; c as usize]);

diff --git a/runtime/parachains/src/paras/mod.rs b/runtime/parachains/src/paras/mod.rs
index fc35b80c7a63..a379bfaf1847 100644
--- a/runtime/parachains/src/paras/mod.rs
+++ b/runtime/parachains/src/paras/mod.rs
@@ -402,6 +402,7 @@ impl PvfCheckActiveVoteState {
 pub trait WeightInfo {
 	fn force_set_current_code(c: u32) -> Weight;
 	fn force_set_current_head(s: u32) -> Weight;
+	fn force_set_most_recent_context() -> Weight;
 	fn force_schedule_code_upgrade(c: u32) -> Weight;
 	fn force_note_new_head(s: u32) -> Weight;
 	fn force_queue_action() -> Weight;
@@ -417,6 +418,9 @@ impl WeightInfo for TestWeightInfo {
 	fn force_set_current_head(_s: u32) -> Weight {
 		Weight::MAX
 	}
+	fn force_set_most_recent_context() -> Weight {
+		Weight::MAX
+	}
 	fn force_schedule_code_upgrade(_c: u32) -> Weight {
 		Weight::MAX
 	}
@@ -552,6 +556,12 @@ pub mod pallet {
 	#[pallet::getter(fn para_head)]
 	pub(super) type Heads<T: Config> = StorageMap<_, Twox64Concat, ParaId, HeadData>;

+	/// The context (relay-chain block number) of the most recent parachain head.
+	#[pallet::storage]
+	#[pallet::getter(fn para_most_recent_context)]
+	pub(super) type MostRecentContext<T: Config> =
+		StorageMap<_, Twox64Concat, ParaId, T::BlockNumber>;
+
 	/// The validation code hash of every live para.
 	///
 	/// Corresponding code can be retrieved with [`CodeByHash`].
@@ -738,6 +748,18 @@ pub mod pallet {
 			Ok(())
 		}

+		/// Set the storage for the most recent context of the para immediately.
+		#[pallet::weight(<T as Config>::WeightInfo::force_set_most_recent_context())]
+		pub fn force_set_most_recent_context(
+			origin: OriginFor<T>,
+			para: ParaId,
+			context: T::BlockNumber,
+		) -> DispatchResult {
+			ensure_root(origin)?;
+			<Self as Store>::MostRecentContext::insert(&para, context);
+			Ok(())
+		}
+
		/// Schedule an upgrade as if it was scheduled in the given relay parent block.
#[pallet::weight(::WeightInfo::force_schedule_code_upgrade(new_code.0.len() as u32))] pub fn force_schedule_code_upgrade( @@ -1098,6 +1120,7 @@ impl Pallet { parachains.remove(para); ::Heads::remove(¶); + ::MostRecentContext::remove(¶); ::FutureCodeUpgrades::remove(¶); ::UpgradeGoAheadSignal::remove(¶); ::UpgradeRestrictionSignal::remove(¶); @@ -1785,6 +1808,7 @@ impl Pallet { execution_context: T::BlockNumber, ) -> Weight { Heads::::insert(&id, new_head); + MostRecentContext::::insert(&id, execution_context); if let Some(expected_at) = ::FutureCodeUpgrades::get(&id) { if expected_at <= execution_context { @@ -1979,6 +2003,7 @@ impl Pallet { } Heads::::insert(&id, &genesis_data.genesis_head); + MostRecentContext::::insert(&id, T::BlockNumber::from(0u32)); } } diff --git a/runtime/parachains/src/paras/tests.rs b/runtime/parachains/src/paras/tests.rs index 7f4ac54711c1..943342c33530 100644 --- a/runtime/parachains/src/paras/tests.rs +++ b/runtime/parachains/src/paras/tests.rs @@ -1574,3 +1574,44 @@ fn verify_upgrade_restriction_signal_is_externally_accessible() { ); }); } + +#[test] +fn most_recent_context() { + let validation_code = vec![1, 2, 3].into(); + + let genesis_config = MockGenesisConfig::default(); + + new_test_ext(genesis_config).execute_with(|| { + run_to_block(1, Some(vec![1])); + + let para_id = ParaId::from(111); + + assert_eq!(Paras::para_most_recent_context(para_id), None); + + assert_ok!(Paras::schedule_para_initialize( + para_id, + ParaGenesisArgs { parachain: true, genesis_head: vec![1].into(), validation_code }, + )); + + assert_eq!( + ::ParaLifecycles::get(¶_id), + Some(ParaLifecycle::Onboarding) + ); + + // Two sessions pass, so action queue is triggered. + run_to_block(4, Some(vec![3, 4])); + + // Double-check the para is onboarded, the context is set to the recent block. + assert_eq!(::ParaLifecycles::get(¶_id), Some(ParaLifecycle::Parachain)); + assert_eq!(Paras::para_most_recent_context(para_id), Some(0)); + + // Progress para to the new head and check that the recent context is updated. + Paras::note_new_head(para_id, vec![4, 5, 6].into(), 3); + assert_eq!(Paras::para_most_recent_context(para_id), Some(3)); + + // Finally, offboard the para and expect the context to be cleared. + assert_ok!(Paras::schedule_para_cleanup(para_id)); + run_to_block(6, Some(vec![5, 6])); + assert_eq!(Paras::para_most_recent_context(para_id), None); + }) +} diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs index bbc8b59803cd..b8d8c12a3136 100644 --- a/runtime/parachains/src/paras_inherent/mod.rs +++ b/runtime/parachains/src/paras_inherent/mod.rs @@ -28,8 +28,10 @@ use crate::{ inclusion::{CandidateCheckContext, FullCheck}, initializer, metrics::METRICS, + paras, scheduler::{self, CoreAssignment, FreedReason}, - shared, ump, ParaId, + shared::{self, ALLOWED_RELAY_PARENT_LOOKBACK}, + ump, ParaId, }; use bitvec::prelude::BitVec; use frame_support::{ @@ -331,6 +333,22 @@ impl Pallet { let now = >::block_number(); + // Before anything else, update the allowed relay-parents. 
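+		// Sketch of the intended tracker semantics: `update` pushes
+		// `(parent_hash, parent_storage_root)` onto the back of the deque,
+		// evicts the oldest entry once the length exceeds
+		// ALLOWED_RELAY_PARENT_LOOKBACK, and records `parent_number` as
+		// `latest_number`.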
+ { + let parent_number = now - One::one(); + let parent_storage_root = parent_header.state_root().clone(); + + shared::AllowedRelayParents::::mutate(|tracker| { + tracker.update( + parent_hash, + parent_storage_root, + parent_number, + ALLOWED_RELAY_PARENT_LOOKBACK, + ); + }); + } + let allowed_relay_parents = >::allowed_relay_parents(); + let mut candidates_weight = backed_candidates_weight::(&backed_candidates); let mut bitfields_weight = signed_bitfields_weight::(signed_bitfields.len()); let disputes_weight = multi_dispute_statement_sets_weight::(&disputes); @@ -494,13 +512,12 @@ impl Pallet { let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); >::clear(); - >::schedule(freed, now); + >::schedule(freed); METRICS.on_candidates_processed_total(backed_candidates.len() as u64); let scheduled = >::scheduled(); assure_sanity_backed_candidates::( - parent_hash, &backed_candidates, move |_candidate_index: usize, backed_candidate: &BackedCandidate| -> bool { ::DisputesHandler::concluded_invalid(current_session, backed_candidate.hash()) @@ -512,12 +529,11 @@ impl Pallet { METRICS.on_candidates_sanitized(backed_candidates.len() as u64); // Process backed candidates according to scheduled cores. - let parent_storage_root = parent_header.state_root().clone(); let inclusion::ProcessedCandidates::<::Hash> { core_indices: occupied, candidate_receipt_with_backing_validator_indices, } = >::process_candidates( - parent_storage_root, + &allowed_relay_parents, backed_candidates, scheduled, >::group_validators, @@ -570,6 +586,7 @@ impl Pallet { ); let parent_hash = >::parent_hash(); + let now = >::block_number(); if parent_hash != parent_header.hash() { log::warn!( @@ -587,6 +604,22 @@ impl Pallet { let entropy = compute_entropy::(parent_hash); let mut rng = rand_chacha::ChaChaRng::from_seed(entropy.into()); + // Update the allowed relay-parents + let allowed_relay_parents = { + let parent_number = now - One::one(); + let parent_storage_root = parent_header.state_root().clone(); + let mut tracker = >::allowed_relay_parents(); + + tracker.update( + parent_hash, + parent_storage_root, + parent_number, + ALLOWED_RELAY_PARENT_LOOKBACK, + ); + + tracker + }; + // Filter out duplicates and continue. if let Err(_) = T::DisputesHandler::deduplicate_and_sort_dispute_data(&mut disputes) { log::debug!(target: LOG_TARGET, "Found duplicate statement sets, retaining the first"); @@ -702,29 +735,28 @@ impl Pallet { let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); >::clear(); - let now = >::block_number(); - >::schedule(freed, now); + >::schedule(freed); let scheduled = >::scheduled(); - let relay_parent_number = now - One::one(); - let parent_storage_root = parent_header.state_root().clone(); - - let check_ctx = CandidateCheckContext::::new(now, relay_parent_number); let backed_candidates = sanitize_backed_candidates::( - parent_hash, backed_candidates, move |candidate_idx: usize, backed_candidate: &BackedCandidate<::Hash>| -> bool { + let para_id = backed_candidate.descriptor().para_id; + let prev_context = >::para_most_recent_context(para_id); + let check_ctx = CandidateCheckContext::::new(prev_context); + // never include a concluded-invalid candidate concluded_invalid_disputes.contains(&backed_candidate.hash()) || // Instead of checking the candidates with code upgrades twice // move the checking up here and skip it in the training wheels fallback. // That way we avoid possible duplicate checks while assuring all // backed candidates fine to pass on. 
- check_ctx - .verify_backed_candidate(parent_hash, parent_storage_root, candidate_idx, backed_candidate) + // + // NOTE: this is the only place where we check the relay-parent. + check_ctx.verify_backed_candidate(&allowed_relay_parents, candidate_idx, backed_candidate) .is_err() }, &scheduled[..], @@ -1107,7 +1139,6 @@ fn sanitize_backed_candidates< T: crate::inclusion::Config, F: FnMut(usize, &BackedCandidate) -> bool, >( - relay_parent: T::Hash, mut backed_candidates: Vec>, mut candidate_has_concluded_invalid_dispute_or_is_invalid: F, scheduled: &[CoreAssignment], @@ -1125,12 +1156,11 @@ fn sanitize_backed_candidates< // Assure the backed candidate's `ParaId`'s core is free. // This holds under the assumption that `Scheduler::schedule` is called _before_. - // Also checks the candidate references the correct relay parent. - + // We don't check the relay-parent because this is done in the closure when + // constructing the inherent and during actual processing otherwise. backed_candidates.retain(|backed_candidate| { let desc = backed_candidate.descriptor(); - desc.relay_parent == relay_parent && - scheduled_paras_to_core_idx.get(&desc.para_id).is_some() + scheduled_paras_to_core_idx.get(&desc.para_id).is_some() }); // Sort the `Vec` last, once there is a guarantee that these @@ -1152,7 +1182,6 @@ pub(crate) fn assure_sanity_backed_candidates< T: crate::inclusion::Config, F: FnMut(usize, &BackedCandidate) -> bool, >( - relay_parent: T::Hash, backed_candidates: &[BackedCandidate], mut candidate_has_concluded_invalid_dispute_or_is_invalid: F, scheduled: &[CoreAssignment], @@ -1163,13 +1192,6 @@ pub(crate) fn assure_sanity_backed_candidates< if candidate_has_concluded_invalid_dispute_or_is_invalid(idx, backed_candidate) { return Err(Error::::UnsortedOrDuplicateBackedCandidates) } - // Assure the backed candidate's `ParaId`'s core is free. - // This holds under the assumption that `Scheduler::schedule` is called _before_. - // Also checks the candidate references the correct relay parent. 
- let desc = backed_candidate.descriptor(); - if desc.relay_parent != relay_parent { - return Err(Error::::UnexpectedRelayParent) - } } let scheduled_paras_to_core_idx = scheduled diff --git a/runtime/parachains/src/paras_inherent/tests.rs b/runtime/parachains/src/paras_inherent/tests.rs index eed7ebebbcfa..448b6400cdd8 100644 --- a/runtime/parachains/src/paras_inherent/tests.rs +++ b/runtime/parachains/src/paras_inherent/tests.rs @@ -1170,7 +1170,6 @@ mod sanitizers { .map(|idx| { let ca = CoreAssignment { kind: scheduler::AssignmentKind::Parachain, - group_idx: GroupIndex::from(idx as u32), para_id: ParaId::from(1_u32 + idx as u32), core: CoreIndex::from(idx as u32), }; @@ -1219,7 +1218,6 @@ mod sanitizers { // happy path assert_eq!( sanitize_backed_candidates::( - relay_parent, backed_candidates.clone(), has_concluded_invalid, scheduled @@ -1231,19 +1229,6 @@ mod sanitizers { { let scheduled = &[][..]; assert!(sanitize_backed_candidates::( - relay_parent, - backed_candidates.clone(), - has_concluded_invalid, - scheduled - ) - .is_empty()); - } - - // relay parent mismatch - { - let relay_parent = Hash::repeat_byte(0xFA); - assert!(sanitize_backed_candidates::( - relay_parent, backed_candidates.clone(), has_concluded_invalid, scheduled @@ -1267,7 +1252,6 @@ mod sanitizers { |_idx: usize, candidate: &BackedCandidate| set.contains(&candidate.hash()); assert_eq!( sanitize_backed_candidates::( - relay_parent, backed_candidates.clone(), has_concluded_invalid, scheduled diff --git a/runtime/parachains/src/runtime_api_impl/v1.rs b/runtime/parachains/src/runtime_api_impl/v1.rs index 994a720bc590..538c16ca5b2d 100644 --- a/runtime/parachains/src/runtime_api_impl/v1.rs +++ b/runtime/parachains/src/runtime_api_impl/v1.rs @@ -58,7 +58,7 @@ pub fn availability_cores() -> Vec>::block_number() + One::one(); >::clear(); - >::schedule(Vec::new(), now); + >::schedule(Vec::new()); let rotation_info = >::group_rotation_info(now); @@ -261,7 +261,12 @@ pub fn check_validation_outputs( para_id: ParaId, outputs: primitives::v1::CandidateCommitments, ) -> bool { - >::check_validation_outputs_for_runtime_api(para_id, outputs) + let relay_parent_number = >::block_number(); + >::check_validation_outputs_for_runtime_api( + para_id, + relay_parent_number, + outputs, + ) } /// Implementation for the `session_index_for_child` function of the runtime API. diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index 2753fe4e111b..b784882fa6e4 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -36,6 +36,7 @@ //! over time. use frame_support::pallet_prelude::*; +use frame_system::pallet_prelude::*; use primitives::v1::{ CollatorId, CoreIndex, CoreOccupied, GroupIndex, GroupRotationInfo, Id as ParaId, ParathreadClaim, ParathreadEntry, ScheduledCore, ValidatorIndex, @@ -51,6 +52,8 @@ pub use pallet::*; #[cfg(test)] mod tests; +mod migration; + /// A queued parathread entry, pre-assigned to a core. #[derive(Encode, Decode, TypeInfo)] #[cfg_attr(test, derive(PartialEq, Debug))] @@ -127,8 +130,6 @@ pub struct CoreAssignment { pub para_id: ParaId, /// The kind of the assignment. pub kind: AssignmentKind, - /// The index of the validator group assigned to the core. 
- pub group_idx: GroupIndex, } impl CoreAssignment { @@ -160,11 +161,19 @@ pub mod pallet { #[pallet::pallet] #[pallet::generate_store(pub(super) trait Store)] #[pallet::without_storage_info] + #[pallet::storage_version(migration::STORAGE_VERSION)] pub struct Pallet(_); #[pallet::config] pub trait Config: frame_system::Config + configuration::Config + paras::Config {} + #[pallet::hooks] + impl Hooks> for Pallet { + fn on_runtime_upgrade() -> Weight { + migration::on_runtime_upgrade::() + } + } + /// All the validator groups. One for each core. Indices are into `ActiveValidators` - not the /// broader set of Polkadot validators, but instead just the subset used for parachains during /// this session. @@ -420,10 +429,7 @@ impl Pallet { /// Schedule all unassigned cores, where possible. Provide a list of cores that should be considered /// newly-freed along with the reason for them being freed. The list is assumed to be sorted in /// ascending order by core index. - pub(crate) fn schedule( - just_freed_cores: impl IntoIterator, - now: T::BlockNumber, - ) { + pub(crate) fn schedule(just_freed_cores: impl IntoIterator) { Self::free_cores(just_freed_cores); let cores = AvailabilityCores::::get(); @@ -484,10 +490,6 @@ impl Pallet { kind: AssignmentKind::Parachain, para_id: parachains[core_index], core: core.clone(), - group_idx: Self::group_assigned_to_core(core, now).expect( - "core is not out of bounds and we are guaranteed \ - to be after the most recent session start; qed", - ), }) } else { // parathread core offset, rel. to beginning. @@ -497,10 +499,6 @@ impl Pallet { kind: AssignmentKind::Parathread(entry.claim.1, entry.retries), para_id: entry.claim.0, core: core.clone(), - group_idx: Self::group_assigned_to_core(core, now).expect( - "core is not out of bounds and we are guaranteed \ - to be after the most recent session start; qed", - ), }) }; @@ -762,4 +760,9 @@ impl Pallet { } }); } + + #[cfg(test)] + pub(crate) fn set_validator_groups(validator_groups: Vec>) { + ::ValidatorGroups::set(validator_groups); + } } diff --git a/runtime/parachains/src/scheduler/migration.rs b/runtime/parachains/src/scheduler/migration.rs new file mode 100644 index 000000000000..97f65df6d38c --- /dev/null +++ b/runtime/parachains/src/scheduler/migration.rs @@ -0,0 +1,76 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! A module that is responsible for migration of storage. + +use crate::scheduler::{self, AssignmentKind, Config, Pallet, Store}; +use frame_support::{pallet_prelude::*, traits::StorageVersion, weights::Weight}; +use parity_scale_codec::{Decode, Encode}; + +/// The current storage version. +pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); + +/// Call this during the next runtime upgrade for this module. 
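///
/// A sketch of the intended behaviour, assuming the function is generic over the
/// pallet `Config` (illustrative, not part of the diff):
/// ```ignore
/// // Idempotent: the translation only runs while the on-chain storage
/// // version is still 0, after which the version is bumped to 1.
/// let w_first = on_runtime_upgrade::<T>(); // translates the `Scheduled` entries
/// let w_again = on_runtime_upgrade::<T>(); // no-op besides the version check
/// ```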
+pub fn on_runtime_upgrade() -> Weight { + let mut weight: Weight = 0; + + if StorageVersion::get::>() == 0 { + weight = weight + .saturating_add(v1::migrate::()) + .saturating_add(T::DbWeight::get().writes(1)); + StorageVersion::new(1).put::>(); + } + + weight +} + +mod v0 { + use super::*; + use primitives::v1::{CoreIndex, GroupIndex, Id as ParaId}; + + #[derive(Encode, Decode)] + pub struct CoreAssignment { + pub core: CoreIndex, + pub para_id: ParaId, + pub kind: AssignmentKind, + pub group_idx: GroupIndex, + } + + impl From for scheduler::CoreAssignment { + fn from(old: CoreAssignment) -> Self { + Self { core: old.core, para_id: old.para_id, kind: old.kind } + } + } +} + +/// V1: Group index is dropped from the core assignment, it's explicitly computed during +/// candidates processing. +mod v1 { + use super::*; + use sp_std::vec::Vec; + + pub fn migrate() -> Weight { + let _ = as Store>::Scheduled::translate( + |scheduled: Option>| { + scheduled.map(|scheduled| { + scheduled.into_iter().map(|old| scheduler::CoreAssignment::from(old)).collect() + }) + }, + ); + + T::DbWeight::get().reads_writes(1, 1) + } +} diff --git a/runtime/parachains/src/scheduler/tests.rs b/runtime/parachains/src/scheduler/tests.rs index 234c1833d9f0..75b7288231d1 100644 --- a/runtime/parachains/src/scheduler/tests.rs +++ b/runtime/parachains/src/scheduler/tests.rs @@ -70,7 +70,7 @@ fn run_to_block( // In the real runtime this is expected to be called by the `InclusionInherent` pallet. Scheduler::clear(); - Scheduler::schedule(Vec::new(), b + 1); + Scheduler::schedule(Vec::new()); } } @@ -481,7 +481,6 @@ fn schedule_schedules() { core: CoreIndex(0), para_id: chain_a, kind: AssignmentKind::Parachain, - group_idx: GroupIndex(0), } ); @@ -491,7 +490,6 @@ fn schedule_schedules() { core: CoreIndex(1), para_id: chain_b, kind: AssignmentKind::Parachain, - group_idx: GroupIndex(1), } ); } @@ -512,7 +510,6 @@ fn schedule_schedules() { core: CoreIndex(0), para_id: chain_a, kind: AssignmentKind::Parachain, - group_idx: GroupIndex(0), } ); @@ -522,7 +519,6 @@ fn schedule_schedules() { core: CoreIndex(1), para_id: chain_b, kind: AssignmentKind::Parachain, - group_idx: GroupIndex(1), } ); @@ -532,7 +528,6 @@ fn schedule_schedules() { core: CoreIndex(2), para_id: thread_a, kind: AssignmentKind::Parathread(collator.clone(), 0), - group_idx: GroupIndex(2), } ); @@ -542,7 +537,6 @@ fn schedule_schedules() { core: CoreIndex(3), para_id: thread_c, kind: AssignmentKind::Parathread(collator.clone(), 0), - group_idx: GroupIndex(3), } ); } @@ -644,20 +638,16 @@ fn schedule_schedules_including_just_freed() { core: CoreIndex(4), para_id: thread_b, kind: AssignmentKind::Parathread(collator.clone(), 0), - group_idx: GroupIndex(4), } ); } // now note that cores 0, 2, and 3 were freed. - Scheduler::schedule( - vec![ - (CoreIndex(0), FreedReason::Concluded), - (CoreIndex(2), FreedReason::Concluded), - (CoreIndex(3), FreedReason::TimedOut), // should go back on queue. - ], - 3, - ); + Scheduler::schedule(vec![ + (CoreIndex(0), FreedReason::Concluded), + (CoreIndex(2), FreedReason::Concluded), + (CoreIndex(3), FreedReason::TimedOut), // should go back on queue. 
+ ]); { let scheduled = Scheduler::scheduled(); @@ -670,7 +660,6 @@ fn schedule_schedules_including_just_freed() { core: CoreIndex(0), para_id: chain_a, kind: AssignmentKind::Parachain, - group_idx: GroupIndex(0), } ); assert_eq!( @@ -679,7 +668,6 @@ fn schedule_schedules_including_just_freed() { core: CoreIndex(2), para_id: thread_d, kind: AssignmentKind::Parathread(collator.clone(), 0), - group_idx: GroupIndex(2), } ); assert_eq!( @@ -688,7 +676,6 @@ fn schedule_schedules_including_just_freed() { core: CoreIndex(3), para_id: thread_e, kind: AssignmentKind::Parathread(collator.clone(), 0), - group_idx: GroupIndex(3), } ); assert_eq!( @@ -697,7 +684,6 @@ fn schedule_schedules_including_just_freed() { core: CoreIndex(4), para_id: thread_b, kind: AssignmentKind::Parathread(collator.clone(), 0), - group_idx: GroupIndex(4), } ); @@ -783,10 +769,10 @@ fn schedule_clears_availability_cores() { run_to_block(3, |_| None); // now note that cores 0 and 2 were freed. - Scheduler::schedule( - vec![(CoreIndex(0), FreedReason::Concluded), (CoreIndex(2), FreedReason::Concluded)], - 3, - ); + Scheduler::schedule(vec![ + (CoreIndex(0), FreedReason::Concluded), + (CoreIndex(2), FreedReason::Concluded), + ]); { let scheduled = Scheduler::scheduled(); @@ -798,7 +784,6 @@ fn schedule_clears_availability_cores() { core: CoreIndex(0), para_id: chain_a, kind: AssignmentKind::Parachain, - group_idx: GroupIndex(0), } ); assert_eq!( @@ -807,7 +792,6 @@ fn schedule_clears_availability_cores() { core: CoreIndex(2), para_id: chain_c, kind: AssignmentKind::Parachain, - group_idx: GroupIndex(2), } ); @@ -873,31 +857,37 @@ fn schedule_rotates_groups() { run_to_block(2, |_| None); - let assert_groups_rotated = |rotations: u32| { + let assert_groups_rotated = |rotations: u32, block_number: u32| { let scheduled = Scheduler::scheduled(); assert_eq!(scheduled.len(), 2); - assert_eq!(scheduled[0].group_idx, GroupIndex((0u32 + rotations) % parathread_cores)); - assert_eq!(scheduled[1].group_idx, GroupIndex((1u32 + rotations) % parathread_cores)); + assert_eq!( + Scheduler::group_assigned_to_core(scheduled[0].core, block_number).unwrap(), + GroupIndex((0u32 + rotations) % parathread_cores) + ); + assert_eq!( + Scheduler::group_assigned_to_core(scheduled[1].core, block_number).unwrap(), + GroupIndex((1u32 + rotations) % parathread_cores) + ); }; - assert_groups_rotated(0); + assert_groups_rotated(0, 2); // one block before first rotation. run_to_block(rotation_frequency, |_| None); - assert_groups_rotated(0); + assert_groups_rotated(0, rotation_frequency); // first rotation. run_to_block(rotation_frequency + 1, |_| None); - assert_groups_rotated(1); + assert_groups_rotated(1, rotation_frequency + 1); // one block before second rotation. run_to_block(rotation_frequency * 2, |_| None); - assert_groups_rotated(1); + assert_groups_rotated(1, rotation_frequency * 2); // second rotation. 
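
// A sketch (not part of the test) of the arithmetic this test pins down, assuming the
// standard rotation rule behind `group_assigned_to_core`: group assignments shift by
// one core every `rotation_frequency` blocks, wrapping over the core count.
fn expected_group(core: u32, rotations: u32, n_cores: u32) -> u32 {
	(core + rotations) % n_cores
}
assert_eq!(expected_group(0, 1, 5), 1);
assert_eq!(expected_group(4, 2, 5), 1); // wraps around the core count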
run_to_block(rotation_frequency * 2 + 1, |_| None); - assert_groups_rotated(2); + assert_groups_rotated(2, rotation_frequency * 2 + 1); }); } @@ -1377,7 +1367,7 @@ fn session_change_requires_reschedule_dropping_removed_paras() { }); Scheduler::clear(); - Scheduler::schedule(Vec::new(), 3); + Scheduler::schedule(Vec::new()); assert_eq!( Scheduler::scheduled(), @@ -1385,7 +1375,6 @@ fn session_change_requires_reschedule_dropping_removed_paras() { core: CoreIndex(0), para_id: chain_a, kind: AssignmentKind::Parachain, - group_idx: GroupIndex(0), }], ); }); diff --git a/runtime/parachains/src/shared.rs b/runtime/parachains/src/shared.rs index 7bd33c503c63..3991126f13f2 100644 --- a/runtime/parachains/src/shared.rs +++ b/runtime/parachains/src/shared.rs @@ -21,7 +21,8 @@ use frame_support::pallet_prelude::*; use primitives::v1::{SessionIndex, ValidatorId, ValidatorIndex}; -use sp_std::vec::Vec; +use sp_runtime::traits::AtLeast32BitUnsigned; +use sp_std::{collections::vec_deque::VecDeque, vec::Vec}; use rand::{seq::SliceRandom, SeedableRng}; use rand_chacha::ChaCha20Rng; @@ -38,6 +39,77 @@ pub(crate) const SESSION_DELAY: SessionIndex = 2; #[cfg(test)] mod tests; +/// The maximum amount of relay-parent lookback. +// TODO: put this in the configuration module (https://github.com/paritytech/polkadot/issues/4841). +pub const ALLOWED_RELAY_PARENT_LOOKBACK: usize = 4; + +/// Information about past relay-parents. +#[derive(Encode, Decode, Default, TypeInfo)] +pub struct AllowedRelayParentsTracker { + // The past relay parents, paired with state roots, that are viable to build upon. + // + // They are in ascending chronologic order, so the newest relay parents are at + // the back of the deque. + // + // (relay_parent, state_root) + buffer: VecDeque<(Hash, Hash)>, + + // The number of the most recent relay-parent, if any. + // If the buffer is empty, this value has no meaning and may + // be nonsensical. + latest_number: BlockNumber, +} + +impl + AllowedRelayParentsTracker +{ + /// Add a new relay-parent to the allowed relay parents, along with info about the header. + /// Provide a maximum length for the buffer, which will cause old relay-parents to be pruned. + pub(crate) fn update( + &mut self, + relay_parent: Hash, + state_root: Hash, + number: BlockNumber, + max_len: usize, + ) { + self.buffer.push_back((relay_parent, state_root)); + self.latest_number = number; + while self.buffer.len() > max_len { + let _ = self.buffer.pop_front(); + } + + // if max_len == 0, then latest_number is nonsensical. Otherwise, it's fine. + + // We only allow relay parents within the same sessions, the buffer + // gets cleared on session changes. + } + + /// Attempt to acquire the state root and block number to be used when building + /// upon the given relay-parent. + /// + /// This only succeeds if the relay-parent is one of the allowed relay-parents. + /// If a previous relay-parent number is passed, then this only passes if the new relay-parent is + /// more recent than the previous. 
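///
/// A sketch of the intended call-site; `descriptor` and the error value are
/// illustrative, not part of this patch:
/// ```ignore
/// let prev = <paras::Pallet<T>>::para_most_recent_context(para_id);
/// let (state_root, number) = tracker
/// 	.acquire_info(descriptor.relay_parent, prev)
/// 	.ok_or("relay-parent not within the allowed window")?;
/// // `state_root` and `number` then parameterize the validation-data checks.
/// ```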
+ pub(crate) fn acquire_info( + &self, + relay_parent: Hash, + prev: Option, + ) -> Option<(Hash, BlockNumber)> { + let pos = self.buffer.iter().position(|(rp, _)| rp == &relay_parent)?; + + if let Some(prev) = prev { + if prev >= self.latest_number { + return None + } + } + + let age = (self.buffer.len() - 1) - pos; + let number = self.latest_number.clone() - BlockNumber::from(age as u32); + + Some((self.buffer[pos].1, number)) + } +} + #[frame_support::pallet] pub mod pallet { use super::*; @@ -68,6 +140,12 @@ pub mod pallet { #[pallet::getter(fn active_validator_keys)] pub(super) type ActiveValidatorKeys = StorageValue<_, Vec, ValueQuery>; + /// All allowed relay-parents. + #[pallet::storage] + #[pallet::getter(fn allowed_relay_parents)] + pub(crate) type AllowedRelayParents = + StorageValue<_, AllowedRelayParentsTracker, ValueQuery>; + #[pallet::call] impl Pallet {} } @@ -90,6 +168,17 @@ impl Pallet { new_config: &HostConfiguration, all_validators: Vec, ) -> Vec { + // Drop allowed relay parents buffer on a session change. + // + // During the initialization of the next block we always add its parent + // to the tracker. + // + // With asynchronous backing candidates built on top of relay + // parent `R` are still restricted by the runtime to be backed + // by the group assigned at `number(R) + 1`, which is guaranteed + // to be in the current session. + AllowedRelayParents::::mutate(|tracker| tracker.buffer.clear()); + CurrentSessionIndex::::set(session_index); let mut rng: ChaCha20Rng = SeedableRng::from_seed(random_seed); diff --git a/runtime/polkadot/src/weights/runtime_parachains_paras.rs b/runtime/polkadot/src/weights/runtime_parachains_paras.rs index 1e67b0e9359c..e3df5918ce8b 100644 --- a/runtime/polkadot/src/weights/runtime_parachains_paras.rs +++ b/runtime/polkadot/src/weights/runtime_parachains_paras.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::paras` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-12-29, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 128 +//! DATE: 2022-02-01, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("polkadot-dev"), DB CACHE: 1024 // Executed Command: -// target/release/polkadot +// target/production/polkadot // benchmark // --chain=polkadot-dev // --steps=50 @@ -45,21 +45,29 @@ pub struct WeightInfo(PhantomData); impl runtime_parachains::paras::WeightInfo for WeightInfo { // Storage: Paras CurrentCodeHash (r:1 w:1) // Storage: Paras CodeByHashRefs (r:1 w:1) + // Storage: Paras PastCodeMeta (r:1 w:1) + // Storage: Paras PastCodePruning (r:1 w:1) + // Storage: Paras PastCodeHash (r:0 w:1) // Storage: Paras CodeByHash (r:0 w:1) fn force_set_current_code(c: u32, ) -> Weight { (0 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Paras Heads (r:0 w:1) fn force_set_current_head(s: u32, ) -> Weight { - (15_314_000 as Weight) + (9_365_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: Paras MostRecentContext (r:0 w:1) + fn force_set_most_recent_context() -> Weight { + (1_162_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } // Storage: Configuration ActiveConfig (r:1 w:0) // Storage: Paras FutureCodeHash (r:1 w:1) // Storage: Paras CurrentCodeHash (r:1 w:0) @@ -74,23 +82,25 @@ impl runtime_parachains::paras::WeightInfo for WeightIn fn force_schedule_code_upgrade(c: u32, ) -> Weight { (0 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(9 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) } // Storage: Paras FutureCodeUpgrades (r:1 w:0) // Storage: Paras Heads (r:0 w:1) + // Storage: Paras UpgradeGoAheadSignal (r:0 w:1) + // Storage: Paras MostRecentContext (r:0 w:1) fn force_note_new_head(s: u32, ) -> Weight { - (19_183_000 as Weight) + (16_565_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: ParasShared CurrentSessionIndex (r:1 w:0) // Storage: Paras ActionsQueue (r:1 w:1) fn force_queue_action() -> Weight { - (23_668_000 as Weight) + (16_375_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -99,14 +109,14 @@ impl runtime_parachains::paras::WeightInfo for WeightIn fn add_trusted_validation_code(c: u32, ) -> Weight { (0 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Paras CodeByHashRefs (r:1 w:0) // Storage: Paras CodeByHash (r:0 w:1) fn poke_unused_validation_code() -> Weight { - (4_647_000 as Weight) + (2_683_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as 
Weight)) } diff --git a/runtime/rococo/src/weights/runtime_parachains_paras.rs b/runtime/rococo/src/weights/runtime_parachains_paras.rs index 10d0cd013021..70b9d42f6490 100644 --- a/runtime/rococo/src/weights/runtime_parachains_paras.rs +++ b/runtime/rococo/src/weights/runtime_parachains_paras.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::paras` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-12-29, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 128 +//! DATE: 2022-02-01, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("rococo-dev"), DB CACHE: 1024 // Executed Command: -// target/release/polkadot +// target/production/polkadot // benchmark // --chain=rococo-dev // --steps=50 @@ -45,21 +45,29 @@ pub struct WeightInfo(PhantomData); impl runtime_parachains::paras::WeightInfo for WeightInfo { // Storage: Paras CurrentCodeHash (r:1 w:1) // Storage: Paras CodeByHashRefs (r:1 w:1) + // Storage: Paras PastCodeMeta (r:1 w:1) + // Storage: Paras PastCodePruning (r:1 w:1) + // Storage: Paras PastCodeHash (r:0 w:1) // Storage: Paras CodeByHash (r:0 w:1) fn force_set_current_code(c: u32, ) -> Weight { (0 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(c as Weight)) - .saturating_add(T::DbWeight::get().reads(2 as Weight)) - .saturating_add(T::DbWeight::get().writes(3 as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(c as Weight)) + .saturating_add(T::DbWeight::get().reads(4 as Weight)) + .saturating_add(T::DbWeight::get().writes(6 as Weight)) } // Storage: Paras Heads (r:0 w:1) fn force_set_current_head(s: u32, ) -> Weight { - (14_013_000 as Weight) + (9_076_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: Paras MostRecentContext (r:0 w:1) + fn force_set_most_recent_context() -> Weight { + (1_335_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } // Storage: Configuration ActiveConfig (r:1 w:0) // Storage: Paras FutureCodeHash (r:1 w:1) // Storage: Paras CurrentCodeHash (r:1 w:0) @@ -74,23 +82,25 @@ impl runtime_parachains::paras::WeightInfo for WeightIn fn force_schedule_code_upgrade(c: u32, ) -> Weight { (0 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(9 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) } // Storage: Paras FutureCodeUpgrades (r:1 w:0) // Storage: Paras Heads (r:0 w:1) + // Storage: Paras UpgradeGoAheadSignal (r:0 w:1) + // Storage: Paras MostRecentContext (r:0 w:1) fn force_note_new_head(s: u32, ) -> Weight { - (17_583_000 as Weight) + (13_978_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(1 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: ParasShared CurrentSessionIndex (r:1 w:0) // Storage: Paras ActionsQueue (r:1 w:1) fn force_queue_action() -> Weight { - (23_310_000 as Weight) + (16_907_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) 
.saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -99,14 +109,14 @@ impl runtime_parachains::paras::WeightInfo for WeightIn fn add_trusted_validation_code(c: u32, ) -> Weight { (0 as Weight) // Standard Error: 0 - .saturating_add((3_000 as Weight).saturating_mul(c as Weight)) + .saturating_add((2_000 as Weight).saturating_mul(c as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } // Storage: Paras CodeByHashRefs (r:1 w:0) // Storage: Paras CodeByHash (r:0 w:1) fn poke_unused_validation_code() -> Weight { - (4_372_000 as Weight) + (2_708_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } diff --git a/runtime/westend/src/weights/runtime_parachains_paras.rs b/runtime/westend/src/weights/runtime_parachains_paras.rs index 630f51edc4e5..6f448f72067b 100644 --- a/runtime/westend/src/weights/runtime_parachains_paras.rs +++ b/runtime/westend/src/weights/runtime_parachains_paras.rs @@ -16,11 +16,11 @@ //! Autogenerated weights for `runtime_parachains::paras` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev -//! DATE: 2021-12-31, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` -//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 128 +//! DATE: 2022-02-01, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("westend-dev"), DB CACHE: 1024 // Executed Command: -// target/release/polkadot +// target/production/polkadot // benchmark // --chain=westend-dev // --steps=50 @@ -58,11 +58,16 @@ impl runtime_parachains::paras::WeightInfo for WeightIn } // Storage: Paras Heads (r:0 w:1) fn force_set_current_head(s: u32, ) -> Weight { - (13_711_000 as Weight) + (11_066_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: Paras MostRecentContext (r:0 w:1) + fn force_set_most_recent_context() -> Weight { + (1_180_000 as Weight) + .saturating_add(T::DbWeight::get().writes(1 as Weight)) + } // Storage: Paras FutureCodeHash (r:1 w:1) // Storage: Paras CurrentCodeHash (r:1 w:0) // Storage: Paras UpgradeCooldowns (r:1 w:1) @@ -83,17 +88,18 @@ impl runtime_parachains::paras::WeightInfo for WeightIn // Storage: Paras FutureCodeUpgrades (r:1 w:0) // Storage: Paras Heads (r:0 w:1) // Storage: Paras UpgradeGoAheadSignal (r:0 w:1) + // Storage: Paras MostRecentContext (r:0 w:1) fn force_note_new_head(s: u32, ) -> Weight { - (18_543_000 as Weight) + (14_398_000 as Weight) // Standard Error: 0 .saturating_add((1_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) - .saturating_add(T::DbWeight::get().writes(2 as Weight)) + .saturating_add(T::DbWeight::get().writes(3 as Weight)) } // Storage: ParasShared CurrentSessionIndex (r:1 w:0) // Storage: Paras ActionsQueue (r:1 w:1) fn force_queue_action() -> Weight { - (22_153_000 as Weight) + (16_375_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -109,7 +115,7 @@ impl runtime_parachains::paras::WeightInfo for WeightIn // Storage: Paras CodeByHashRefs (r:1 w:0) // Storage: Paras CodeByHash (r:0 w:1) fn poke_unused_validation_code() -> Weight { - (4_207_000 as Weight) + (2_586_000 as Weight) 
.saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } From c1fbdeee68d9efb586bf04becc13f403d42cee56 Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Wed, 18 May 2022 18:29:55 -0500 Subject: [PATCH 03/76] Prospective Parachains Subsystem (#4913) * docs and skeleton * subsystem skeleton * main loop * fragment tree basics & fmt * begin fragment trees & view * flesh out more of view update logic * further flesh out update logic * some refcount functions for fragment trees * add fatal/non-fatal errors * use non-fatal results * clear up some TODOs * ideal format for scheduling info * add a bunch of TODOs * some more fluff * extract fragment graph to submodule * begin fragment graph API * trees, not graphs * improve docs * scope and constructor for trees * add some test TODOs * limit max ancestors and store constraints * constructor * constraints: fix bug in HRMP watermarks * fragment tree population logic * set::retain * extract population logic * implement add_and_populate * fmt * add some TODOs in tests * implement child-selection * strip out old stuff based on wrong assumptions * use fatality * implement pruning * remove unused ancestor constraints * fragment tree instantiation * remove outdated comment * add message/request types and skeleton for handling * fmt * implement handle_candidate_seconded * candidate storage: handle backed * implement handle_candidate_backed * implement answer_get_backable_candidate * remove async where not needed * implement fetch_ancestry * add logic for run_iteration * add some docs * remove global allow(unused), fix warnings * make spellcheck happy (despite English) * fmt * bump Cargo.lock * replace tracing with gum * introduce PopulateFrom trait * implement GetHypotheticalDepths * revise docs slightly * first fragment tree scope test * more scope tests * test add_candidate * fmt * test retain * refactor test code * test populate is recursive * test contiguity of depth 0 is maintained * add_and_populate tests * cycle tests * remove PopulateFrom trait * fmt * test hypothetical depths (non-recursive) * have CandidateSeconded return membership * tree membership requests * Add a ProspectiveParachainsSubsystem struct * add a staging API for base constraints * add a `From` impl * add runtime API for staging_validity_constraints * implement fetch_base_constraints * implement `fetch_upcoming_paras` * remove reconstruction of candidate receipt; no obvious usecase * fmt * export message to broader module * remove last TODO * correctly export * fix compilation and add GetMinimumRelayParent request * make provisioner into a real subsystem with proper mesage bounds * fmt * fix ChannelsOut in overseer test * fix overseer tests * fix again * fmt --- Cargo.lock | 479 +++--- Cargo.toml | 1 + node/core/prospective-parachains/Cargo.toml | 26 + node/core/prospective-parachains/src/error.rs | 83 ++ .../src/fragment_tree.rs | 1312 +++++++++++++++++ node/core/prospective-parachains/src/lib.rs | 591 ++++++++ node/core/runtime-api/src/cache.rs | 44 +- node/core/runtime-api/src/lib.rs | 11 + node/core/runtime-api/src/tests.rs | 19 +- node/overseer/src/dummy.rs | 8 +- node/overseer/src/lib.rs | 10 +- node/overseer/src/tests.rs | 18 +- node/service/src/overseer.rs | 2 + node/subsystem-types/src/messages.rs | 83 +- .../src/inclusion_emulator/staging.rs | 106 +- primitives/src/runtime_api.rs | 6 +- primitives/src/v2/mod.rs | 1 + primitives/src/vstaging/mod.rs | 64 + runtime/kusama/src/lib.rs | 4 + runtime/polkadot/src/lib.rs | 
4 + runtime/rococo/src/lib.rs | 4 + runtime/test-runtime/src/lib.rs | 4 + runtime/westend/src/lib.rs | 4 + 23 files changed, 2637 insertions(+), 247 deletions(-) create mode 100644 node/core/prospective-parachains/Cargo.toml create mode 100644 node/core/prospective-parachains/src/error.rs create mode 100644 node/core/prospective-parachains/src/fragment_tree.rs create mode 100644 node/core/prospective-parachains/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 2f4d62410042..53d7911958ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -441,7 +441,7 @@ dependencies = [ "futures-timer", "hex", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-chain-spec", "sc-client-api", @@ -474,7 +474,7 @@ dependencies = [ "futures 0.3.21", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-rpc", "sc-utils", @@ -494,7 +494,7 @@ name = "beefy-primitives" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-application-crypto", @@ -543,16 +543,28 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitvec" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" +dependencies = [ + "funty 1.1.0", + "radium 0.6.2", + "tap", + "wyz 0.2.0", +] + [[package]] name = "bitvec" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1489fcb93a5bb47da0462ca93ad252ad6af2145cce58d10d46a83931ba9f016b" dependencies = [ - "funty", - "radium", + "funty 2.0.0", + "radium 0.7.0", "tap", - "wyz", + "wyz 0.5.0", ] [[package]] @@ -690,7 +702,7 @@ dependencies = [ "frame-support", "hex", "hex-literal", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -705,7 +717,7 @@ version = "0.1.0" dependencies = [ "bp-runtime", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-std", ] @@ -714,14 +726,14 @@ dependencies = [ name = "bp-messages" version = "0.1.0" dependencies = [ - "bitvec", + "bitvec 1.0.0", "bp-runtime", "frame-support", "frame-system", "hex", "hex-literal", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -737,7 +749,7 @@ dependencies = [ "frame-support", "frame-system", "hex", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-core", @@ -754,7 +766,7 @@ dependencies = [ "bp-polkadot-core", "bp-runtime", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "smallvec", "sp-api", "sp-runtime", @@ -770,7 +782,7 @@ dependencies = [ "hash-db", "hex-literal", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -787,7 +799,7 @@ dependencies = [ "bp-header-chain", "ed25519-dalek", "finality-grandpa", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-application-crypto", "sp-finality-grandpa", "sp-runtime", @@ -802,7 +814,7 @@ dependencies = [ "bp-polkadot-core", "bp-rococo", "bp-runtime", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-runtime", "sp-std", @@ -824,7 +836,7 @@ dependencies = [ "pallet-bridge-grandpa", 
"pallet-bridge-messages", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-core", @@ -2024,7 +2036,7 @@ dependencies = [ "futures-timer", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.11.2", "scale-info", ] @@ -2098,7 +2110,7 @@ name = "fork-tree" version = "3.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", ] [[package]] @@ -2120,7 +2132,7 @@ dependencies = [ "frame-system", "linregress", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", "scale-info", "serde", @@ -2154,7 +2166,7 @@ dependencies = [ "linked-hash-map", "log", "memory-db", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand 0.8.5", "rand_pcg 0.3.1", "sc-block-builder", @@ -2202,7 +2214,7 @@ dependencies = [ "frame-election-provider-solution-type", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-arithmetic", "sp-npos-elections", @@ -2217,7 +2229,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -2233,7 +2245,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df6bb8542ef006ef0de09a5c4420787d79823c0ed7924225822362fd2bf2ff2d" dependencies = [ "cfg-if 1.0.0", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", ] @@ -2250,7 +2262,7 @@ dependencies = [ "k256", "log", "once_cell", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", "scale-info", "serde", @@ -2310,7 +2322,7 @@ dependencies = [ "frame-support", "frame-support-test-pallet", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "pretty_assertions", "rustversion", "scale-info", @@ -2332,7 +2344,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", ] @@ -2343,7 +2355,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "frame-support", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -2361,7 +2373,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-runtime", @@ -2373,7 +2385,7 @@ name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", ] @@ -2422,6 +2434,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" +[[package]] +name = "funty" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + [[package]] name = "funty" version = "2.0.0" @@ -3000,7 +3018,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - 
"parity-scale-codec", + "parity-scale-codec 3.1.2", ] [[package]] @@ -3292,7 +3310,7 @@ name = "kusama-runtime" version = "0.9.22" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -3346,7 +3364,7 @@ dependencies = [ "pallet-vesting", "pallet-xcm", "pallet-xcm-benchmarks", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", @@ -4875,7 +4893,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-std", @@ -4889,7 +4907,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-application-crypto", "sp-authority-discovery", @@ -4905,7 +4923,7 @@ dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-authorship", "sp-runtime", @@ -4924,7 +4942,7 @@ dependencies = [ "pallet-authorship", "pallet-session", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-application-crypto", "sp-consensus-babe", @@ -4947,7 +4965,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -4984,7 +5002,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-std", @@ -4999,7 +5017,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-runtime", @@ -5020,7 +5038,7 @@ dependencies = [ "pallet-beefy", "pallet-mmr", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -5039,7 +5057,7 @@ dependencies = [ "frame-system", "log", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5056,7 +5074,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5077,7 +5095,7 @@ dependencies = [ "frame-system", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -5092,7 +5110,7 @@ dependencies = [ name = "pallet-bridge-messages" version = "0.1.0" dependencies = [ - "bitvec", + "bitvec 1.0.0", "bp-message-dispatch", "bp-messages", "bp-runtime", @@ -5102,7 +5120,7 @@ dependencies = [ "log", "num-traits", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -5122,7 +5140,7 @@ dependencies = [ "log", "pallet-bounties", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5139,7 +5157,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5155,7 +5173,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-io", @@ -5173,7 +5191,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand 0.7.3", "scale-info", 
"sp-arithmetic", @@ -5194,7 +5212,7 @@ dependencies = [ "frame-benchmarking", "frame-election-provider-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-npos-elections", "sp-runtime", ] @@ -5208,7 +5226,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5225,7 +5243,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-arithmetic", "sp-runtime", @@ -5243,7 +5261,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-application-crypto", "sp-core", @@ -5264,7 +5282,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5281,7 +5299,7 @@ dependencies = [ "frame-system", "log", "pallet-authorship", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-application-crypto", "sp-core", @@ -5299,7 +5317,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5317,7 +5335,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5334,7 +5352,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5349,7 +5367,7 @@ version = "3.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "jsonrpsee", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "serde", "sp-api", "sp-blockchain", @@ -5366,7 +5384,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5380,7 +5398,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5395,7 +5413,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-runtime", @@ -5415,7 +5433,7 @@ dependencies = [ "pallet-bags-list", "pallet-nomination-pools", "pallet-staking", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-staking", @@ -5431,7 +5449,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-runtime", @@ -5455,7 +5473,7 @@ dependencies = [ "pallet-offences", "pallet-session", "pallet-staking", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-staking", @@ -5470,7 +5488,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5486,7 +5504,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5501,7 +5519,7 @@ dependencies = [ "frame-benchmarking", "frame-support", 
"frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5517,7 +5535,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5534,7 +5552,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5568,7 +5586,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand_chacha 0.2.2", "scale-info", "sp-runtime", @@ -5587,7 +5605,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand_chacha 0.2.2", "scale-info", "serde", @@ -5625,7 +5643,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-io", "sp-runtime", @@ -5641,7 +5659,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-inherents", "sp-io", @@ -5660,7 +5678,7 @@ dependencies = [ "frame-system", "log", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -5676,7 +5694,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "smallvec", @@ -5693,7 +5711,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-blockchain", "sp-core", @@ -5707,7 +5725,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-runtime", ] @@ -5722,7 +5740,7 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-runtime", @@ -5737,7 +5755,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-io", @@ -5754,7 +5772,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-std", @@ -5768,7 +5786,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-runtime-parachains", "scale-info", @@ -5793,7 +5811,7 @@ dependencies = [ "pallet-assets", "pallet-balances", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-runtime-common", "scale-info", @@ -5826,6 +5844,19 @@ dependencies = [ "snap", ] +[[package]] +name = "parity-scale-codec" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" +dependencies = [ + "arrayvec 0.7.2", + "bitvec 0.20.4", + 
"byte-slice-cast", + "impl-trait-for-tuples", + "serde", +] + [[package]] name = "parity-scale-codec" version = "3.1.2" @@ -5833,7 +5864,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8b44461635bbb1a0300f100a841e571e7d919c81c73075ef5d152ffdb521066" dependencies = [ "arrayvec 0.7.2", - "bitvec", + "bitvec 1.0.0", "byte-slice-cast", "impl-trait-for-tuples", "parity-scale-codec-derive", @@ -6158,7 +6189,7 @@ name = "polkadot-availability-bitfield-distribution" version = "0.9.22" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "env_logger 0.9.0", "futures 0.3.21", "log", @@ -6187,7 +6218,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6217,7 +6248,7 @@ dependencies = [ "futures-timer", "log", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6313,7 +6344,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6334,7 +6365,7 @@ dependencies = [ name = "polkadot-core-primitives" version = "0.9.22" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "scale-info", "sp-core", @@ -6354,7 +6385,7 @@ dependencies = [ "futures-timer", "lazy_static", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6377,7 +6408,7 @@ dependencies = [ name = "polkadot-erasure-coding" version = "0.9.22" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-primitives", "polkadot-primitives", "reed-solomon-novelpoly", @@ -6423,7 +6454,7 @@ dependencies = [ "bytes", "futures 0.3.21", "futures-timer", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-node-network-protocol", "polkadot-node-subsystem", @@ -6444,7 +6475,7 @@ name = "polkadot-node-collation-generation" version = "0.9.22" dependencies = [ "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6463,7 +6494,7 @@ name = "polkadot-node-core-approval-voting" version = "0.9.22" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "derive_more", "futures 0.3.21", "futures-timer", @@ -6471,7 +6502,7 @@ dependencies = [ "kvdb-memorydb", "lru 0.7.5", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-node-jaeger", "polkadot-node-primitives", @@ -6501,14 +6532,14 @@ name = "polkadot-node-core-av-store" version = "0.9.22" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "env_logger 0.9.0", "futures 0.3.21", "futures-timer", "kvdb", "kvdb-memorydb", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-erasure-coding", "polkadot-node-primitives", @@ -6529,7 +6560,7 @@ name = "polkadot-node-core-backing" version = "0.9.22" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.0", "fatality", "futures 0.3.21", "polkadot-erasure-coding", @@ -6573,7 +6604,7 @@ dependencies = [ "assert_matches", "async-trait", "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 
3.1.2", "polkadot-node-core-pvf", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6594,7 +6625,7 @@ version = "0.9.22" dependencies = [ "futures 0.3.21", "maplit", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -6616,7 +6647,7 @@ dependencies = [ "futures-timer", "kvdb", "kvdb-memorydb", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6639,7 +6670,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -6670,11 +6701,29 @@ dependencies = [ "tracing-gum", ] +[[package]] +name = "polkadot-node-core-prospective-parachains" +version = "0.9.16" +dependencies = [ + "assert_matches", + "bitvec 1.0.0", + "fatality", + "futures 0.3.21", + "parity-scale-codec 2.3.1", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-util", + "polkadot-primitives", + "polkadot-primitives-test-helpers", + "thiserror", + "tracing-gum", +] + [[package]] name = "polkadot-node-core-provisioner" version = "0.9.22" dependencies = [ - "bitvec", + "bitvec 1.0.0", "fatality", "futures 0.3.21", "futures-timer", @@ -6702,7 +6751,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "hex-literal", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "pin-project 1.0.10", "polkadot-core-primitives", "polkadot-node-subsystem-util", @@ -6776,7 +6825,7 @@ dependencies = [ "lazy_static", "log", "mick-jaeger", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "polkadot-node-primitives", "polkadot-primitives", @@ -6797,7 +6846,7 @@ dependencies = [ "log", "metered-channel", "nix 0.24.1", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-test-service", "prometheus-parse", @@ -6821,7 +6870,7 @@ dependencies = [ "derive_more", "fatality", "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-primitives", @@ -6840,7 +6889,7 @@ version = "0.9.22" dependencies = [ "bounded-vec", "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-erasure-coding", "polkadot-parachain", "polkadot-primitives", @@ -6919,7 +6968,7 @@ dependencies = [ "lru 0.7.5", "metered-channel", "parity-db", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "parking_lot 0.11.2", "pin-project 1.0.10", @@ -7006,7 +7055,7 @@ version = "0.9.22" dependencies = [ "derive_more", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "polkadot-core-primitives", "scale-info", @@ -7034,10 +7083,10 @@ dependencies = [ name = "polkadot-primitives" version = "0.9.22" dependencies = [ - "bitvec", + "bitvec 1.0.0", "frame-system", "hex-literal", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "polkadot-core-primitives", "polkadot-parachain", @@ -7106,7 +7155,7 @@ name = "polkadot-runtime" version = "0.9.22" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -7153,7 +7202,7 @@ dependencies = [ "pallet-utility", "pallet-vesting", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-runtime-common", 
"polkadot-runtime-constants", @@ -7197,7 +7246,7 @@ name = "polkadot-runtime-common" version = "0.9.22" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -7219,7 +7268,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-treasury", "pallet-vesting", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-runtime-parachains", @@ -7260,7 +7309,7 @@ name = "polkadot-runtime-metrics" version = "0.9.22" dependencies = [ "bs58", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "sp-std", "sp-tracing", @@ -7272,7 +7321,7 @@ version = "0.9.22" dependencies = [ "assert_matches", "bitflags", - "bitvec", + "bitvec 1.0.0", "derive_more", "frame-benchmarking", "frame-support", @@ -7289,7 +7338,7 @@ dependencies = [ "pallet-staking", "pallet-timestamp", "pallet-vesting", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-runtime-metrics", @@ -7437,7 +7486,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "indexmap", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -7462,7 +7511,7 @@ dependencies = [ name = "polkadot-statement-table" version = "0.9.22" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-primitives", "sp-core", ] @@ -7472,7 +7521,7 @@ name = "polkadot-test-client" version = "0.9.22" dependencies = [ "futures 0.3.21", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-node-subsystem", "polkadot-primitives", "polkadot-test-runtime", @@ -7526,7 +7575,7 @@ name = "polkadot-test-runtime" version = "0.9.22" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-election-provider-support", "frame-executive", "frame-support", @@ -7551,7 +7600,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-vesting", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -7989,6 +8038,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" + [[package]] name = "radium" version = "0.7.0" @@ -8254,7 +8309,7 @@ dependencies = [ "env_logger 0.9.0", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "serde", "serde_json", "sp-core", @@ -8405,7 +8460,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-utility", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -8634,7 +8689,7 @@ dependencies = [ "ip_network", "libp2p", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "prost 0.10.3", "prost-build", "rand 0.7.3", @@ -8658,7 +8713,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -8678,7 +8733,7 @@ name = "sc-block-builder" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", 
"sc-client-api", "sp-api", "sp-block-builder", @@ -8696,7 +8751,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "impl-trait-for-tuples", "memmap2 0.5.0", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-chain-spec-derive", "sc-network", "sc-telemetry", @@ -8730,7 +8785,7 @@ dependencies = [ "libp2p", "log", "names", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "rand 0.7.3", "regex", "rpassword", @@ -8765,7 +8820,7 @@ dependencies = [ "futures 0.3.21", "hash-db", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-executor", "sc-transaction-pool-api", @@ -8796,7 +8851,7 @@ dependencies = [ "linked-hash-map", "log", "parity-db", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-client-api", "sc-state-db", @@ -8846,7 +8901,7 @@ dependencies = [ "num-bigint", "num-rational 0.2.4", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "rand 0.7.3", "retain_mut", @@ -8904,7 +8959,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "fork-tree", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-consensus", "sp-blockchain", @@ -8920,7 +8975,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-consensus", "sc-telemetry", @@ -8954,7 +9009,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "lazy_static", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-executor-common", "sc-executor-wasmi", @@ -8980,7 +9035,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "environmental", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-allocator", "sp-maybe-compressed-blob", "sp-sandbox", @@ -8997,7 +9052,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-allocator", "sc-executor-common", "sp-runtime-interface", @@ -9014,7 +9069,7 @@ dependencies = [ "cfg-if 1.0.0", "libc", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-wasm 0.42.2", "sc-allocator", "sc-executor-common", @@ -9038,7 +9093,7 @@ dependencies = [ "futures-timer", "hex", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "rand 0.8.5", "sc-block-builder", @@ -9073,7 +9128,7 @@ dependencies = [ "futures 0.3.21", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-finality-grandpa", "sc-rpc", @@ -9139,7 +9194,7 @@ dependencies = [ "linked_hash_set", "log", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "pin-project 1.0.10", "prost 0.10.3", @@ -9176,7 +9231,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "futures 0.3.21", "libp2p", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "prost-build", "sc-peerset", "smallvec", @@ -9207,7 +9262,7 @@ dependencies = [ "futures 0.3.21", "libp2p", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "prost 0.10.3", "prost-build", "sc-client-api", @@ -9231,7 +9286,7 
@@ dependencies = [ "libp2p", "log", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "prost 0.10.3", "prost-build", "sc-client-api", @@ -9262,7 +9317,7 @@ dependencies = [ "hyper-rustls", "num_cpus", "once_cell", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "rand 0.7.3", "sc-client-api", @@ -9307,7 +9362,7 @@ dependencies = [ "hash-db", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-block-builder", "sc-chain-spec", @@ -9336,7 +9391,7 @@ dependencies = [ "futures 0.3.21", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sc-chain-spec", "sc-transaction-pool-api", @@ -9377,7 +9432,7 @@ dependencies = [ "hash-db", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "parking_lot 0.12.0", "pin-project 1.0.10", @@ -9435,7 +9490,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "parity-util-mem-derive", "parking_lot 0.12.0", @@ -9449,7 +9504,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "jsonrpsee", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-chain-spec", "sc-client-api", "sc-consensus-babe", @@ -9550,7 +9605,7 @@ dependencies = [ "futures-timer", "linked-hash-map", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "parking_lot 0.12.0", "retain_mut", @@ -9600,10 +9655,10 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8980cafbe98a7ee7a9cc16b32ebce542c77883f512d83fbf2ddc8f6a85ea74c9" dependencies = [ - "bitvec", + "bitvec 1.0.0", "cfg-if 1.0.0", "derive_more", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info-derive", "serde", ] @@ -9988,7 +10043,7 @@ name = "slot-range-helper" version = "0.9.22" dependencies = [ "enumn", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", "sp-runtime", "sp-std", @@ -10065,7 +10120,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "hash-db", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api-proc-macro", "sp-core", "sp-runtime", @@ -10092,7 +10147,7 @@ name = "sp-application-crypto" version = "6.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-core", @@ -10107,7 +10162,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "integer-sqrt", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-debug-derive", @@ -10120,7 +10175,7 @@ name = "sp-authority-discovery" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-application-crypto", @@ -10134,7 +10189,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "async-trait", - "parity-scale-codec", + "parity-scale-codec 3.1.2", 
"sp-inherents", "sp-runtime", "sp-std", @@ -10145,7 +10200,7 @@ name = "sp-block-builder" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-inherents", "sp-runtime", @@ -10160,7 +10215,7 @@ dependencies = [ "futures 0.3.21", "log", "lru 0.7.5", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "sp-api", "sp-consensus", @@ -10179,7 +10234,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-core", "sp-inherents", "sp-runtime", @@ -10196,7 +10251,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "async-trait", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-api", @@ -10217,7 +10272,7 @@ name = "sp-consensus-slots" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-arithmetic", @@ -10231,7 +10286,7 @@ name = "sp-consensus-vrf" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "schnorrkel", "sp-core", @@ -10260,7 +10315,7 @@ dependencies = [ "log", "merlin", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "parking_lot 0.12.0", "primitive-types", @@ -10335,7 +10390,7 @@ version = "0.12.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "environmental", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-std", "sp-storage", ] @@ -10347,7 +10402,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "finality-grandpa", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-api", @@ -10365,7 +10420,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "async-trait", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-core", "sp-runtime", "sp-std", @@ -10381,7 +10436,7 @@ dependencies = [ "hash-db", "libsecp256k1", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "secp256k1", "sp-core", @@ -10416,7 +10471,7 @@ dependencies = [ "async-trait", "futures 0.3.21", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "schnorrkel", "serde", @@ -10440,7 +10495,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "serde", "sp-api", "sp-core", @@ -10454,7 +10509,7 @@ name = "sp-npos-elections" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "serde", "sp-arithmetic", @@ -10502,7 +10557,7 @@ dependencies = [ "hash256-std-hasher", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-util-mem", "paste", "rand 
0.7.3", @@ -10521,7 +10576,7 @@ version = "6.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "primitive-types", "sp-externalities", "sp-runtime-interface-proc-macro", @@ -10550,7 +10605,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-core", "sp-io", "sp-std", @@ -10572,7 +10627,7 @@ name = "sp-session" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-api", "sp-core", @@ -10586,7 +10641,7 @@ name = "sp-staking" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-runtime", "sp-std", @@ -10600,7 +10655,7 @@ dependencies = [ "hash-db", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parking_lot 0.12.0", "rand 0.7.3", "smallvec", @@ -10625,7 +10680,7 @@ version = "6.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "ref-cast", "serde", "sp-debug-derive", @@ -10653,7 +10708,7 @@ dependencies = [ "async-trait", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-api", "sp-inherents", "sp-runtime", @@ -10666,7 +10721,7 @@ name = "sp-tracing" version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-std", "tracing", "tracing-core", @@ -10689,7 +10744,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "async-trait", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-inherents", @@ -10705,7 +10760,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "hash-db", "memory-db", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "sp-core", "sp-std", @@ -10720,7 +10775,7 @@ version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "parity-wasm 0.42.2", "scale-info", "serde", @@ -10736,7 +10791,7 @@ name = "sp-version-proc-macro" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30e7730cf037518ec921c336fcbb0f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "proc-macro2", "quote", "syn", @@ -10749,7 +10804,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-std", "wasmi", "wasmtime", @@ -10798,7 +10853,7 @@ dependencies = [ "pallet-election-provider-multi-phase", "pallet-staking", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", 
"polkadot-core-primitives", "polkadot-runtime", @@ -10951,7 +11006,7 @@ dependencies = [ "futures 0.3.21", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-rpc-api", "sc-transaction-pool-api", @@ -10983,7 +11038,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#19b44f087b30 dependencies = [ "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-rpc-api", "scale-info", @@ -11005,7 +11060,7 @@ dependencies = [ "async-trait", "futures 0.3.21", "hex", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sc-client-api", "sc-client-db", "sc-consensus", @@ -11188,7 +11243,7 @@ name = "test-parachain-adder" version = "0.9.22" dependencies = [ "dlmalloc", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "sp-io", "sp-std", @@ -11204,7 +11259,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-cli", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -11235,7 +11290,7 @@ version = "0.9.22" dependencies = [ "dlmalloc", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "sp-io", "sp-std", @@ -11251,7 +11306,7 @@ dependencies = [ "futures 0.3.21", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-cli", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -11273,7 +11328,7 @@ dependencies = [ name = "test-parachains" version = "0.9.22" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-core", "test-parachain-adder", "test-parachain-halt", @@ -11767,7 +11822,7 @@ dependencies = [ "clap", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "remote-externalities", "sc-chain-spec", "sc-cli", @@ -11831,7 +11886,7 @@ version = "1.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ee73e6e4924fe940354b8d4d98cad5231175d615cd855b758adc658c0aac6a0" dependencies = [ - "cfg-if 0.1.10", + "cfg-if 1.0.0", "digest 0.10.3", "rand 0.8.5", "static_assertions", @@ -12407,7 +12462,7 @@ name = "westend-runtime" version = "0.9.22" dependencies = [ "beefy-primitives", - "bitvec", + "bitvec 1.0.0", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -12457,7 +12512,7 @@ dependencies = [ "pallet-vesting", "pallet-xcm", "pallet-xcm-benchmarks", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -12657,6 +12712,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + [[package]] name = "wyz" version = "0.5.0" @@ -12684,7 +12745,7 @@ dependencies = [ "derivative", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "scale-info", "xcm-procedural", ] @@ -12699,7 +12760,7 @@ dependencies = [ "pallet-balances", "pallet-transaction-payment", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-parachain", "polkadot-runtime-parachains", "scale-info", @@ -12720,7 +12781,7 @@ dependencies = [ "frame-support", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "sp-arithmetic", "sp-core", "sp-io", @@ -12764,7 +12825,7 @@ name = "xcm-simulator" version = "0.9.22" dependencies = [ "frame-support", - 
"parity-scale-codec", + "parity-scale-codec 3.1.2", "paste", "polkadot-core-primitives", "polkadot-parachain", @@ -12783,7 +12844,7 @@ dependencies = [ "frame-system", "pallet-balances", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-core-primitives", "polkadot-parachain", "polkadot-runtime-parachains", @@ -12807,7 +12868,7 @@ dependencies = [ "honggfuzz", "pallet-balances", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "polkadot-core-primitives", "polkadot-parachain", "polkadot-runtime-parachains", @@ -12863,7 +12924,7 @@ version = "0.9.22" dependencies = [ "futures-util", "lazy_static", - "parity-scale-codec", + "parity-scale-codec 3.1.2", "reqwest", "serde", "serde_json", diff --git a/Cargo.toml b/Cargo.toml index b02f6ac1b500..5a5f567fe57d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -68,6 +68,7 @@ members = [ "node/core/chain-selection", "node/core/dispute-coordinator", "node/core/parachains-inherent", + "node/core/prospective-parachains", "node/core/provisioner", "node/core/pvf", "node/core/pvf-checker", diff --git a/node/core/prospective-parachains/Cargo.toml b/node/core/prospective-parachains/Cargo.toml new file mode 100644 index 000000000000..71374285707b --- /dev/null +++ b/node/core/prospective-parachains/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "polkadot-node-core-prospective-parachains" +version = "0.9.16" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +futures = "0.3.19" +gum = { package = "tracing-gum", path = "../../gum" } +parity-scale-codec = "2" +thiserror = "1.0.30" +fatality = "0.0.6" +bitvec = "1" + +polkadot-primitives = { path = "../../../primitives" } +polkadot-node-primitives = { path = "../../primitives" } +polkadot-node-subsystem = { path = "../../subsystem" } +polkadot-node-subsystem-util = { path = "../../subsystem-util" } + +[dev-dependencies] +assert_matches = "1" +polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } + +[features] +# If not enabled, the dispute coordinator will do nothing. +disputes = [] diff --git a/node/core/prospective-parachains/src/error.rs b/node/core/prospective-parachains/src/error.rs new file mode 100644 index 000000000000..e7fa2f0e9641 --- /dev/null +++ b/node/core/prospective-parachains/src/error.rs @@ -0,0 +1,83 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Error types. 
+
+use futures::channel::oneshot;
+
+use polkadot_node_subsystem::{
+	errors::{ChainApiError, RuntimeApiError},
+	SubsystemError,
+};
+
+use crate::LOG_TARGET;
+use fatality::Nested;
+
+#[allow(missing_docs)]
+#[fatality::fatality(splitable)]
+pub enum Error {
+	#[fatal]
+	#[error("SubsystemError::Context error: {0}")]
+	SubsystemContext(String),
+
+	#[fatal]
+	#[error("Spawning a task failed: {0}")]
+	SpawnFailed(SubsystemError),
+
+	#[fatal]
+	#[error("Participation worker receiver exhausted.")]
+	ParticipationWorkerReceiverExhausted,
+
+	#[fatal]
+	#[error("Receiving message from overseer failed: {0}")]
+	SubsystemReceive(#[source] SubsystemError),
+
+	#[error(transparent)]
+	RuntimeApi(#[from] RuntimeApiError),
+
+	#[error(transparent)]
+	ChainApi(#[from] ChainApiError),
+
+	#[error(transparent)]
+	Subsystem(SubsystemError),
+
+	#[error("Request to chain API subsystem dropped")]
+	ChainApiRequestCanceled(oneshot::Canceled),
+
+	#[error("Request to runtime API subsystem dropped")]
+	RuntimeApiRequestCanceled(oneshot::Canceled),
+}
+
+/// General `Result` type.
+pub type Result<T> = std::result::Result<T, Error>;
+/// Result for non-fatal only failures.
+pub type JfyiErrorResult<T> = std::result::Result<T, JfyiError>;
+/// Result for fatal only failures.
+pub type FatalResult<T> = std::result::Result<T, FatalError>;
+
+/// Utility for consuming top-level errors and logging them.
+///
+/// We basically always want to try and continue on error. This utility function is meant to
+/// consume top-level errors by simply logging them.
+pub fn log_error(result: Result<()>, ctx: &'static str) -> FatalResult<()> {
+	match result.into_nested()? {
+		Ok(()) => Ok(()),
+		Err(jfyi) => {
+			gum::debug!(target: LOG_TARGET, error = ?jfyi, ctx);
+			Ok(())
+		},
+	}
+}
diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs
new file mode 100644
index 000000000000..9972b60490a1
--- /dev/null
+++ b/node/core/prospective-parachains/src/fragment_tree.rs
@@ -0,0 +1,1312 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! A tree utility for managing parachain fragments not referenced by the relay-chain.
+//!
+//! This module exposes two main types: [`FragmentTree`] and [`CandidateStorage`],
+//! which are meant to be used in close conjunction. Each tree is associated with a particular
+//! relay-parent, and it's expected that higher-level code will have a tree for each
+//! relay-chain block which might reasonably have blocks built upon it.
+//!
+//! Trees only store indices into the [`CandidateStorage`], and the storage is meant to
+//! be pruned when trees are dropped by higher-level code.
+//!
+//! Each node in the tree represents a candidate. Nodes do not uniquely refer to a parachain
+//! block for two reasons.
+//!   1. There's no requirement that head-data is unique for a parachain.
+//!      Furthermore, a parachain is under no obligation to be acyclic, mostly because
+//!      enforcing acyclicity would be inefficient. Practical use-cases are acyclic, but there is
+//!      still more than one way to reach the same head-data.
+//!   2. Candidates only refer to their parent by its head-data. This whole issue could be
+//!      resolved by having candidates reference their parent by candidate hash.
+//!
+//! The implication is that when we receive a candidate receipt, there are actually multiple
+//! possibilities for any candidates between the para-head recorded in the relay parent's state
+//! and the candidate in question.
+//!
+//! This means that our candidates need to handle multiple parents and that depth is an
+//! attribute of a node in a tree, not a candidate. Put another way, the same candidate might
+//! have different depths in different parts of the tree.
+//!
+//! As an extreme example, a candidate which produces head-data which is the same as its parent
+//! can correspond to multiple nodes within the same [`FragmentTree`]. Such cycles are bounded
+//! by the maximum depth allowed by the tree.
+//!
+//! As long as the number of candidates supplied to the [`CandidateStorage`] is bounded,
+//! [`FragmentTree`] complexity is bounded. This means that higher-level code needs to be selective
+//! about limiting the amount of candidates that are considered.
+//!
+//! The code in this module is not designed for speed or efficiency, but conceptual simplicity.
+//! Our assumption is that the amount of candidates and parachains we consider will be reasonably
+//! bounded and in practice will not exceed a few thousand at any time. This naive implementation
+//! will still perform fairly well under these conditions, despite being somewhat wasteful of memory.
+
+use std::collections::{BTreeMap, HashMap, HashSet};
+
+use super::LOG_TARGET;
+use bitvec::prelude::*;
+use polkadot_node_subsystem_util::inclusion_emulator::staging::{
+	ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo,
+};
+use polkadot_primitives::vstaging::{
+	BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId,
+	PersistedValidationData,
+};
+
+/// Kinds of failures to import a candidate into storage.
+#[derive(Debug, Clone, PartialEq)]
+pub enum CandidateStorageInsertionError {
+	/// An error indicating that a supplied candidate didn't match the persisted
+	/// validation data provided alongside it.
+	PersistedValidationDataMismatch,
+	/// The candidate was already known.
+	CandidateAlreadyKnown(CandidateHash),
+}
+
+pub(crate) struct CandidateStorage {
+	// Index from parent head hash to candidate hashes.
+	by_parent_head: HashMap<Hash, HashSet<CandidateHash>>,
+
+	// Index from candidate hash to fragment node.
+	by_candidate_hash: HashMap<CandidateHash, CandidateEntry>,
+}
+
+impl CandidateStorage {
+	/// Create a new `CandidateStorage`.
+	pub fn new() -> Self {
+		CandidateStorage { by_parent_head: HashMap::new(), by_candidate_hash: HashMap::new() }
+	}
+
+	/// Introduce a new candidate. The candidate passed to this function
+	/// should have been seconded before introduction.
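+	///
+	/// Returns an error if the candidate is already known or if the supplied
+	/// persisted validation data doesn't hash to the value committed to in the
+	/// candidate descriptor.
+	///
+	/// A minimal sketch of the expected call-site flow, where `candidate` and
+	/// `pvd` are illustrative placeholders obtained elsewhere (e.g. from
+	/// candidate validation):
+	///
+	/// ```ignore
+	/// let mut storage = CandidateStorage::new();
+	/// let candidate_hash = storage.add_candidate(candidate, pvd)?;
+	/// assert!(storage.contains(&candidate_hash));
+	/// ```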
+	pub fn add_candidate(
+		&mut self,
+		candidate: CommittedCandidateReceipt,
+		persisted_validation_data: PersistedValidationData,
+	) -> Result<CandidateHash, CandidateStorageInsertionError> {
+		let candidate_hash = candidate.hash();
+
+		if self.by_candidate_hash.contains_key(&candidate_hash) {
+			return Err(CandidateStorageInsertionError::CandidateAlreadyKnown(candidate_hash))
+		}
+
+		if persisted_validation_data.hash() != candidate.descriptor.persisted_validation_data_hash {
+			return Err(CandidateStorageInsertionError::PersistedValidationDataMismatch)
+		}
+
+		let parent_head_hash = persisted_validation_data.parent_head.hash();
+
+		let entry = CandidateEntry {
+			candidate_hash,
+			relay_parent: candidate.descriptor.relay_parent,
+			state: CandidateState::Seconded,
+			candidate: ProspectiveCandidate {
+				commitments: candidate.commitments,
+				collator: candidate.descriptor.collator,
+				collator_signature: candidate.descriptor.signature,
+				persisted_validation_data,
+				pov_hash: candidate.descriptor.pov_hash,
+				validation_code_hash: candidate.descriptor.validation_code_hash,
+			},
+		};
+
+		self.by_parent_head.entry(parent_head_hash).or_default().insert(candidate_hash);
+		// sanity-checked already.
+		self.by_candidate_hash.insert(candidate_hash, entry);
+
+		Ok(candidate_hash)
+	}
+
+	/// Note that an existing candidate has been backed.
+	pub fn mark_backed(&mut self, candidate_hash: &CandidateHash) {
+		if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) {
+			entry.state = CandidateState::Backed;
+		}
+	}
+
+	/// Whether a candidate is recorded as being backed.
+	pub fn is_backed(&self, candidate_hash: &CandidateHash) -> bool {
+		self.by_candidate_hash
+			.get(candidate_hash)
+			.map_or(false, |e| e.state == CandidateState::Backed)
+	}
+
+	/// Whether a candidate is contained within the storage already.
+	pub fn contains(&self, candidate_hash: &CandidateHash) -> bool {
+		self.by_candidate_hash.contains_key(candidate_hash)
+	}
+
+	/// Retain only candidates which pass the predicate.
+	pub(crate) fn retain(&mut self, pred: impl Fn(&CandidateHash) -> bool) {
+		self.by_candidate_hash.retain(|h, _v| pred(h));
+		self.by_parent_head.retain(|_parent, children| {
+			children.retain(|h| pred(h));
+			!children.is_empty()
+		})
+	}
+
+	fn iter_para_children<'a>(
+		&'a self,
+		parent_head_hash: &Hash,
+	) -> impl Iterator<Item = &'a CandidateEntry> + 'a {
+		let by_candidate_hash = &self.by_candidate_hash;
+		self.by_parent_head
+			.get(parent_head_hash)
+			.into_iter()
+			.flat_map(|hashes| hashes.iter())
+			.filter_map(move |h| by_candidate_hash.get(h))
+	}
+
+	fn get(&'_ self, candidate_hash: &CandidateHash) -> Option<&'_ CandidateEntry> {
+		self.by_candidate_hash.get(candidate_hash)
+	}
+}
+
+/// The state of a candidate.
+///
+/// Candidates aren't even considered until they've at least been seconded.
+#[derive(Debug, PartialEq)]
+enum CandidateState {
+	/// The candidate has been seconded.
+	Seconded,
+	/// The candidate has been completely backed by the group.
+	Backed,
+}
+
+struct CandidateEntry {
+	candidate_hash: CandidateHash,
+	relay_parent: Hash,
+	candidate: ProspectiveCandidate,
+	state: CandidateState,
+}
+
+/// The scope of a [`FragmentTree`].
+#[derive(Debug)]
+pub(crate) struct Scope {
+	para: ParaId,
+	relay_parent: RelayChainBlockInfo,
+	ancestors: BTreeMap<BlockNumber, RelayChainBlockInfo>,
+	ancestors_by_hash: HashMap<Hash, RelayChainBlockInfo>,
+	base_constraints: Constraints,
+	max_depth: usize,
+}
+
+/// An error indicating that the ancestors provided to a scope
+/// were in an unexpected order.
+#[derive(Debug)]
+pub struct UnexpectedAncestor;
+
+impl Scope {
+	/// Define a new [`Scope`].
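+	/// A scope fixes the parachain, the relay-parent, the set of usable ancestors,
+	/// the base constraints, and the maximum depth for one fragment tree; it is
+	/// immutable once created.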
+	///
+	/// All arguments are straightforward except the ancestors.
+	///
+	/// Ancestors should be in reverse order, starting with the parent
+	/// of the `relay_parent`, and proceeding backwards in block number
+	/// increments of 1. Ancestors not following these conditions will be
+	/// rejected.
+	///
+	/// This function will only consume ancestors up to the `min_relay_parent_number` of
+	/// the `base_constraints`.
+	///
+	/// Only ancestors whose children have the same session as the relay-parent's
+	/// children should be provided.
+	///
+	/// It is allowed to provide zero ancestors.
+	pub fn with_ancestors(
+		para: ParaId,
+		relay_parent: RelayChainBlockInfo,
+		base_constraints: Constraints,
+		max_depth: usize,
+		ancestors: impl IntoIterator<Item = RelayChainBlockInfo>,
+	) -> Result<Self, UnexpectedAncestor> {
+		let mut ancestors_map = BTreeMap::new();
+		let mut ancestors_by_hash = HashMap::new();
+		{
+			let mut prev = relay_parent.number;
+			for ancestor in ancestors {
+				if prev == 0 {
+					return Err(UnexpectedAncestor)
+				} else if ancestor.number != prev - 1 {
+					return Err(UnexpectedAncestor)
+				} else if prev == base_constraints.min_relay_parent_number {
+					break
+				} else {
+					prev = ancestor.number;
+					ancestors_by_hash.insert(ancestor.hash, ancestor.clone());
+					ancestors_map.insert(ancestor.number, ancestor);
+				}
+			}
+		}
+
+		Ok(Scope {
+			para,
+			relay_parent,
+			base_constraints,
+			max_depth,
+			ancestors: ancestors_map,
+			ancestors_by_hash,
+		})
+	}
+
+	/// Get the earliest relay-parent allowed in the scope of the fragment tree.
+	pub fn earliest_relay_parent(&self) -> RelayChainBlockInfo {
+		self.ancestors
+			.iter()
+			.next()
+			.map(|(_, v)| v.clone())
+			.unwrap_or_else(|| self.relay_parent.clone())
+	}
+
+	fn ancestor_by_hash(&self, hash: &Hash) -> Option<RelayChainBlockInfo> {
+		if hash == &self.relay_parent.hash {
+			return Some(self.relay_parent.clone())
+		}
+
+		self.ancestors_by_hash.get(hash).map(|info| info.clone())
+	}
+}
+
+// We use indices into a flat vector to refer to nodes in the tree.
+// Every tree also has an implicit root.
+#[derive(Debug, Clone, Copy, PartialEq)]
+enum NodePointer {
+	Root,
+	Storage(usize),
+}
+
+/// This is a tree of candidates based on some underlying storage of candidates
+/// and a scope.
+pub(crate) struct FragmentTree {
+	scope: Scope,
+
+	// Invariant: a contiguous prefix of the 'nodes' storage will contain
+	// the top-level children.
+	nodes: Vec<FragmentNode>,
+
+	// The candidates stored in this tree, mapped to a bitvec indicating the depths
+	// where the candidate is stored.
+	candidates: HashMap<CandidateHash, BitVec<u16, Msb0>>,
+}
+
+impl FragmentTree {
+	/// Create a new [`FragmentTree`] with given scope and populated from the
+	/// storage.
+	pub fn populate(scope: Scope, storage: &CandidateStorage) -> Self {
+		gum::trace!(
+			target: LOG_TARGET,
+			relay_parent = ?scope.relay_parent.hash,
+			relay_parent_num = scope.relay_parent.number,
+			para_id = ?scope.para,
+			ancestors = scope.ancestors.len(),
+			"Instantiating Fragment Tree",
+		);
+
+		let mut tree = FragmentTree { scope, nodes: Vec::new(), candidates: HashMap::new() };
+
+		tree.populate_from_bases(storage, vec![NodePointer::Root]);
+
+		tree
+	}
+
+	/// Get the scope of the Fragment Tree.
+	pub fn scope(&self) -> &Scope {
+		&self.scope
+	}
+
+	// Inserts a node and updates child references in a non-root parent.
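+	//
+	// Nodes with a root parent are kept in a contiguous prefix of `nodes` (the
+	// invariant documented on `FragmentTree::nodes`), so a new root child may be
+	// inserted into that prefix rather than appended at the end.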
+	fn insert_node(&mut self, node: FragmentNode) {
+		let pointer = NodePointer::Storage(self.nodes.len());
+		let parent_pointer = node.parent;
+		let candidate_hash = node.candidate_hash;
+
+		let max_depth = self.scope.max_depth;
+
+		self.candidates
+			.entry(candidate_hash)
+			.or_insert_with(|| bitvec![u16, Msb0; 0; max_depth + 1])
+			.set(node.depth, true);
+
+		match parent_pointer {
+			NodePointer::Storage(ptr) => {
+				self.nodes.push(node);
+				self.nodes[ptr].children.push((pointer, candidate_hash))
+			},
+			NodePointer::Root => {
+				// Maintain the invariant of node storage beginning with depth-0.
+				if self.nodes.last().map_or(true, |last| last.parent == NodePointer::Root) {
+					self.nodes.push(node);
+				} else {
+					let pos =
+						self.nodes.iter().take_while(|n| n.parent == NodePointer::Root).count();
+					self.nodes.insert(pos, node);
+				}
+			},
+		}
+	}
+
+	fn node_has_candidate_child(
+		&self,
+		pointer: NodePointer,
+		candidate_hash: &CandidateHash,
+	) -> bool {
+		self.node_candidate_child(pointer, candidate_hash).is_some()
+	}
+
+	fn node_candidate_child(
+		&self,
+		pointer: NodePointer,
+		candidate_hash: &CandidateHash,
+	) -> Option<NodePointer> {
+		match pointer {
+			NodePointer::Root => self
+				.nodes
+				.iter()
+				.take_while(|n| n.parent == NodePointer::Root)
+				.enumerate()
+				.find(|(_, n)| &n.candidate_hash == candidate_hash)
+				.map(|(i, _)| NodePointer::Storage(i)),
+			NodePointer::Storage(ptr) =>
+				self.nodes.get(ptr).and_then(|n| n.candidate_child(candidate_hash)),
+		}
+	}
+
+	/// Returns an O(n) iterator over the hashes of candidates contained in the
+	/// tree.
+	pub(crate) fn candidates<'a>(&'a self) -> impl Iterator<Item = CandidateHash> + 'a {
+		self.candidates.keys().cloned()
+	}
+
+	/// Whether the candidate exists and at what depths.
+	pub(crate) fn candidate(&self, candidate: &CandidateHash) -> Option<Vec<usize>> {
+		self.candidates.get(candidate).map(|d| d.iter_ones().collect())
+	}
+
+	/// Add a candidate and recursively populate from storage.
+	pub(crate) fn add_and_populate(&mut self, hash: CandidateHash, storage: &CandidateStorage) {
+		let candidate_entry = match storage.get(&hash) {
+			None => return,
+			Some(e) => e,
+		};
+
+		let candidate_parent = &candidate_entry.candidate.persisted_validation_data.parent_head;
+
+		// Select an initial set of bases whose required parent head-data matches that of the candidate.
+		let root_base = if &self.scope.base_constraints.required_parent == candidate_parent {
+			Some(NodePointer::Root)
+		} else {
+			None
+		};
+
+		let non_root_bases = self
+			.nodes
+			.iter()
+			.enumerate()
+			.filter(|(_, n)| {
+				n.cumulative_modifications.required_parent.as_ref() == Some(candidate_parent)
+			})
+			.map(|(i, _)| NodePointer::Storage(i));
+
+		let bases = root_base.into_iter().chain(non_root_bases).collect();
+
+		// Pass this into the population function, which will sanity-check stuff like depth, fragments,
+		// etc. and then recursively populate.
+		self.populate_from_bases(storage, bases);
+	}
+
+	/// Returns the hypothetical depths where a candidate with the given hash and parent head data
+	/// would be added to the tree, without applying other candidates recursively on top of it.
+	///
+	/// If the candidate is already known, this returns the actual depths where this
+	/// candidate is part of the tree.
+	pub(crate) fn hypothetical_depths(
+		&self,
+		hash: CandidateHash,
+		parent_head_data_hash: Hash,
+		candidate_relay_parent: Hash,
+	) -> Vec<usize> {
+		// if known.
+		if let Some(depths) = self.candidates.get(&hash) {
+			return depths.iter_ones().collect()
+		}
+
+		// if out of scope.
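+		// I.e. the candidate's relay-parent is neither the scope's relay-parent
+		// nor one of its in-scope ancestors: no depths are possible.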
+		let candidate_relay_parent_number =
+			if self.scope.relay_parent.hash == candidate_relay_parent {
+				self.scope.relay_parent.number
+			} else if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) {
+				info.number
+			} else {
+				return Vec::new()
+			};
+
+		let max_depth = self.scope.max_depth;
+		let mut depths = bitvec![u16, Msb0; 0; max_depth + 1];
+
+		// iterate over all nodes where the parent head-data matches, the relay-parent
+		// number does not exceed the candidate's, and the depth is below max_depth.
+		for node in &self.nodes {
+			if node.depth == max_depth {
+				continue
+			}
+			if node.fragment.relay_parent().number > candidate_relay_parent_number {
+				continue
+			}
+			if node.head_data_hash == parent_head_data_hash {
+				depths.set(node.depth + 1, true);
+			}
+		}
+
+		// compare against root as well.
+		if self.scope.base_constraints.required_parent.hash() == parent_head_data_hash {
+			depths.set(0, true);
+		}
+
+		depths.iter_ones().collect()
+	}
+
+	/// Select a candidate after the given `required_path` which passes
+	/// the predicate.
+	///
+	/// If there are multiple possibilities, this will select the first one.
+	///
+	/// This returns `None` if there is no candidate meeting those criteria.
+	///
+	/// The intention of the `required_path` is to allow queries on the basis of
+	/// one or more candidates which were previously pending availability becoming
+	/// available and opening up more room on the core.
+	pub(crate) fn select_child(
+		&self,
+		required_path: &[CandidateHash],
+		pred: impl Fn(&CandidateHash) -> bool,
+	) -> Option<CandidateHash> {
+		let base_node = {
+			// traverse the required path.
+			let mut node = NodePointer::Root;
+			for required_step in required_path {
+				node = self.node_candidate_child(node, &required_step)?;
+			}
+
+			node
+		};
+
+		// TODO [now]: taking the first selection might introduce bias
+		// or become gameable.
+		//
+		// For plausibly unique parachains, this shouldn't matter much.
+		// figure out alternative selection criteria?
+		match base_node {
+			NodePointer::Root => self
+				.nodes
+				.iter()
+				.take_while(|n| n.parent == NodePointer::Root)
+				.filter(|n| pred(&n.candidate_hash))
+				.map(|n| n.candidate_hash)
+				.next(),
+			NodePointer::Storage(ptr) =>
+				self.nodes[ptr].children.iter().filter(|n| pred(&n.1)).map(|n| n.1).next(),
+		}
+	}
+
+	fn populate_from_bases<'a>(
+		&mut self,
+		storage: &'a CandidateStorage,
+		initial_bases: Vec<NodePointer>,
+	) {
+		// Populate the tree breadth-first.
+		let mut last_sweep_start = None;
+
+		loop {
+			let sweep_start = self.nodes.len();
+
+			if Some(sweep_start) == last_sweep_start {
+				break
+			}
+
+			let parents: Vec<NodePointer> = if let Some(last_start) = last_sweep_start {
+				(last_start..self.nodes.len()).map(NodePointer::Storage).collect()
+			} else {
+				initial_bases.clone()
+			};
+
+			// 1. get parent head and find constraints
+			// 2. iterate all candidates building on the right head and viable relay parent
+			// 3.
add new node + for parent_pointer in parents { + let (modifications, child_depth, earliest_rp) = match parent_pointer { + NodePointer::Root => + (ConstraintModifications::identity(), 0, self.scope.earliest_relay_parent()), + NodePointer::Storage(ptr) => { + let node = &self.nodes[ptr]; + let parent_rp = self + .scope + .ancestor_by_hash(&node.relay_parent()) + .expect("nodes in tree can only contain ancestors within scope; qed"); + + (node.cumulative_modifications.clone(), node.depth + 1, parent_rp) + }, + }; + + if child_depth > self.scope.max_depth { + continue + } + + let child_constraints = + match self.scope.base_constraints.apply_modifications(&modifications) { + Err(e) => { + gum::debug!( + target: LOG_TARGET, + new_parent_head = ?modifications.required_parent, + err = ?e, + "Failed to apply modifications", + ); + + continue + }, + Ok(c) => c, + }; + + // Add nodes to tree wherever + // 1. parent hash is correct + // 2. relay-parent does not move backwards + // 3. candidate outputs fulfill constraints + let required_head_hash = child_constraints.required_parent.hash(); + for candidate in storage.iter_para_children(&required_head_hash) { + let relay_parent = match self.scope.ancestor_by_hash(&candidate.relay_parent) { + None => continue, // not in chain + Some(info) => { + if info.number < earliest_rp.number { + // moved backwards + continue + } + + info + }, + }; + + // don't add candidates where the parent already has it as a child. + if self.node_has_candidate_child(parent_pointer, &candidate.candidate_hash) { + continue + } + + let fragment = { + let f = Fragment::new( + relay_parent.clone(), + child_constraints.clone(), + candidate.candidate.clone(), + ); + + match f { + Ok(f) => f, + Err(e) => { + gum::debug!( + target: LOG_TARGET, + err = ?e, + ?relay_parent, + candidate_hash = ?candidate.candidate_hash, + "Failed to instantiate fragment", + ); + + continue + }, + } + }; + + let mut cumulative_modifications = modifications.clone(); + cumulative_modifications.stack(fragment.constraint_modifications()); + + let head_data_hash = fragment.candidate().commitments.head_data.hash(); + let node = FragmentNode { + parent: parent_pointer, + fragment, + candidate_hash: candidate.candidate_hash.clone(), + depth: child_depth, + cumulative_modifications, + children: Vec::new(), + head_data_hash, + }; + + self.insert_node(node); + } + } + + last_sweep_start = Some(sweep_start); + } + } +} + +struct FragmentNode { + // A pointer to the parent node. 
+	parent: NodePointer,
+	fragment: Fragment,
+	candidate_hash: CandidateHash,
+	depth: usize,
+	cumulative_modifications: ConstraintModifications,
+	head_data_hash: Hash,
+	children: Vec<(NodePointer, CandidateHash)>,
+}
+
+impl FragmentNode {
+	fn relay_parent(&self) -> Hash {
+		self.fragment.relay_parent().hash
+	}
+
+	fn candidate_child(&self, candidate_hash: &CandidateHash) -> Option<NodePointer> {
+		self.children.iter().find(|(_, c)| c == candidate_hash).map(|(p, _)| *p)
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use assert_matches::assert_matches;
+	use polkadot_node_subsystem_util::inclusion_emulator::staging::InboundHrmpLimitations;
+	use polkadot_primitives::vstaging::{
+		BlockNumber, CandidateCommitments, CandidateDescriptor, HeadData,
+	};
+	use polkadot_primitives_test_helpers as test_helpers;
+
+	fn make_constraints(
+		min_relay_parent_number: BlockNumber,
+		valid_watermarks: Vec<BlockNumber>,
+		required_parent: HeadData,
+	) -> Constraints {
+		Constraints {
+			min_relay_parent_number,
+			max_pov_size: 1_000_000,
+			max_code_size: 1_000_000,
+			ump_remaining: 10,
+			ump_remaining_bytes: 1_000,
+			dmp_remaining_messages: 10,
+			hrmp_inbound: InboundHrmpLimitations { valid_watermarks },
+			hrmp_channels_out: HashMap::new(),
+			max_hrmp_num_per_candidate: 0,
+			required_parent,
+			validation_code_hash: Hash::repeat_byte(42).into(),
+			upgrade_restriction: None,
+			future_validation_code: None,
+		}
+	}
+
+	fn make_committed_candidate(
+		para_id: ParaId,
+		relay_parent: Hash,
+		relay_parent_number: BlockNumber,
+		parent_head: HeadData,
+		para_head: HeadData,
+		hrmp_watermark: BlockNumber,
+	) -> (PersistedValidationData, CommittedCandidateReceipt) {
+		let persisted_validation_data = PersistedValidationData {
+			parent_head,
+			relay_parent_number,
+			relay_parent_storage_root: Hash::repeat_byte(69),
+			max_pov_size: 1_000_000,
+		};
+
+		let candidate = CommittedCandidateReceipt {
+			descriptor: CandidateDescriptor {
+				para_id,
+				relay_parent,
+				collator: test_helpers::dummy_collator(),
+				persisted_validation_data_hash: persisted_validation_data.hash(),
+				pov_hash: Hash::repeat_byte(1),
+				erasure_root: Hash::repeat_byte(1),
+				signature: test_helpers::dummy_collator_signature(),
+				para_head: para_head.hash(),
+				validation_code_hash: Hash::repeat_byte(42).into(),
+			},
+			commitments: CandidateCommitments {
+				upward_messages: Vec::new(),
+				horizontal_messages: Vec::new(),
+				new_validation_code: None,
+				head_data: para_head,
+				processed_downward_messages: 0,
+				hrmp_watermark,
+			},
+		};
+
+		(persisted_validation_data, candidate)
+	}
+
+	#[test]
+	fn scope_rejects_ancestors_that_skip_blocks() {
+		let para_id = ParaId::from(5u32);
+		let relay_parent = RelayChainBlockInfo {
+			number: 10,
+			hash: Hash::repeat_byte(10),
+			storage_root: Hash::repeat_byte(69),
+		};
+
+		let ancestors = vec![RelayChainBlockInfo {
+			number: 8,
+			hash: Hash::repeat_byte(8),
+			storage_root: Hash::repeat_byte(69),
+		}];
+
+		let max_depth = 2;
+		let base_constraints = make_constraints(8, vec![8, 9], vec![1, 2, 3].into());
+
+		assert_matches!(
+			Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors,),
+			Err(UnexpectedAncestor)
+		);
+	}
+
+	#[test]
+	fn scope_rejects_ancestor_for_0_block() {
+		let para_id = ParaId::from(5u32);
+		let relay_parent = RelayChainBlockInfo {
+			number: 0,
+			hash: Hash::repeat_byte(0),
+			storage_root: Hash::repeat_byte(69),
+		};
+
+		let ancestors = vec![RelayChainBlockInfo {
+			number: 99999,
+			hash: Hash::repeat_byte(99),
+			storage_root: Hash::repeat_byte(69),
+		}];
+
+		let max_depth = 2;
+		let
base_constraints = make_constraints(0, vec![], vec![1, 2, 3].into()); + + assert_matches!( + Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors,), + Err(UnexpectedAncestor) + ); + } + + #[test] + fn scope_only_takes_ancestors_up_to_min() { + let para_id = ParaId::from(5u32); + let relay_parent = RelayChainBlockInfo { + number: 5, + hash: Hash::repeat_byte(0), + storage_root: Hash::repeat_byte(69), + }; + + let ancestors = vec![ + RelayChainBlockInfo { + number: 4, + hash: Hash::repeat_byte(4), + storage_root: Hash::repeat_byte(69), + }, + RelayChainBlockInfo { + number: 3, + hash: Hash::repeat_byte(3), + storage_root: Hash::repeat_byte(69), + }, + RelayChainBlockInfo { + number: 2, + hash: Hash::repeat_byte(2), + storage_root: Hash::repeat_byte(69), + }, + ]; + + let max_depth = 2; + let base_constraints = make_constraints(3, vec![2], vec![1, 2, 3].into()); + + let scope = + Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors) + .unwrap(); + + assert_eq!(scope.ancestors.len(), 2); + assert_eq!(scope.ancestors_by_hash.len(), 2); + } + + #[test] + fn storage_add_candidate() { + let mut storage = CandidateStorage::new(); + + let (pvd, candidate) = make_committed_candidate( + ParaId::from(5u32), + Hash::repeat_byte(69), + 8, + vec![4, 5, 6].into(), + vec![1, 2, 3].into(), + 7, + ); + + let candidate_hash = candidate.hash(); + let parent_head_hash = pvd.parent_head.hash(); + + storage.add_candidate(candidate, pvd).unwrap(); + assert!(storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); + } + + #[test] + fn storage_retain() { + let mut storage = CandidateStorage::new(); + + let (pvd, candidate) = make_committed_candidate( + ParaId::from(5u32), + Hash::repeat_byte(69), + 8, + vec![4, 5, 6].into(), + vec![1, 2, 3].into(), + 7, + ); + + let candidate_hash = candidate.hash(); + let parent_head_hash = pvd.parent_head.hash(); + + storage.add_candidate(candidate, pvd).unwrap(); + storage.retain(|_| true); + assert!(storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); + + storage.retain(|_| false); + assert!(!storage.contains(&candidate_hash)); + assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 0); + } + + #[test] + fn populate_works_recursively() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + let relay_parent_b = Hash::repeat_byte(2); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_b, + 1, + vec![0x0b].into(), + vec![0x0c].into(), + 1, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let ancestors = vec![RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }]; + + let relay_parent_b_info = RelayChainBlockInfo { + number: pvd_b.relay_parent_number, + hash: relay_parent_b, + storage_root: pvd_b.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = + Scope::with_ancestors(para_id, relay_parent_b_info, base_constraints, 4, ancestors) + .unwrap(); + let tree 
= FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert!(candidates.contains(&candidate_a_hash)); + assert!(candidates.contains(&candidate_b_hash)); + + assert_eq!(tree.nodes.len(), 2); + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash); + assert_eq!(tree.nodes[0].depth, 0); + + assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0)); + assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash); + assert_eq!(tree.nodes[1].depth, 1); + } + + #[test] + fn children_of_root_are_contiguous() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + let relay_parent_b = Hash::repeat_byte(2); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_b, + 1, + vec![0x0b].into(), + vec![0x0c].into(), + 1, + ); + + let (pvd_a2, candidate_a2) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b, 1].into(), + 0, + ); + let candidate_a2_hash = candidate_a2.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let ancestors = vec![RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }]; + + let relay_parent_b_info = RelayChainBlockInfo { + number: pvd_b.relay_parent_number, + hash: relay_parent_b, + storage_root: pvd_b.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + let scope = + Scope::with_ancestors(para_id, relay_parent_b_info, base_constraints, 4, ancestors) + .unwrap(); + let mut tree = FragmentTree::populate(scope, &storage); + + storage.add_candidate(candidate_a2, pvd_a2).unwrap(); + tree.add_and_populate(candidate_a2_hash, &storage); + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 3); + + assert_eq!(tree.nodes[0].parent, NodePointer::Root); + assert_eq!(tree.nodes[1].parent, NodePointer::Root); + assert_eq!(tree.nodes[2].parent, NodePointer::Storage(0)); + } + + #[test] + fn add_candidate_child_of_root() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0c].into(), + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + storage.add_candidate(candidate_a, pvd_a).unwrap(); + let scope = + Scope::with_ancestors(para_id, relay_parent_a_info, base_constraints, 4, vec![]) + .unwrap(); + let mut tree = FragmentTree::populate(scope, &storage); + + storage.add_candidate(candidate_b, pvd_b).unwrap(); + tree.add_and_populate(candidate_b_hash, &storage); + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + + 
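+		// Both candidates build on the base parent head-data (0x0a), so they are
+		// siblings attached directly to the root.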
assert_eq!(tree.nodes[0].parent, NodePointer::Root);
+		assert_eq!(tree.nodes[1].parent, NodePointer::Root);
+	}
+
+	#[test]
+	fn add_candidate_child_of_non_root() {
+		let mut storage = CandidateStorage::new();
+
+		let para_id = ParaId::from(5u32);
+		let relay_parent_a = Hash::repeat_byte(1);
+
+		let (pvd_a, candidate_a) = make_committed_candidate(
+			para_id,
+			relay_parent_a,
+			0,
+			vec![0x0a].into(),
+			vec![0x0b].into(),
+			0,
+		);
+
+		let (pvd_b, candidate_b) = make_committed_candidate(
+			para_id,
+			relay_parent_a,
+			0,
+			vec![0x0b].into(),
+			vec![0x0c].into(),
+			0,
+		);
+		let candidate_b_hash = candidate_b.hash();
+
+		let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
+
+		let relay_parent_a_info = RelayChainBlockInfo {
+			number: pvd_a.relay_parent_number,
+			hash: relay_parent_a,
+			storage_root: pvd_a.relay_parent_storage_root,
+		};
+
+		storage.add_candidate(candidate_a, pvd_a).unwrap();
+		let scope =
+			Scope::with_ancestors(para_id, relay_parent_a_info, base_constraints, 4, vec![])
+				.unwrap();
+		let mut tree = FragmentTree::populate(scope, &storage);
+
+		storage.add_candidate(candidate_b, pvd_b).unwrap();
+		tree.add_and_populate(candidate_b_hash, &storage);
+		let candidates: Vec<_> = tree.candidates().collect();
+		assert_eq!(candidates.len(), 2);
+
+		assert_eq!(tree.nodes[0].parent, NodePointer::Root);
+		assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0));
+	}
+
+	#[test]
+	fn graceful_cycle_of_0() {
+		let mut storage = CandidateStorage::new();
+
+		let para_id = ParaId::from(5u32);
+		let relay_parent_a = Hash::repeat_byte(1);
+
+		let (pvd_a, candidate_a) = make_committed_candidate(
+			para_id,
+			relay_parent_a,
+			0,
+			vec![0x0a].into(),
+			vec![0x0a].into(), // input same as output
+			0,
+		);
+		let candidate_a_hash = candidate_a.hash();
+		let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
+
+		let relay_parent_a_info = RelayChainBlockInfo {
+			number: pvd_a.relay_parent_number,
+			hash: relay_parent_a,
+			storage_root: pvd_a.relay_parent_storage_root,
+		};
+
+		let max_depth = 4;
+		storage.add_candidate(candidate_a, pvd_a).unwrap();
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_a_info,
+			base_constraints,
+			max_depth,
+			vec![],
+		)
+		.unwrap();
+		let tree = FragmentTree::populate(scope, &storage);
+
+		let candidates: Vec<_> = tree.candidates().collect();
+		assert_eq!(candidates.len(), 1);
+		assert_eq!(tree.nodes.len(), max_depth + 1);
+
+		assert_eq!(tree.nodes[0].parent, NodePointer::Root);
+		assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0));
+		assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1));
+		assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2));
+		assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3));
+
+		assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash);
+		assert_eq!(tree.nodes[1].candidate_hash, candidate_a_hash);
+		assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash);
+		assert_eq!(tree.nodes[3].candidate_hash, candidate_a_hash);
+		assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash);
+	}
+
+	#[test]
+	fn graceful_cycle_of_1() {
+		let mut storage = CandidateStorage::new();
+
+		let para_id = ParaId::from(5u32);
+		let relay_parent_a = Hash::repeat_byte(1);
+
+		let (pvd_a, candidate_a) = make_committed_candidate(
+			para_id,
+			relay_parent_a,
+			0,
+			vec![0x0a].into(),
+			vec![0x0b].into(), // A's output is B's input
+			0,
+		);
+		let candidate_a_hash = candidate_a.hash();
+
+		let (pvd_b, candidate_b) = make_committed_candidate(
+			para_id,
+			relay_parent_a,
+			0,
+			vec![0x0b].into(),
+			vec![0x0a].into(), // B's output is A's input
+			0,
+		);
+		let candidate_b_hash = candidate_b.hash();
+
+		let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
+
+		let relay_parent_a_info = RelayChainBlockInfo {
+			number: pvd_a.relay_parent_number,
+			hash: relay_parent_a,
+			storage_root: pvd_a.relay_parent_storage_root,
+		};
+
+		let max_depth = 4;
+		storage.add_candidate(candidate_a, pvd_a).unwrap();
+		storage.add_candidate(candidate_b, pvd_b).unwrap();
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_a_info,
+			base_constraints,
+			max_depth,
+			vec![],
+		)
+		.unwrap();
+		let tree = FragmentTree::populate(scope, &storage);
+
+		let candidates: Vec<_> = tree.candidates().collect();
+		assert_eq!(candidates.len(), 2);
+		assert_eq!(tree.nodes.len(), max_depth + 1);
+
+		assert_eq!(tree.nodes[0].parent, NodePointer::Root);
+		assert_eq!(tree.nodes[1].parent, NodePointer::Storage(0));
+		assert_eq!(tree.nodes[2].parent, NodePointer::Storage(1));
+		assert_eq!(tree.nodes[3].parent, NodePointer::Storage(2));
+		assert_eq!(tree.nodes[4].parent, NodePointer::Storage(3));
+
+		assert_eq!(tree.nodes[0].candidate_hash, candidate_a_hash);
+		assert_eq!(tree.nodes[1].candidate_hash, candidate_b_hash);
+		assert_eq!(tree.nodes[2].candidate_hash, candidate_a_hash);
+		assert_eq!(tree.nodes[3].candidate_hash, candidate_b_hash);
+		assert_eq!(tree.nodes[4].candidate_hash, candidate_a_hash);
+	}
+
+	#[test]
+	fn hypothetical_depths_known_and_unknown() {
+		let mut storage = CandidateStorage::new();
+
+		let para_id = ParaId::from(5u32);
+		let relay_parent_a = Hash::repeat_byte(1);
+
+		let (pvd_a, candidate_a) = make_committed_candidate(
+			para_id,
+			relay_parent_a,
+			0,
+			vec![0x0a].into(),
+			vec![0x0b].into(), // A's output is B's input
+			0,
+		);
+		let candidate_a_hash = candidate_a.hash();
+
+		let (pvd_b, candidate_b) = make_committed_candidate(
+			para_id,
+			relay_parent_a,
+			0,
+			vec![0x0b].into(),
+			vec![0x0a].into(), // B's output is A's input
+			0,
+		);
+		let candidate_b_hash = candidate_b.hash();
+
+		let base_constraints = make_constraints(0, vec![0], vec![0x0a].into());
+
+		let relay_parent_a_info = RelayChainBlockInfo {
+			number: pvd_a.relay_parent_number,
+			hash: relay_parent_a,
+			storage_root: pvd_a.relay_parent_storage_root,
+		};
+
+		let max_depth = 4;
+		storage.add_candidate(candidate_a, pvd_a).unwrap();
+		storage.add_candidate(candidate_b, pvd_b).unwrap();
+		let scope = Scope::with_ancestors(
+			para_id,
+			relay_parent_a_info,
+			base_constraints,
+			max_depth,
+			vec![],
+		)
+		.unwrap();
+		let tree = FragmentTree::populate(scope, &storage);
+
+		let candidates: Vec<_> = tree.candidates().collect();
+		assert_eq!(candidates.len(), 2);
+		assert_eq!(tree.nodes.len(), max_depth + 1);
+
+		assert_eq!(
+			tree.hypothetical_depths(
+				candidate_a_hash,
+				HeadData::from(vec![0x0a]).hash(),
+				relay_parent_a,
+			),
+			vec![0, 2, 4],
+		);
+
+		assert_eq!(
+			tree.hypothetical_depths(
+				candidate_b_hash,
+				HeadData::from(vec![0x0b]).hash(),
+				relay_parent_a,
+			),
+			vec![1, 3],
+		);
+
+		assert_eq!(
+			tree.hypothetical_depths(
+				CandidateHash(Hash::repeat_byte(21)),
+				HeadData::from(vec![0x0a]).hash(),
+				relay_parent_a,
+			),
+			vec![0, 2, 4],
+		);
+
+		assert_eq!(
+			tree.hypothetical_depths(
+				CandidateHash(Hash::repeat_byte(22)),
+				HeadData::from(vec![0x0b]).hash(),
+				relay_parent_a,
+			),
+			vec![1, 3]
+		);
+	}
+}
diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs
new file mode 100644
index 000000000000..0e447aa69b1f
--- /dev/null
+++ b/node/core/prospective-parachains/src/lib.rs
@@ -0,0 +1,591 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Implementation of the Prospective Parachains subsystem - this tracks and handles
+//! prospective parachain fragments and informs other backing-stage subsystems
+//! of work to be done.
+//!
+//! This is the main coordinator of work within the node for the collation and
+//! backing phases of parachain consensus.
+//!
+//! This is primarily an implementation of "Fragment Trees", as described in
+//! [`polkadot_node_subsystem_util::inclusion_emulator::staging`].
+//!
+//! This also handles concerns such as the relay-chain being forkful,
+//! session changes, and predicting validator group assignments.
+
+use std::collections::{HashMap, HashSet};
+
+use futures::{channel::oneshot, prelude::*};
+
+use polkadot_node_subsystem::{
+	messages::{
+		ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest,
+		ProspectiveParachainsMessage, RuntimeApiMessage, RuntimeApiRequest,
+	},
+	overseer, ActiveLeavesUpdate, FromOverseer, OverseerSignal, SpawnedSubsystem, SubsystemError,
+};
+use polkadot_node_subsystem_util::inclusion_emulator::staging::{Constraints, RelayChainBlockInfo};
+use polkadot_primitives::vstaging::{
+	BlockNumber, CandidateHash, CommittedCandidateReceipt, CoreState, Hash, Id as ParaId,
+	PersistedValidationData,
+};
+
+use crate::{
+	error::{FatalError, FatalResult, JfyiError, JfyiErrorResult, Result},
+	fragment_tree::{CandidateStorage, FragmentTree, Scope as TreeScope},
+};
+
+mod error;
+mod fragment_tree;
+
+const LOG_TARGET: &str = "parachain::prospective-parachains";
+
+// The maximum depth the subsystem will allow. 'depth' is defined as the
+// amount of blocks between the para head in a relay-chain block's state
+// and a candidate with a particular relay-parent.
+//
+// This value is chosen mostly for reasons of resource-limitation.
+// Without it, a malicious validator group could create arbitrarily long,
+// useless prospective parachains and DoS honest nodes.
+const MAX_DEPTH: usize = 4;
+
+// The maximum ancestry we support.
+const MAX_ANCESTRY: usize = 5;
+
+struct RelayBlockViewData {
+	// Scheduling info for paras and upcoming paras.
+	fragment_trees: HashMap<ParaId, FragmentTree>,
+}
+
+struct View {
+	// Active or recent relay-chain blocks by block hash.
+	active_leaves: HashMap<Hash, RelayBlockViewData>,
+	candidate_storage: HashMap<ParaId, CandidateStorage>,
+}
+
+impl View {
+	fn new() -> Self {
+		View { active_leaves: HashMap::new(), candidate_storage: HashMap::new() }
+	}
+}
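As a rough illustration of how the two limits above interact, a minimal self-contained sketch (the arithmetic is an assumption drawn from how `Scope::with_ancestors` is invoked below, not a quote from the subsystem code):

    // Each activated leaf yields a scope of at most `MAX_ANCESTRY` ancestor
    // blocks plus the leaf itself, and each fragment tree in that scope holds
    // chains of at most `MAX_DEPTH + 1` candidates (depths 0..=MAX_DEPTH).
    const MAX_DEPTH: usize = 4;
    const MAX_ANCESTRY: usize = 5;

    fn main() {
        let relay_blocks_in_scope = MAX_ANCESTRY + 1; // ancestors + the leaf itself
        let max_chain_len = MAX_DEPTH + 1; // depths 0..=MAX_DEPTH
        assert_eq!(relay_blocks_in_scope, 6);
        assert_eq!(max_chain_len, 5);
    }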
+
+/// The prospective parachains subsystem.
+#[derive(Default)]
+pub struct ProspectiveParachainsSubsystem;
+
+#[overseer::subsystem(ProspectiveParachains, error = SubsystemError, prefix = self::overseer)]
+impl<Context> ProspectiveParachainsSubsystem
+where
+	Context: Send + Sync,
+{
+	fn start(self, ctx: Context) -> SpawnedSubsystem {
+		SpawnedSubsystem {
+			future: run(ctx)
+				.map_err(|e| SubsystemError::with_origin("prospective-parachains", e))
+				.boxed(),
+			name: "prospective-parachains-subsystem",
+		}
+	}
+}
+
+#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
+async fn run<Context>(mut ctx: Context) -> FatalResult<()> {
+	let mut view = View::new();
+	loop {
+		crate::error::log_error(
+			run_iteration(&mut ctx, &mut view).await,
+			"Encountered issue during run iteration",
+		)?;
+	}
+}
+
+#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
+async fn run_iteration<Context>(ctx: &mut Context, view: &mut View) -> Result<()> {
+	loop {
+		match ctx.recv().await.map_err(FatalError::SubsystemReceive)? {
+			FromOverseer::Signal(OverseerSignal::Conclude) => return Ok(()),
+			FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => {
+				handle_active_leaves_update(&mut *ctx, view, update).await?;
+			},
+			FromOverseer::Signal(OverseerSignal::BlockFinalized(..)) => {},
+			FromOverseer::Communication { msg } => match msg {
+				ProspectiveParachainsMessage::CandidateSeconded(para, candidate, pvd, tx) =>
+					handle_candidate_seconded(&mut *ctx, view, para, candidate, pvd, tx).await?,
+				ProspectiveParachainsMessage::CandidateBacked(para, candidate_hash) =>
+					handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await?,
+				ProspectiveParachainsMessage::GetBackableCandidate(
+					relay_parent,
+					para,
+					required_path,
+					tx,
+				) => answer_get_backable_candidate(&view, relay_parent, para, required_path, tx),
+				ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx) =>
+					answer_hypothetical_depths_request(&view, request, tx),
+				ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) =>
+					answer_tree_membership_request(&view, para, candidate, tx),
+				ProspectiveParachainsMessage::GetMinimumRelayParent(para, relay_parent, tx) =>
+					answer_minimum_relay_parent_request(&view, para, relay_parent, tx),
+			},
+		}
+	}
+}
+
+#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
+async fn handle_active_leaves_update<Context>(
+	ctx: &mut Context,
+	view: &mut View,
+	update: ActiveLeavesUpdate,
+) -> JfyiErrorResult<()> {
+	// 1. clean up inactive leaves
+	// 2. determine all scheduled paras at the new block
+	// 3. construct a new fragment tree for each para at each new leaf
+	// 4. prune candidate storage.
+
+	for deactivated in &update.deactivated {
+		view.active_leaves.remove(deactivated);
+	}
+
+	for activated in update.activated.into_iter() {
+		let hash = activated.hash;
+		let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?;
+
+		let block_info: RelayChainBlockInfo = match fetch_block_info(&mut *ctx, hash).await? {
+			None => {
+				gum::warn!(
+					target: LOG_TARGET,
+					block_hash = ?hash,
+					"Failed to get block info for newly activated leaf block."
+				);
+
+				// `continue` skips this leaf without skipping the
+				// pruning logic below.
+				continue
+			},
+			Some(info) => info,
+		};
+
+		let ancestry = fetch_ancestry(&mut *ctx, hash, MAX_ANCESTRY).await?;
+
+		// Find constraints.
+		let mut fragment_trees = HashMap::new();
+		for para in scheduled_paras {
+			let candidate_storage =
+				view.candidate_storage.entry(para).or_insert_with(CandidateStorage::new);
+
+			let constraints = fetch_base_constraints(&mut *ctx, hash, para).await?;
+
+			let constraints = match constraints {
+				Some(c) => c,
+				None => {
+					// This indicates a runtime conflict of some kind.
+
+					gum::debug!(
+						target: LOG_TARGET,
+						para_id = ?para,
+						relay_parent = ?hash,
+						"Failed to get inclusion constraints."
+					);
+
+					continue
+				},
+			};
+
+			let scope = TreeScope::with_ancestors(
+				para,
+				block_info.clone(),
+				constraints,
+				MAX_DEPTH,
+				ancestry.iter().cloned(),
+			)
+			.expect("ancestors are provided in reverse order and correctly; qed");
+
+			let tree = FragmentTree::populate(scope, &*candidate_storage);
+			fragment_trees.insert(para, tree);
+		}
+
+		view.active_leaves.insert(hash, RelayBlockViewData { fragment_trees });
+	}
+
+	if !update.deactivated.is_empty() {
+		// This has the potential to be a hotspot.
+		prune_view_candidate_storage(view);
+	}
+
+	Ok(())
+}
+
+fn prune_view_candidate_storage(view: &mut View) {
+	let active_leaves = &view.active_leaves;
+	view.candidate_storage.retain(|para_id, storage| {
+		let mut coverage = HashSet::new();
+		let mut contained = false;
+		for head in active_leaves.values() {
+			if let Some(tree) = head.fragment_trees.get(&para_id) {
+				coverage.extend(tree.candidates());
+				contained = true;
+			}
+		}
+
+		if !contained {
+			return false
+		}
+
+		storage.retain(|h| coverage.contains(&h));
+
+		// Even if `storage` is now empty, we retain it.
+		// This maintains a convenient invariant: para-id storage exists
+		// as long as there's an active head which schedules the para.
+		true
+	})
+}
+
+#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
+async fn handle_candidate_seconded<Context>(
+	_ctx: &mut Context,
+	view: &mut View,
+	para: ParaId,
+	candidate: CommittedCandidateReceipt,
+	pvd: PersistedValidationData,
+	tx: oneshot::Sender<FragmentTreeMembership>,
+) -> JfyiErrorResult<()> {
+	// Add the candidate to storage.
+	// Then attempt to add it to all trees.
+	let storage = match view.candidate_storage.get_mut(&para) {
+		None => {
+			gum::warn!(
+				target: LOG_TARGET,
+				para_id = ?para,
+				candidate_hash = ?candidate.hash(),
+				"Received seconded candidate for inactive para",
+			);
+
+			let _ = tx.send(Vec::new());
+			return Ok(())
+		},
+		Some(storage) => storage,
+	};
+
+	let candidate_hash = match storage.add_candidate(candidate, pvd) {
+		Ok(c) => c,
+		Err(crate::fragment_tree::CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => {
+			let _ = tx.send(Vec::new());
+			return Ok(())
+		},
+		Err(
+			crate::fragment_tree::CandidateStorageInsertionError::PersistedValidationDataMismatch,
+		) => {
+			// We can't log the candidate hash without doing more expensive
+			// hashing, but this branch indicates that something is seriously wrong
+			// elsewhere, so it's doubtful that this would affect debugging.
+
+			gum::warn!(
+				target: LOG_TARGET,
+				para = ?para,
+				"Received seconded candidate with mismatching validation data",
+			);
+
+			let _ = tx.send(Vec::new());
+			return Ok(())
+		},
+	};
+
+	let mut membership = Vec::new();
+	for (relay_parent, leaf_data) in &mut view.active_leaves {
+		if let Some(tree) = leaf_data.fragment_trees.get_mut(&para) {
+			tree.add_and_populate(candidate_hash, &*storage);
+			if let Some(depths) = tree.candidate(&candidate_hash) {
+				membership.push((*relay_parent, depths));
+			}
+		}
+	}
+	let _ = tx.send(membership);
+
+	Ok(())
+}
+
+#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
+async fn handle_candidate_backed<Context>(
+	_ctx: &mut Context,
+	view: &mut View,
+	para: ParaId,
+	candidate_hash: CandidateHash,
+) -> JfyiErrorResult<()> {
+	let storage = match view.candidate_storage.get_mut(&para) {
+		None => {
+			gum::warn!(
+				target: LOG_TARGET,
+				para_id = ?para,
+				?candidate_hash,
+				"Received instruction to back candidate for inactive para",
+			);
+
+			return Ok(())
+		},
+		Some(storage) => storage,
+	};
+
+	if !storage.contains(&candidate_hash) {
+		gum::warn!(
+			target: LOG_TARGET,
+			para_id = ?para,
+			?candidate_hash,
+			"Received instruction to mark unknown candidate as backed.",
+		);
+
+		return Ok(())
+	}
+
+	if storage.is_backed(&candidate_hash) {
+		gum::debug!(
+			target: LOG_TARGET,
+			para_id = ?para,
+			?candidate_hash,
+			"Received redundant instruction to mark candidate as backed",
+		);
+
+		return Ok(())
+	}
+
+	storage.mark_backed(&candidate_hash);
+	Ok(())
+}
+
+fn answer_get_backable_candidate(
+	view: &View,
+	relay_parent: Hash,
+	para: ParaId,
+	required_path: Vec<CandidateHash>,
+	tx: oneshot::Sender<Option<CandidateHash>>,
+) {
+	let data = match view.active_leaves.get(&relay_parent) {
+		None => {
+			gum::debug!(
+				target: LOG_TARGET,
+				?relay_parent,
+				para_id = ?para,
+				"Requested backable candidate for inactive relay-parent."
+			);
+
+			let _ = tx.send(None);
+			return
+		},
+		Some(d) => d,
+	};
+
+	let tree = match data.fragment_trees.get(&para) {
+		None => {
+			gum::debug!(
+				target: LOG_TARGET,
+				?relay_parent,
+				para_id = ?para,
+				"Requested backable candidate for inactive para."
+			);
+
+			let _ = tx.send(None);
+			return
+		},
+		Some(tree) => tree,
+	};
+
+	let storage = match view.candidate_storage.get(&para) {
+		None => {
+			gum::warn!(
+				target: LOG_TARGET,
+				?relay_parent,
+				para_id = ?para,
+				"No candidate storage for active para",
+			);
+
+			let _ = tx.send(None);
+			return
+		},
+		Some(s) => s,
+	};
+
+	let _ = tx.send(tree.select_child(&required_path, |candidate| storage.is_backed(candidate)));
+}
+
+fn answer_hypothetical_depths_request(
+	view: &View,
+	request: HypotheticalDepthRequest,
+	tx: oneshot::Sender<Vec<usize>>,
+) {
+	match view
+		.active_leaves
+		.get(&request.fragment_tree_relay_parent)
+		.and_then(|l| l.fragment_trees.get(&request.candidate_para))
+	{
+		Some(fragment_tree) => {
+			let depths = fragment_tree.hypothetical_depths(
+				request.candidate_hash,
+				request.parent_head_data_hash,
+				request.candidate_relay_parent,
+			);
+			let _ = tx.send(depths);
+		},
+		None => {
+			let _ = tx.send(Vec::new());
+		},
+	}
+}
+
+fn answer_tree_membership_request(
+	view: &View,
+	para: ParaId,
+	candidate: CandidateHash,
+	tx: oneshot::Sender<FragmentTreeMembership>,
+) {
+	let mut membership = Vec::new();
+	for (relay_parent, view_data) in &view.active_leaves {
+		if let Some(tree) = view_data.fragment_trees.get(&para) {
+			if let Some(depths) = tree.candidate(&candidate) {
+				membership.push((*relay_parent, depths));
+			}
+		}
+	}
+	let _ = tx.send(membership);
+}
+
+fn answer_minimum_relay_parent_request(
+	view: &View,
+	para: ParaId,
+	relay_parent: Hash,
+	tx: oneshot::Sender<Option<BlockNumber>>,
+) {
+	let res = view
+		.active_leaves
+		.get(&relay_parent)
+		.and_then(|data| data.fragment_trees.get(&para))
+		.map(|tree| tree.scope().earliest_relay_parent().number);
+
+	let _ = tx.send(res);
+}
+
+#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
+async fn fetch_base_constraints<Context>(
+	ctx: &mut Context,
+	relay_parent: Hash,
+	para_id: ParaId,
+) -> JfyiErrorResult<Option<Constraints>> {
+	let (tx, rx) = oneshot::channel();
+	ctx.send_message(RuntimeApiMessage::Request(
+		relay_parent,
+		RuntimeApiRequest::StagingValidityConstraints(para_id, tx),
+	))
+	.await;
+
+	Ok(rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??.map(From::from))
+}
+
+#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
+async fn fetch_upcoming_paras<Context>(
+	ctx: &mut Context,
+	relay_parent: Hash,
+) -> JfyiErrorResult<Vec<ParaId>> {
+	let (tx, rx) = oneshot::channel();
+
+	// This'll have to get more sophisticated with parathreads,
+	// but for now we can just use the `AvailabilityCores`.
+	ctx.send_message(RuntimeApiMessage::Request(
+		relay_parent,
+		RuntimeApiRequest::AvailabilityCores(tx),
+	))
+	.await;
+
+	let cores = rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??;
+	let mut upcoming = HashSet::new();
+	for core in cores {
+		match core {
+			CoreState::Occupied(occupied) => {
+				if let Some(next_up_on_available) = occupied.next_up_on_available {
+					upcoming.insert(next_up_on_available.para_id);
+				}
+				if let Some(next_up_on_time_out) = occupied.next_up_on_time_out {
+					upcoming.insert(next_up_on_time_out.para_id);
+				}
+			},
+			CoreState::Scheduled(scheduled) => {
+				upcoming.insert(scheduled.para_id);
+			},
+			CoreState::Free => {},
+		}
+	}
+
+	Ok(upcoming.into_iter().collect())
+}
+
+// Fetch ancestors in descending order, up to the amount requested.
+#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
+async fn fetch_ancestry<Context>(
+	ctx: &mut Context,
+	relay_hash: Hash,
+	ancestors: usize,
+) -> JfyiErrorResult<Vec<RelayChainBlockInfo>> {
+	let (tx, rx) = oneshot::channel();
+	ctx.send_message(ChainApiMessage::Ancestors {
+		hash: relay_hash,
+		k: ancestors,
+		response_channel: tx,
+	})
+	.await;
+
+	let hashes = rx.map_err(JfyiError::ChainApiRequestCanceled).await??;
+	let mut block_info = Vec::with_capacity(hashes.len());
+	for hash in hashes {
+		match fetch_block_info(ctx, hash).await? {
+			None => {
+				gum::warn!(
+					target: LOG_TARGET,
+					relay_hash = ?hash,
+					"Failed to fetch info for hash returned from ancestry.",
+				);
+
+				// Return, however far we got.
+				return Ok(block_info)
+			},
+			Some(info) => {
+				block_info.push(info);
+			},
+		}
+	}
+
+	Ok(block_info)
+}
+
+#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)]
+async fn fetch_block_info<Context>(
+	ctx: &mut Context,
+	relay_hash: Hash,
+) -> JfyiErrorResult<Option<RelayChainBlockInfo>> {
+	let (tx, rx) = oneshot::channel();
+
+	ctx.send_message(ChainApiMessage::BlockHeader(relay_hash, tx)).await;
+	let header = rx.map_err(JfyiError::ChainApiRequestCanceled).await??;
+	Ok(header.map(|header| RelayChainBlockInfo {
+		hash: relay_hash,
+		number: header.number,
+		storage_root: header.state_root,
+	}))
+}
+
+#[derive(Clone)]
+struct MetricsInner;
+
+/// Prospective parachain metrics.
+#[derive(Default, Clone)]
+pub struct Metrics(Option<MetricsInner>);
diff --git a/node/core/runtime-api/src/cache.rs b/node/core/runtime-api/src/cache.rs
index 6f5fdc5d4657..4df2206a9e76 100644
--- a/node/core/runtime-api/src/cache.rs
+++ b/node/core/runtime-api/src/cache.rs
@@ -20,12 +20,15 @@
 use memory_lru::{MemoryLruCache, ResidentSize};
 use parity_util_mem::{MallocSizeOf, MallocSizeOfExt};
 use sp_consensus_babe::Epoch;
 
-use polkadot_primitives::v2::{
-	AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash,
-	CommittedCandidateReceipt, CoreState, DisputeState, GroupRotationInfo, Hash, Id as ParaId,
-	InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData,
-	PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode,
-	ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature,
+use polkadot_primitives::{
+	v2::{
+		AuthorityDiscoveryId, BlockNumber, CandidateCommitments, CandidateEvent, CandidateHash,
+		CommittedCandidateReceipt, CoreState, DisputeState, GroupRotationInfo, Hash, Id as ParaId,
+		InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption,
+		PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo,
+		ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature,
+	},
+	vstaging as vstaging_primitives,
 };
 
 const AUTHORITIES_CACHE_SIZE: usize = 128 * 1024;
@@ -49,6 +52,8 @@
 const VALIDATION_CODE_HASH_CACHE_SIZE: usize = 64 * 1024;
 const VERSION_CACHE_SIZE: usize = 4 * 1024;
 const DISPUTES_CACHE_SIZE: usize = 64 * 1024;
 
+const STAGING_VALIDITY_CONSTRAINTS_CACHE_SIZE: usize = 10 * 1024;
+
 struct ResidentSizeOf<T>(T);
 
 impl<T: MallocSizeOf> ResidentSize for ResidentSizeOf<T> {
@@ -115,6 +120,10 @@ pub(crate) struct RequestResultCache {
 		(Hash, ParaId, OccupiedCoreAssumption),
 		ResidentSizeOf<Option<ValidationCodeHash>>,
 	>,
+
+	staging_validity_constraints:
+		MemoryLruCache<(Hash, ParaId), ResidentSizeOf<Option<vstaging_primitives::Constraints>>>,
+
 	version: MemoryLruCache<Hash, ResidentSizeOf<u32>>,
 	disputes: MemoryLruCache<
 		Hash,
@@ -146,6 +155,11 @@ impl Default for RequestResultCache {
 			on_chain_votes: MemoryLruCache::new(ON_CHAIN_VOTES_CACHE_SIZE),
 			pvfs_require_precheck: MemoryLruCache::new(PVFS_REQUIRE_PRECHECK_SIZE),
 			validation_code_hash: MemoryLruCache::new(VALIDATION_CODE_HASH_CACHE_SIZE),
+
+			staging_validity_constraints: MemoryLruCache::new(
+				STAGING_VALIDITY_CONSTRAINTS_CACHE_SIZE,
+			),
+
 			version: MemoryLruCache::new(VERSION_CACHE_SIZE),
 			disputes: MemoryLruCache::new(DISPUTES_CACHE_SIZE),
 		}
@@ -406,6 +420,21 @@ impl RequestResultCache {
 		self.validation_code_hash.insert(key, ResidentSizeOf(value));
 	}
 
+	pub(crate) fn staging_validity_constraints(
+		&mut self,
+		key: (Hash, ParaId),
+	) -> Option<&Option<vstaging_primitives::Constraints>> {
+		self.staging_validity_constraints.get(&key).map(|v| &v.0)
+	}
+
+	pub(crate) fn cache_staging_validity_constraints(
+		&mut self,
+		key: (Hash, ParaId),
+		value: Option<vstaging_primitives::Constraints>,
+	) {
+		self.staging_validity_constraints.insert(key, ResidentSizeOf(value));
+	}
+
 	pub(crate) fn version(&mut self, relay_parent: &Hash) -> Option<&u32> {
 		self.version.get(&relay_parent).map(|v| &v.0)
 	}
@@ -462,6 +491,9 @@ pub(crate) enum RequestResult {
 	// This is a request with side-effects and no result, hence ().
 	SubmitPvfCheckStatement(Hash, PvfCheckStatement, ValidatorSignature, ()),
 	ValidationCodeHash(Hash, ParaId, OccupiedCoreAssumption, Option<ValidationCodeHash>),
+
+	StagingValidityConstraints(Hash, ParaId, Option<vstaging_primitives::Constraints>),
+
 	Version(Hash, u32),
 	StagingDisputes(Hash, Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)>),
 }
diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs
index 1e8908ebe544..dd924675fdb8 100644
--- a/node/core/runtime-api/src/lib.rs
+++ b/node/core/runtime-api/src/lib.rs
@@ -160,6 +160,11 @@ where
 		ValidationCodeHash(relay_parent, para_id, assumption, hash) => self
 			.requests_cache
 			.cache_validation_code_hash((relay_parent, para_id, assumption), hash),
+
+		StagingValidityConstraints(relay_parent, para_id, constraints) => self
+			.requests_cache
+			.cache_staging_validity_constraints((relay_parent, para_id), constraints),
+
 		Version(relay_parent, version) =>
 			self.requests_cache.cache_version(relay_parent, version),
 		StagingDisputes(relay_parent, disputes) =>
@@ -267,6 +272,9 @@ where
 			.map(|sender| Request::ValidationCodeHash(para, assumption, sender)),
 		Request::StagingDisputes(sender) =>
 			query!(disputes(), sender).map(|sender| Request::StagingDisputes(sender)),
+		Request::StagingValidityConstraints(para, sender) =>
+			query!(staging_validity_constraints(para), sender)
+				.map(|sender| Request::StagingValidityConstraints(para, sender)),
 	}
 }
 
@@ -521,5 +529,8 @@ where
 			query!(ValidationCodeHash, validation_code_hash(para, assumption), ver = 2, sender),
 		Request::StagingDisputes(sender) =>
 			query!(StagingDisputes, staging_get_disputes(), ver = 2, sender),
+		Request::StagingValidityConstraints(para, sender) => {
+			query!(StagingValidityConstraints, staging_validity_constraints(para), ver = 2, sender)
+		},
 	}
 }
diff --git a/node/core/runtime-api/src/tests.rs b/node/core/runtime-api/src/tests.rs
index 4e75df100504..0762f364a732 100644
--- a/node/core/runtime-api/src/tests.rs
+++ b/node/core/runtime-api/src/tests.rs
@@ -19,12 +19,15 @@
 use super::*;
 
 use ::test_helpers::{dummy_committed_candidate_receipt, dummy_validation_code};
 use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfiguration};
 use polkadot_node_subsystem_test_helpers::make_subsystem_context;
-use polkadot_primitives::v2::{
-	AuthorityDiscoveryId, BlockNumber, CandidateEvent, CandidateHash, CommittedCandidateReceipt,
-	CoreState, DisputeState, GroupRotationInfo, Id as ParaId, InboundDownwardMessage,
-	InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement,
-	ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash,
-	ValidatorId, ValidatorIndex, ValidatorSignature,
+use polkadot_primitives::{
+	v2::{
+		AuthorityDiscoveryId, BlockNumber, CandidateEvent, CandidateHash,
+		CommittedCandidateReceipt, CoreState, DisputeState, GroupRotationInfo, Id as ParaId,
+		InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption,
+		PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo,
+		ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature,
+	},
+	vstaging,
 };
 use sp_core::testing::TaskExecutor;
 use std::{
@@ -193,6 +196,10 @@ sp_api::mock_impl_runtime_apis! {
 		fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)> {
 			unimplemented!()
 		}
+
+		fn staging_validity_constraints(_: ParaId) -> Option<vstaging::Constraints> {
+			unimplemented!("Staging API not implemented");
+		}
 	}
 
 	impl BabeApi<Block> for MockRuntimeApi {
diff --git a/node/overseer/src/dummy.rs b/node/overseer/src/dummy.rs
index b4a97c3e6321..19d24fb82dfa 100644
--- a/node/overseer/src/dummy.rs
+++ b/node/overseer/src/dummy.rs
@@ -86,6 +86,7 @@ pub fn dummy_overseer_builder<'a, Spawner, SupportsParachains>(
 		DummySubsystem,
 		DummySubsystem,
 		DummySubsystem,
+		DummySubsystem,
 	>,
 	SubsystemError,
 >
@@ -127,6 +128,7 @@ pub fn one_for_all_overseer_builder<'a, Spawner, SupportsParachains, Sub>(
 		Sub,
 		Sub,
 		Sub,
+		Sub,
 	>,
 	SubsystemError,
 >
@@ -154,7 +156,8 @@ where
 		+ Subsystem<OverseerSubsystemContext<DisputeCoordinatorMessage>, SubsystemError>
 		+ Subsystem<OverseerSubsystemContext<DisputeDistributionMessage>, SubsystemError>
-		+ Subsystem<OverseerSubsystemContext<ChainSelectionMessage>, SubsystemError>,
+		+ Subsystem<OverseerSubsystemContext<ChainSelectionMessage>, SubsystemError>
+		+ Subsystem<OverseerSubsystemContext<ProspectiveParachainsMessage>, SubsystemError>,
 {
 	let metrics = <OverseerMetrics as MetricsTrait>::register(registry)?;
 
@@ -179,7 +182,8 @@ where
 		.gossip_support(subsystem.clone())
 		.dispute_coordinator(subsystem.clone())
 		.dispute_distribution(subsystem.clone())
-		.chain_selection(subsystem)
+		.chain_selection(subsystem.clone())
+		.prospective_parachains(subsystem.clone())
 		.activation_external_listeners(Default::default())
 		.span_per_active_leaf(Default::default())
 		.active_leaves(Default::default())
diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs
index bcf486d2a0db..21cd09aee03d 100644
--- a/node/overseer/src/lib.rs
+++ b/node/overseer/src/lib.rs
@@ -83,8 +83,8 @@ use polkadot_node_subsystem_types::messages::{
 	BitfieldSigningMessage, CandidateBackingMessage, CandidateValidationMessage, ChainApiMessage,
 	ChainSelectionMessage, CollationGenerationMessage, CollatorProtocolMessage,
 	DisputeCoordinatorMessage, DisputeDistributionMessage, GossipSupportMessage,
-	NetworkBridgeMessage, ProvisionerMessage, PvfCheckerMessage, RuntimeApiMessage,
-	StatementDistributionMessage,
+	NetworkBridgeMessage, ProspectiveParachainsMessage, ProvisionerMessage, PvfCheckerMessage,
+	RuntimeApiMessage, StatementDistributionMessage,
 };
 pub use polkadot_node_subsystem_types::{
 	errors::{SubsystemError, SubsystemResult},
@@ -564,6 +564,12 @@ pub struct Overseer<SupportsParachains> {
 	#[subsystem(blocking, ChainSelectionMessage, sends: [ChainApiMessage])]
 	chain_selection: ChainSelection,
 
+	#[subsystem(ProspectiveParachainsMessage, sends: [
+		RuntimeApiMessage,
+		ChainApiMessage,
+	])]
+	prospective_parachains: ProspectiveParachains,
+
 	/// External listeners waiting for a hash to be in the active-leave set.
 	pub activation_external_listeners: HashMap<Hash, Vec<Option<oneshot::Sender<()>>>>,
diff --git a/node/overseer/src/tests.rs b/node/overseer/src/tests.rs
index 9fb030140191..ab7303297aea 100644
--- a/node/overseer/src/tests.rs
+++ b/node/overseer/src/tests.rs
@@ -29,7 +29,7 @@ use polkadot_node_subsystem_types::{
 	ActivatedLeaf, LeafStatus,
 };
 use polkadot_primitives::v2::{
-	CandidateHash, CandidateReceipt, CollatorPair, InvalidDisputeStatementKind,
+	CandidateHash, CandidateReceipt, CollatorPair, Id as ParaId, InvalidDisputeStatementKind,
 	ValidDisputeStatementKind, ValidatorIndex,
 };
 
@@ -910,10 +910,17 @@ fn test_chain_selection_msg() -> ChainSelectionMessage {
 	ChainSelectionMessage::Approved(Default::default())
 }
 
+fn test_prospective_parachains_msg() -> ProspectiveParachainsMessage {
+	ProspectiveParachainsMessage::CandidateBacked(
+		ParaId::from(5),
+		CandidateHash(Hash::repeat_byte(0)),
+	)
+}
+
 // Checks that `stop`, `broadcast_signal` and `broadcast_message` are implemented correctly.
 #[test]
 fn overseer_all_subsystems_receive_signals_and_messages() {
-	const NUM_SUBSYSTEMS: usize = 21;
+	const NUM_SUBSYSTEMS: usize = 22;
 	// -4 for BitfieldSigning, GossipSupport, AvailabilityDistribution and PvfCheckerSubsystem.
 	const NUM_SUBSYSTEMS_MESSAGED: usize = NUM_SUBSYSTEMS - 4;
 
@@ -998,6 +1005,9 @@ fn overseer_all_subsystems_receive_signals_and_messages() {
 	handle
 		.send_msg_anon(AllMessages::ChainSelection(test_chain_selection_msg()))
 		.await;
+	handle
+		.send_msg_anon(AllMessages::ProspectiveParachains(test_prospective_parachains_msg()))
+		.await;
 	// handle.send_msg_anon(AllMessages::PvfChecker(test_pvf_checker_msg())).await;
 
 	// Wait until all subsystems have received. Otherwise the messages might race against
@@ -1053,6 +1063,7 @@ fn context_holds_onto_message_until_enough_signals_received() {
 	let (dispute_distribution_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY);
 	let (chain_selection_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY);
 	let (pvf_checker_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY);
+	let (prospective_parachains_bounded_tx, _) = metered::channel(CHANNEL_CAPACITY);
 
 	let (candidate_validation_unbounded_tx, _) = metered::unbounded();
 	let (candidate_backing_unbounded_tx, _) = metered::unbounded();
@@ -1075,6 +1086,7 @@ fn context_holds_onto_message_until_enough_signals_received() {
 	let (dispute_distribution_unbounded_tx, _) = metered::unbounded();
 	let (chain_selection_unbounded_tx, _) = metered::unbounded();
 	let (pvf_checker_unbounded_tx, _) = metered::unbounded();
+	let (prospective_parachains_unbounded_tx, _) = metered::unbounded();
 
 	let channels_out = ChannelsOut {
 		candidate_validation: candidate_validation_bounded_tx.clone(),
@@ -1098,6 +1110,7 @@ fn context_holds_onto_message_until_enough_signals_received() {
 		dispute_distribution: dispute_distribution_bounded_tx.clone(),
 		chain_selection: chain_selection_bounded_tx.clone(),
 		pvf_checker: pvf_checker_bounded_tx.clone(),
+		prospective_parachains: prospective_parachains_bounded_tx.clone(),
 
 		candidate_validation_unbounded: candidate_validation_unbounded_tx.clone(),
 		candidate_backing_unbounded: candidate_backing_unbounded_tx.clone(),
@@ -1120,6 +1133,7 @@ fn context_holds_onto_message_until_enough_signals_received() {
 		dispute_distribution_unbounded: dispute_distribution_unbounded_tx.clone(),
 		chain_selection_unbounded: chain_selection_unbounded_tx.clone(),
 		pvf_checker_unbounded: pvf_checker_unbounded_tx.clone(),
+		prospective_parachains_unbounded: prospective_parachains_unbounded_tx.clone(),
 	};
 
 	let (mut signal_tx, signal_rx) = metered::channel(CHANNEL_CAPACITY);
diff --git a/node/service/src/overseer.rs b/node/service/src/overseer.rs
index bb3d9e840f1c..e0dce76b9393 100644
--- a/node/service/src/overseer.rs
+++ b/node/service/src/overseer.rs
@@ -178,6 +178,7 @@ pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>(
 		DisputeCoordinatorSubsystem,
 		DisputeDistributionSubsystem<AuthorityDiscoveryService>,
 		ChainSelectionSubsystem,
+		polkadot_overseer::DummySubsystem, // TODO [now]: use real prospective parachains
 	>,
 	Error,
 >
@@ -291,6 +292,7 @@ where
 			Metrics::register(registry)?,
 		))
 		.chain_selection(ChainSelectionSubsystem::new(chain_selection_config, parachains_db))
+		.prospective_parachains(polkadot_overseer::DummySubsystem)
 		.leaves(Vec::from_iter(
 			leaves
 				.into_iter()
diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs
index db74ab11cd4d..db2bd89286b7 100644
--- a/node/subsystem-types/src/messages.rs
+++ b/node/subsystem-types/src/messages.rs
@@ -38,14 +38,17 @@ use polkadot_node_primitives::{
 	CollationSecondedSignal, DisputeMessage, ErasureChunk, PoV, SignedDisputeStatement,
 	SignedFullStatement, ValidationResult,
 };
-use polkadot_primitives::v2::{
-	AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, CandidateHash,
-	CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreState,
-	DisputeState, GroupIndex, GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId,
-	InboundDownwardMessage, InboundHrmpMessage, MultiDisputeStatementSet, OccupiedCoreAssumption,
-	PersistedValidationData, PvfCheckStatement, SessionIndex, SessionInfo,
-	SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode, ValidationCodeHash,
-	ValidatorId, ValidatorIndex, ValidatorSignature,
+use polkadot_primitives::{
+	v2::{
+		AuthorityDiscoveryId, BackedCandidate, BlockNumber, CandidateEvent, CandidateHash,
+		CandidateIndex, CandidateReceipt, CollatorId, CommittedCandidateReceipt, CoreState,
+		DisputeState, GroupIndex, GroupRotationInfo, Hash, Header as BlockHeader, Id as ParaId,
+		InboundDownwardMessage, InboundHrmpMessage, MultiDisputeStatementSet,
+		OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, SessionIndex,
+		SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, ValidationCode,
+		ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature,
+	},
+	vstaging as vstaging_primitives,
 };
 use polkadot_statement_table::v2::Misbehavior;
 use std::{
@@ -697,6 +700,9 @@ pub enum RuntimeApiRequest {
 	StagingDisputes(
 		RuntimeApiSender<Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)>>,
 	),
+	/// Get the validity constraints of the given para.
+	/// This is a staging API that will not be available on production runtimes.
+	StagingValidityConstraints(ParaId, RuntimeApiSender<Option<vstaging_primitives::Constraints>>),
 }
 
 /// A message to the Runtime API subsystem.
@@ -933,3 +939,64 @@ pub enum GossipSupportMessage {
 /// Currently non-instantiable.
 #[derive(Debug)]
 pub enum PvfCheckerMessage {}
+
+/// A request for the depths a hypothetical candidate would occupy within
+/// some fragment tree.
+#[derive(Debug)]
+pub struct HypotheticalDepthRequest {
+	/// The hash of the potential candidate.
+	pub candidate_hash: CandidateHash,
+	/// The para of the candidate.
+	pub candidate_para: ParaId,
+	/// The hash of the parent head-data of the candidate.
+	pub parent_head_data_hash: Hash,
+	/// The relay-parent of the candidate.
+	pub candidate_relay_parent: Hash,
+	/// The relay-parent of the fragment tree we are comparing to.
+	pub fragment_tree_relay_parent: Hash,
+}
+
+/// Indicates the relay-parents whose fragment trees a candidate
+/// is present in, along with the depths of each tree at which it is present.
+pub type FragmentTreeMembership = Vec<(Hash, Vec<usize>)>;
+
+/// Messages sent to the Prospective Parachains subsystem.
+#[derive(Debug)]
+pub enum ProspectiveParachainsMessage {
+	/// Inform the Prospective Parachains Subsystem of a new candidate.
+	///
+	/// The response sender accepts the candidate membership, which is empty
+	/// if the candidate was already known.
+	CandidateSeconded(
+		ParaId,
+		CommittedCandidateReceipt,
+		PersistedValidationData,
+		oneshot::Sender<FragmentTreeMembership>,
+	),
+	/// Inform the Prospective Parachains Subsystem that a previously seconded candidate
+	/// has been backed. This requires that `CandidateSeconded` was sent for the candidate
+	/// at some point in the past.
+	CandidateBacked(ParaId, CandidateHash),
+	/// Get a backable candidate hash for the given parachain, under the given relay-parent hash,
+	/// which is a descendant of the given candidate hashes. Returns `None` on the channel
+	/// if no such candidate exists.
+	GetBackableCandidate(Hash, ParaId, Vec<CandidateHash>, oneshot::Sender<Option<CandidateHash>>),
+	/// Get the hypothetical depths that a candidate with the given properties would
+	/// occupy in the fragment tree for the given relay-parent.
+	///
+	/// If the candidate is already known, this returns the depths the candidate
+	/// occupies.
+	///
+	/// Returns an empty vector either if there is no such depth or the fragment tree relay-parent
+	/// is unknown.
+	GetHypotheticalDepth(HypotheticalDepthRequest, oneshot::Sender<Vec<usize>>),
+	/// Get the membership of the candidate in all fragment trees.
+	GetTreeMembership(ParaId, CandidateHash, oneshot::Sender<FragmentTreeMembership>),
+	/// Get the minimum accepted relay-parent number in the fragment tree
+	/// for the given relay-parent and para-id.
+	///
+	/// That is, if the relay-parent is known and there's a fragment tree for it
+	/// in this para-id, this returns the minimum relay-parent block number in the
+	/// same chain which is accepted in the fragment tree for the para-id.
+	GetMinimumRelayParent(ParaId, Hash, oneshot::Sender<Option<BlockNumber>>),
+}
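As a usage illustration for the message type above, a minimal hedged sketch (the `send` closure stands in for whatever overseer sender a client subsystem holds; all concrete values are invented and the types above are assumed to be in scope):

    use futures::channel::oneshot;

    // Ask at which depths a hypothetical candidate would sit in the fragment
    // tree anchored at `leaf`, then await the subsystem's answer.
    async fn hypothetical_depth_example(
        mut send: impl FnMut(ProspectiveParachainsMessage),
        candidate_hash: CandidateHash,
        para: ParaId,
        parent_head_data_hash: Hash,
        candidate_relay_parent: Hash,
        leaf: Hash,
    ) -> Vec<usize> {
        let (tx, rx) = oneshot::channel();
        send(ProspectiveParachainsMessage::GetHypotheticalDepth(
            HypotheticalDepthRequest {
                candidate_hash,
                candidate_para: para,
                parent_head_data_hash,
                candidate_relay_parent,
                fragment_tree_relay_parent: leaf,
            },
            tx,
        ));
        // An empty vector means no valid depth exists or the fragment-tree
        // relay-parent is unknown; a canceled channel is treated the same way here.
        rx.await.unwrap_or_default()
    }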
diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs
index e886a9a0ff22..60eecb9b5180 100644
--- a/node/subsystem-util/src/inclusion_emulator/staging.rs
+++ b/node/subsystem-util/src/inclusion_emulator/staging.rs
@@ -114,8 +114,9 @@
 //! in practice at most once every few weeks.
 
 use polkadot_primitives::vstaging::{
-	BlockNumber, CandidateCommitments, CollatorId, CollatorSignature, Hash, HeadData, Id as ParaId,
-	PersistedValidationData, UpgradeRestriction, ValidationCodeHash,
+	BlockNumber, CandidateCommitments, CollatorId, CollatorSignature,
+	Constraints as PrimitiveConstraints, Hash, HeadData, Id as ParaId, PersistedValidationData,
+	UpgradeRestriction, ValidationCodeHash,
 };
 use std::collections::HashMap;
 
@@ -169,6 +170,40 @@ pub struct Constraints {
 	pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>,
 }
 
+impl From<PrimitiveConstraints> for Constraints {
+	fn from(c: PrimitiveConstraints) -> Self {
+		Constraints {
+			min_relay_parent_number: c.min_relay_parent_number,
+			max_pov_size: c.max_pov_size as _,
+			max_code_size: c.max_code_size as _,
+			ump_remaining: c.ump_remaining as _,
+			ump_remaining_bytes: c.ump_remaining_bytes as _,
+			dmp_remaining_messages: c.dmp_remaining_messages as _,
+			hrmp_inbound: InboundHrmpLimitations {
+				valid_watermarks: c.hrmp_inbound.valid_watermarks,
+			},
+			hrmp_channels_out: c
+				.hrmp_channels_out
+				.into_iter()
+				.map(|(para_id, limits)| {
+					(
+						para_id,
+						OutboundHrmpChannelLimitations {
+							bytes_remaining: limits.bytes_remaining as _,
+							messages_remaining: limits.messages_remaining as _,
+						},
+					)
+				})
+				.collect(),
+			max_hrmp_num_per_candidate: c.max_hrmp_num_per_candidate as _,
+			required_parent: c.required_parent,
+			validation_code_hash: c.validation_code_hash,
+			upgrade_restriction: c.upgrade_restriction,
+			future_validation_code: c.future_validation_code,
+		}
+	}
+}
+
 /// Kinds of errors that can occur when modifying constraints.
 #[derive(Debug, Clone, PartialEq)]
 pub enum ModificationError {
@@ -225,7 +260,8 @@ impl Constraints {
 		&self,
 		modifications: &ConstraintModifications,
 	) -> Result<(), ModificationError> {
-		if let Some(hrmp_watermark) = modifications.hrmp_watermark {
+		if let Some(HrmpWatermarkUpdate::Trunk(hrmp_watermark)) = modifications.hrmp_watermark {
+			// head updates are always valid.
 			if self
 				.hrmp_inbound
 				.valid_watermarks
@@ -300,12 +336,22 @@ impl Constraints {
 			new.required_parent = required_parent.clone();
 		}
 
-		if let Some(hrmp_watermark) = modifications.hrmp_watermark {
-			match new.hrmp_inbound.valid_watermarks.iter().position(|w| w == &hrmp_watermark) {
-				Some(pos) => {
+		if let Some(ref hrmp_watermark) = modifications.hrmp_watermark {
+			match new.hrmp_inbound.valid_watermarks.binary_search(&hrmp_watermark.watermark()) {
+				Ok(pos) => {
+					// Exact match, so this is OK in all cases.
 					let _ = new.hrmp_inbound.valid_watermarks.drain(..pos + 1);
 				},
-				None => return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)),
+				Err(pos) => match hrmp_watermark {
+					HrmpWatermarkUpdate::Head(_) => {
+						// Updates to Head are always OK.
+						let _ = new.hrmp_inbound.valid_watermarks.drain(..pos);
+					},
+					HrmpWatermarkUpdate::Trunk(n) => {
+						// Trunk update landing on a disallowed watermark is not OK.
+						return Err(ModificationError::DisallowedHrmpWatermark(*n))
+					},
+				},
+			}
 		}
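To make the watermark rule concrete, a detached, hedged illustration (a plain function over invented values, not the emulator's actual types; `valid` mirrors `valid_watermarks`, sorted ascending):

    // `Head` updates (watermark == relay-parent number) are always legal;
    // `Trunk` updates must land exactly on a block where messages were queued.
    fn watermark_allowed(valid: &[u32], head: u32, is_head_update: bool, wm: u32) -> bool {
        if is_head_update {
            wm == head
        } else {
            valid.binary_search(&wm).is_ok()
        }
    }

    fn main() {
        let valid = [6, 8];
        assert!(!watermark_allowed(&valid, 10, false, 7)); // Trunk(7): disallowed
        assert!(watermark_allowed(&valid, 10, false, 8)); // Trunk(8): lands on a valid watermark
        assert!(watermark_allowed(&valid, 10, true, 10)); // Head(10): always legal
    }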
@@ -388,13 +434,33 @@ pub struct OutboundHrmpChannelModification {
 	pub messages_submitted: usize,
 }
 
+/// An update to the HRMP Watermark.
+#[derive(Debug, Clone, PartialEq)]
+pub enum HrmpWatermarkUpdate {
+	/// This is an update placing the watermark at the head of the chain,
+	/// which is always legal.
+	Head(BlockNumber),
+	/// This is an update placing the watermark behind the head of the
+	/// chain, which is only legal if it lands on a block where messages
+	/// were queued.
+	Trunk(BlockNumber),
+}
+
+impl HrmpWatermarkUpdate {
+	fn watermark(&self) -> BlockNumber {
+		match *self {
+			HrmpWatermarkUpdate::Head(n) | HrmpWatermarkUpdate::Trunk(n) => n,
+		}
+	}
+}
+
 /// Modifications to constraints as a result of prospective candidates.
 #[derive(Debug, Clone, PartialEq)]
 pub struct ConstraintModifications {
 	/// The required parent head to build upon.
 	pub required_parent: Option<HeadData>,
 	/// The new HRMP watermark.
-	pub hrmp_watermark: Option<BlockNumber>,
+	pub hrmp_watermark: Option<HrmpWatermarkUpdate>,
 	/// Outbound HRMP channel modifications.
 	pub outbound_hrmp: HashMap<ParaId, OutboundHrmpChannelModification>,
 	/// The amount of UMP messages sent.
@@ -546,7 +612,13 @@ impl Fragment {
 		let commitments = &candidate.commitments;
 		ConstraintModifications {
 			required_parent: Some(commitments.head_data.clone()),
-			hrmp_watermark: Some(commitments.hrmp_watermark),
+			hrmp_watermark: Some({
+				if commitments.hrmp_watermark == relay_parent.number {
+					HrmpWatermarkUpdate::Head(commitments.hrmp_watermark)
+				} else {
+					HrmpWatermarkUpdate::Trunk(commitments.hrmp_watermark)
+				}
+			}),
 			outbound_hrmp: {
 				let mut outbound_hrmp = HashMap::<_, OutboundHrmpChannelModification>::new();
 
@@ -843,10 +915,10 @@ mod tests {
 	}
 
 	#[test]
-	fn constraints_disallowed_watermark() {
+	fn constraints_disallowed_trunk_watermark() {
 		let constraints = make_constraints();
 		let mut modifications = ConstraintModifications::identity();
-		modifications.hrmp_watermark = Some(7);
+		modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Trunk(7));
 
 		assert_eq!(
 			constraints.check_modifications(&modifications),
@@ -859,6 +931,18 @@
 		);
 	}
 
+	#[test]
+	fn constraints_always_allow_head_watermark() {
+		let constraints = make_constraints();
+		let mut modifications = ConstraintModifications::identity();
+		modifications.hrmp_watermark = Some(HrmpWatermarkUpdate::Head(7));
+
+		assert!(constraints.check_modifications(&modifications).is_ok());
+
+		let new_constraints = constraints.apply_modifications(&modifications).unwrap();
+		assert_eq!(new_constraints.hrmp_inbound.valid_watermarks, vec![8]);
+	}
+
 	#[test]
 	fn constraints_no_such_hrmp_channel() {
 		let constraints = make_constraints();
diff --git a/primitives/src/runtime_api.rs b/primitives/src/runtime_api.rs
index 84d2cf0ec4ca..fe695336eb82 100644
--- a/primitives/src/runtime_api.rs
+++ b/primitives/src/runtime_api.rs
@@ -44,7 +44,7 @@
 //! For more details about how the API versioning works refer to `sp_api`
 //! documentation [here](https://docs.substrate.io/rustdocs/latest/sp_api/macro.decl_runtime_apis.html).
 
-use crate::v2;
+use crate::{v2, vstaging};
 use parity_scale_codec::{Decode, Encode};
 use polkadot_core_primitives as pcp;
 use polkadot_parachain::primitives as ppp;
@@ -155,5 +155,9 @@ sp_api::decl_runtime_apis! {
 		/// Returns all on-chain disputes.
 		/// This is a staging method! Do not use on production runtimes!
 		fn staging_get_disputes() -> Vec<(v2::SessionIndex, v2::CandidateHash, v2::DisputeState<v2::BlockNumber>)>;
+
+		/// Returns the base constraints of the given para, if they exist.
+		/// This is a staging method! Do not use on production runtimes!
+		fn staging_validity_constraints(_: ppp::Id) -> Option<vstaging::Constraints>;
 	}
 }
diff --git a/primitives/src/v2/mod.rs b/primitives/src/v2/mod.rs
index 649ffd2f375e..0a9de44480f4 100644
--- a/primitives/src/v2/mod.rs
+++ b/primitives/src/v2/mod.rs
@@ -1129,6 +1129,7 @@ pub struct AbridgedHrmpChannel {
 /// A possible upgrade restriction that prevents a parachain from performing an upgrade.
 #[derive(Copy, Clone, Encode, Decode, PartialEq, RuntimeDebug, TypeInfo)]
+#[cfg_attr(feature = "std", derive(MallocSizeOf))]
 pub enum UpgradeRestriction {
 	/// There is an upgrade restriction and there are no details about its specifics nor how long
 	/// it could last.
diff --git a/primitives/src/vstaging/mod.rs b/primitives/src/vstaging/mod.rs
index c6dd4d1bb76a..87cf8c8ba85c 100644
--- a/primitives/src/vstaging/mod.rs
+++ b/primitives/src/vstaging/mod.rs
@@ -18,3 +18,67 @@
 // Put any primitives used by staging API functions here
 
 pub use crate::v2::*;
+use sp_std::prelude::*;
+
+use parity_scale_codec::{Decode, Encode};
+use primitives::RuntimeDebug;
+use scale_info::TypeInfo;
+
+#[cfg(feature = "std")]
+use parity_util_mem::MallocSizeOf;
+
+/// Useful type alias for Para IDs.
+pub type ParaId = Id;
+
+/// Constraints on inbound HRMP channels.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+#[cfg_attr(feature = "std", derive(MallocSizeOf))]
+pub struct InboundHrmpLimitations {
+	/// An exhaustive set of all valid watermarks, sorted ascending.
+	pub valid_watermarks: Vec<BlockNumber>,
+}
+
+/// Constraints on outbound HRMP channels.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+#[cfg_attr(feature = "std", derive(MallocSizeOf))]
+pub struct OutboundHrmpChannelLimitations {
+	/// The maximum bytes that can be written to the channel.
+	pub bytes_remaining: u32,
+	/// The maximum messages that can be written to the channel.
+	pub messages_remaining: u32,
+}
+
+/// Constraints on the actions that can be taken by a new parachain
+/// block. These limitations are implicitly associated with some particular
+/// parachain, which should be apparent from usage.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+#[cfg_attr(feature = "std", derive(MallocSizeOf))]
+pub struct Constraints {
+	/// The minimum relay-parent number accepted under these constraints.
+	pub min_relay_parent_number: BlockNumber,
+	/// The maximum Proof-of-Validity size allowed, in bytes.
+	pub max_pov_size: u32,
+	/// The maximum new validation code size allowed, in bytes.
+	pub max_code_size: u32,
+	/// The amount of UMP messages remaining.
+	pub ump_remaining: u32,
+	/// The amount of UMP bytes remaining.
+	pub ump_remaining_bytes: u32,
+	/// The amount of remaining DMP messages.
+	pub dmp_remaining_messages: u32,
+	/// The limitations of all registered inbound HRMP channels.
+	pub hrmp_inbound: InboundHrmpLimitations,
+	/// The limitations of all registered outbound HRMP channels.
+	pub hrmp_channels_out: Vec<(ParaId, OutboundHrmpChannelLimitations)>,
+	/// The maximum number of HRMP messages allowed per candidate.
+	pub max_hrmp_num_per_candidate: u32,
+	/// The required parent head-data of the parachain.
+	pub required_parent: HeadData,
+	/// The expected validation-code-hash of this parachain.
+	pub validation_code_hash: ValidationCodeHash,
+	/// The code upgrade restriction signal as-of this parachain.
+	pub upgrade_restriction: Option<UpgradeRestriction>,
+	/// The future validation code hash, if any, and at what relay-parent
+	/// number the upgrade would be minimally applied.
+	pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>,
+}
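For concreteness, a minimal hedged example of filling in these fields (all values are invented; `HeadData` and `ValidationCode` are the v2 primitives re-exported by this module):

    let constraints = Constraints {
        min_relay_parent_number: 5,
        max_pov_size: 5 * 1024 * 1024,
        max_code_size: 3 * 1024 * 1024,
        ump_remaining: 10,
        ump_remaining_bytes: 1024,
        dmp_remaining_messages: 10,
        hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![6, 8] },
        hrmp_channels_out: vec![],
        max_hrmp_num_per_candidate: 5,
        required_parent: HeadData(vec![1, 2, 3]),
        validation_code_hash: ValidationCode(vec![4, 5, 6]).hash(),
        upgrade_restriction: None,
        future_validation_code: None,
    };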
diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs
index 935863d683ad..35247456ecea 100644
--- a/runtime/kusama/src/lib.rs
+++ b/runtime/kusama/src/lib.rs
@@ -1849,6 +1849,10 @@ sp_api::impl_runtime_apis! {
 		fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)> {
 			unimplemented!()
 		}
+
+		fn staging_validity_constraints(_: ParaId) -> Option<primitives::vstaging::Constraints> {
+			unimplemented!("Staging API not implemented");
+		}
 	}
 
 	impl beefy_primitives::BeefyApi<Block> for Runtime {
diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs
index 78d2862e91bd..6c6e854839e0 100644
--- a/runtime/polkadot/src/lib.rs
+++ b/runtime/polkadot/src/lib.rs
@@ -1730,6 +1730,10 @@ sp_api::impl_runtime_apis! {
 		fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)> {
 			unimplemented!()
 		}
+
+		fn staging_validity_constraints(_: ParaId) -> Option<primitives::vstaging::Constraints> {
+			unimplemented!("Staging API not implemented");
+		}
 	}
 
 	impl beefy_primitives::BeefyApi<Block> for Runtime {
diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs
index 2b98e31dd497..9ae752fc6388 100644
--- a/runtime/rococo/src/lib.rs
+++ b/runtime/rococo/src/lib.rs
@@ -1261,6 +1261,10 @@ sp_api::impl_runtime_apis! {
 		fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)> {
 			unimplemented!()
 		}
+
+		fn staging_validity_constraints(_: ParaId) -> Option<primitives::vstaging::Constraints> {
+			unimplemented!("Staging API not implemented");
+		}
 	}
 
 	impl fg_primitives::GrandpaApi<Block> for Runtime {
diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs
index 783c1801a8e9..9311e90e7122 100644
--- a/runtime/test-runtime/src/lib.rs
+++ b/runtime/test-runtime/src/lib.rs
@@ -908,6 +908,10 @@ sp_api::impl_runtime_apis! {
 		fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)> {
 			polkadot_runtime_parachains::runtime_api_impl::vstaging::get_session_disputes::<Runtime>()
 		}
+
+		fn staging_validity_constraints(_: ParaId) -> Option<primitives::vstaging::Constraints> {
+			unimplemented!("Staging API not implemented");
+		}
 	}
 
 	impl beefy_primitives::BeefyApi<Block> for Runtime {
diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs
index cd266a91667a..aff9c6b4b242 100644
--- a/runtime/westend/src/lib.rs
+++ b/runtime/westend/src/lib.rs
@@ -1375,6 +1375,10 @@ sp_api::impl_runtime_apis! {
 		fn staging_get_disputes() -> Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)> {
 			runtime_parachains::runtime_api_impl::vstaging::get_session_disputes::<Runtime>()
 		}
+
+		fn staging_validity_constraints(_: ParaId) -> Option<primitives::vstaging::Constraints> {
+			unimplemented!("Staging API not implemented");
+		}
 	}
 
 	impl beefy_primitives::BeefyApi<Block> for Runtime {

From 88be44519f8091d4e0dde991abdb68081aa7bdfe Mon Sep 17 00:00:00 2001
From: asynchronous rob
Date: Sun, 10 Jul 2022 04:20:11 +0200
Subject: [PATCH 04/76] Integrate prospective parachains subsystem into
 backing: Part 1 (#5557)

* BEGIN ASYNC candidate-backing CHANGES
* rename & document modes
* answer prospective validation data requests
* GetMinimumRelayParents request is now plural
* implement an implicit view utility for backing subsystems
* implicit-view: get allowed relay parents
* refactorings and improvements to implicit view
* add some TODOs for tests
* split implicit view updates into 2 functions
* backing: define State to prepare for functional refactor
* add some docs
* backing: implement bones of new leaf activation logic
* backing: create per-relay-parent-states
* use new handle_active_leaves_update
* begin extracting logic from CandidateBackingJob
* mostly extract statement import from job logic
* handle statement imports outside of job logic
* do some TODO planning for prospective parachains integration
* finish rewriting backing subsystem in functional style
* add prospective parachains mode to relay parent entries
* fmt
* add a RejectedByProspectiveParachains error
* notify prospective parachains of seconded and backed candidates
* always validate candidates exhaustively in backing.
* return persisted_validation_data from validation
* handle rejections by prospective parachains
* implement seconding sanity check
* invoke validate_and_second
* Alter statement table to allow multiple seconded messages per validator
* refactor backing to have statements carry PVD
* clean up all warnings
* Add tests for implicit view
* Improve doc comments
* Prospective parachains mode based on Runtime API version
* Add a TODO
* Rework seconding_sanity_check
* Iterate over responses
* Update backing tests
* collator-protocol: load PVD from runtime
* Fix validator side tests
* Update statement-distribution to fetch PVD
* Fix statement-distribution tests
* Backing tests with prospective paras #1
* fix per_relay_parent pruning in backing
* Test multiple leaves
* Test seconding sanity check
* Import statement order

  Before creating an entry in the `PerCandidateState` map, wait for
  approval from the prospective parachains subsystem.

* Add a test for correct state updates
* Second multiple candidates per relay parent test
* Add backing tests with prospective paras
* Second more than one test without prospective paras
* Add a test for prospective para blocks
* Update malus
* typos

Co-authored-by: Chris Sosnin
---
 node/core/backing/src/error.rs                |   23 +-
 node/core/backing/src/lib.rs                  | 2252 +++++++++++------
 .../backing/src/{tests.rs => tests/mod.rs}    |  657 ++++-
 .../src/tests/prospective_parachains.rs       | 1352 ++++++++++
 .../src/fragment_tree.rs                      |   21 +-
 node/core/prospective-parachains/src/lib.rs   |   86 +-
 .../src/variants/suggest_garbage_candidate.rs |   47 +-
 .../src/validator_side/mod.rs                 |   60 +-
 .../src/validator_side/tests.rs               |   48 +-
 .../statement-distribution/src/error.rs       |   12 +-
 .../network/statement-distribution/src/lib.rs |   90 +-
 .../statement-distribution/src/tests.rs       |   91 +-
 node/overseer/src/lib.rs                      |    2 +
 node/primitives/src/disputes/mod.rs           |   26 +-
 node/primitives/src/lib.rs                    |   77 +
node/subsystem-types/src/messages.rs | 55 +- .../src/backing_implicit_view.rs | 687 +++++ node/subsystem-util/src/lib.rs | 4 + primitives/src/v2/signed.rs | 29 +- .../src/runtime_api_impl/vstaging.rs | 4 + statement-table/src/generic.rs | 95 +- statement-table/src/lib.rs | 2 +- 22 files changed, 4666 insertions(+), 1054 deletions(-) rename node/core/backing/src/{tests.rs => tests/mod.rs} (71%) create mode 100644 node/core/backing/src/tests/prospective_parachains.rs create mode 100644 node/subsystem-util/src/backing_implicit_view.rs diff --git a/node/core/backing/src/error.rs b/node/core/backing/src/error.rs index 604c6c0a0c37..13d33d852f60 100644 --- a/node/core/backing/src/error.rs +++ b/node/core/backing/src/error.rs @@ -17,9 +17,9 @@ use fatality::Nested; use futures::channel::{mpsc, oneshot}; -use polkadot_node_subsystem::{messages::ValidationFailed, SubsystemError}; +use polkadot_node_subsystem::{messages::ValidationFailed, RuntimeApiError, SubsystemError}; use polkadot_node_subsystem_util::Error as UtilError; -use polkadot_primitives::v2::BackedCandidate; +use polkadot_primitives::v2::{BackedCandidate, ValidationCodeHash}; use crate::LOG_TARGET; @@ -42,16 +42,31 @@ pub enum Error { #[error("FetchPoV failed")] FetchPoV, + #[error("Fetching validation code by hash failed {0:?}, {1:?}")] + FetchValidationCode(ValidationCodeHash, RuntimeApiError), + + #[error("Fetching Runtime API version failed {0:?}")] + FetchRuntimeApiVersion(RuntimeApiError), + + #[error("No validation code {0:?}")] + NoValidationCode(ValidationCodeHash), + + #[error("Candidate rejected by prospective parachains subsystem")] + RejectedByProspectiveParachains, + #[fatal] #[error("Failed to spawn background task")] FailedToSpawnBackgroundTask, - #[error("ValidateFromChainState channel closed before receipt")] - ValidateFromChainState(#[source] oneshot::Canceled), + #[error("ValidateFromExhaustive channel closed before receipt")] + ValidateFromExhaustive(#[source] oneshot::Canceled), #[error("StoreAvailableData channel closed before receipt")] StoreAvailableData(#[source] oneshot::Canceled), + #[error("RuntimeAPISubsystem channel closed before receipt")] + RuntimeApiUnavailable(#[source] oneshot::Canceled), + #[error("a channel was closed before receipt in try_join!")] JoinMultiple(#[source] oneshot::Canceled), diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index a189b5955c89..9d5f521da1f8 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -14,44 +14,95 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see <http://www.gnu.org/licenses/>. -//! Implements a `CandidateBackingSubsystem`. +//! Implements the `CandidateBackingSubsystem`. +//! +//! This subsystem maintains the entire responsibility of tracking parachain +//! candidates which can be backed, as well as the issuance of statements +//! about candidates when run on a validator node. +//! +//! There are two types of statements: `Seconded` and `Valid`. +//! `Seconded` implies `Valid`, and nothing should be stated as +//! `Valid` unless it's already been `Seconded`. +//! +//! Validators may only second candidates which fall under their own group +//! assignment, and they may only second one candidate per depth per active leaf. +//! Candidates which are stated as either `Seconded` or `Valid` by a majority of the +//! assigned group of validators may be backed on-chain and proceed to the availability +//! stage. +//! +//!
Depth is a concept relating to asynchronous backing, by which +//! short sub-chains of candidates are backed and extended off-chain, and then placed +//! asynchronously into blocks of the relay chain as those are authored and as the +//! relay-chain state becomes ready for them. Asynchronous backing allows parachains to +//! grow mostly independently from the state of the relay chain, which gives more time for +//! parachains to be validated and thereby increases performance. +//! +//! Most of the work of asynchronous backing is handled by the Prospective Parachains +//! subsystem. The 'depth' of a parachain block with respect to a relay chain block is +//! a measure of how many parachain blocks are between the most recent included parachain block +//! in the post-state of the relay-chain block and the candidate. For instance, +//! a candidate that descends directly from the most recent parachain block in the relay-chain +//! state has depth 0. The child of that candidate would have depth 1. And so on. +//! +//! The candidate backing subsystem keeps track of a set of 'active leaves' which are the +//! most recent blocks in the relay-chain (which is in fact a tree) which could be built +//! upon. Depth is always measured against active leaves, and the valid relay-parent that +//! each candidate can have is determined by the active leaves. The Prospective Parachains +//! subsystem enforces that the relay-parent increases monotonically, so that logic +//! is not handled here. By communicating with the Prospective Parachains subsystem, +//! this subsystem extrapolates an "implicit view" from the set of currently active leaves, +//! which determines the set of all recent relay-chain block hashes which could be relay-parents +//! for candidates backed in children of the active leaves. +//! +//! In fact, this subsystem relies on the Statement Distribution subsystem to prevent spam +//! by enforcing the rule that each validator may second at most one candidate per depth per +//! active leaf. This bounds the number of candidates that the system needs to consider and +//! is not handled within this subsystem, except for candidates seconded locally. +//! +//! This subsystem also handles relay-chain heads which don't support asynchronous backing. +//! For such active leaves, the only valid relay-parent is the leaf hash itself and the only +//! allowed depth is 0.
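[Editor's note: to make the 'depth' rule in the module docs above concrete, here is a minimal, self-contained sketch of computing a candidate's depth in an unincluded sub-chain. It is illustrative only and not part of this patch; `UnincludedSegment` and `depth_of` are hypothetical names, and the real bookkeeping lives in the Prospective Parachains subsystem.]

```rust
/// Illustrative stand-in for a candidate hash (the real type is
/// `polkadot_primitives::v2::CandidateHash`).
type CandidateHash = [u8; 32];

/// A prospective, as-yet-unincluded chain of parachain candidates, ordered
/// starting from the child of the most recently included parachain block.
struct UnincludedSegment {
    unincluded: Vec<CandidateHash>,
}

impl UnincludedSegment {
    /// The depth of a candidate is its distance from the most recently
    /// included parachain block: the direct descendant has depth 0, its
    /// child depth 1, and so on.
    fn depth_of(&self, candidate: &CandidateHash) -> Option<usize> {
        self.unincluded.iter().position(|c| c == candidate)
    }
}
```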
#![deny(unused_crate_dependencies)] use std::{ - collections::{HashMap, HashSet}, + collections::{BTreeMap, HashMap, HashSet}, sync::Arc, }; use bitvec::vec::BitVec; use futures::{ channel::{mpsc, oneshot}, - FutureExt, SinkExt, StreamExt, + future::BoxFuture, + stream::FuturesOrdered, + FutureExt, SinkExt, StreamExt, TryFutureExt, }; use error::{Error, FatalResult}; use polkadot_node_primitives::{ - AvailableData, InvalidCandidate, PoV, SignedDisputeStatement, SignedFullStatement, Statement, - ValidationResult, BACKING_EXECUTION_TIMEOUT, + AvailableData, InvalidCandidate, PoV, SignedDisputeStatement, SignedFullStatementWithPVD, + StatementWithPVD, ValidationResult, BACKING_EXECUTION_TIMEOUT, }; use polkadot_node_subsystem::{ - jaeger, messages::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage, - ProvisionableData, ProvisionerMessage, RuntimeApiRequest, StatementDistributionMessage, + HypotheticalDepthRequest, ProspectiveParachainsMessage, ProvisionableData, + ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, StatementDistributionMessage, }, - overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem, - Stage, SubsystemError, + overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ - self as util, request_from_runtime, request_session_index_for_child, request_validator_groups, + self as util, + backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView}, + request_from_runtime, request_session_index_for_child, request_validator_groups, request_validators, Validator, }; use polkadot_primitives::v2::{ - BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, CollatorId, - CommittedCandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, SessionIndex, - SigningContext, ValidatorId, ValidatorIndex, ValidatorSignature, ValidityAttestation, + BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, + CommittedCandidateReceipt, CoreIndex, CoreState, Hash, Id as ParaId, PersistedValidationData, + SessionIndex, SigningContext, ValidationCode, ValidatorId, ValidatorIndex, ValidatorSignature, + ValidityAttestation, }; use sp_keystore::SyncCryptoStorePtr; use statement_table::{ @@ -60,7 +111,7 @@ use statement_table::{ SignedStatement as TableSignedStatement, Statement as TableStatement, Summary as TableSummary, }, - Context as TableContextTrait, Table, + Config as TableConfig, Context as TableContextTrait, Table, }; mod error; @@ -108,9 +159,9 @@ impl std::fmt::Debug for ValidatedCandidateCommand { impl ValidatedCandidateCommand { fn candidate_hash(&self) -> CandidateHash { match *self { - ValidatedCandidateCommand::Second(Ok((ref candidate, _, _))) => candidate.hash(), + ValidatedCandidateCommand::Second(Ok(ref outputs)) => outputs.candidate.hash(), ValidatedCandidateCommand::Second(Err(ref candidate)) => candidate.hash(), - ValidatedCandidateCommand::Attest(Ok((ref candidate, _, _))) => candidate.hash(), + ValidatedCandidateCommand::Attest(Ok(ref outputs)) => outputs.candidate.hash(), ValidatedCandidateCommand::Attest(Err(ref candidate)) => candidate.hash(), ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => candidate_hash, } @@ -147,6 +198,113 @@ where } } +struct PerRelayParentState { + prospective_parachains_mode: ProspectiveParachainsMode, + /// The hash of the relay parent on top of which this job 
is doing its work. + parent: Hash, + /// The session index this corresponds to. + session_index: SessionIndex, + /// The `ParaId` assigned to the local validator at this relay parent. + assignment: Option<ParaId>, + /// The candidates that are backed by enough validators in their group, by hash. + backed: HashSet<CandidateHash>, + /// The table of candidates and statements under this relay-parent. + table: Table<TableContext>, + /// The table context, including groups. + table_context: TableContext, + /// We issued `Seconded` or `Valid` statements about these candidates. + issued_statements: HashSet<CandidateHash>, + /// These candidates are undergoing validation in the background. + awaiting_validation: HashSet<CandidateHash>, + /// Data needed for retrying in case of `ValidatedCandidateCommand::AttestNoPoV`. + fallbacks: HashMap<CandidateHash, AttestingData>, +} + +struct PerCandidateState { + persisted_validation_data: PersistedValidationData, + seconded_locally: bool, + para_id: ParaId, + relay_parent: Hash, +} + +#[derive(Debug, Clone, Copy, PartialEq)] +enum ProspectiveParachainsMode { + // v2 runtime API: no prospective parachains. + Disabled, + // vstaging runtime API: prospective parachains. + Enabled, +} + +impl ProspectiveParachainsMode { + fn is_enabled(&self) -> bool { + self == &ProspectiveParachainsMode::Enabled + } +} + +struct ActiveLeafState { + prospective_parachains_mode: ProspectiveParachainsMode, + /// The candidates seconded at various depths under this active + /// leaf. A candidate can only be seconded when its hypothetical + /// depth under every active leaf has an empty entry in this map. + /// + /// When prospective parachains are disabled, the only depth + /// which is allowed is 0. + seconded_at_depth: BTreeMap<usize, CandidateHash>, +} + +/// The state of the subsystem. +struct State { + /// The utility for managing the implicit and explicit views in a consistent way. + /// + /// We only feed leaves which have prospective parachains enabled to this view. + implicit_view: ImplicitView, + /// State tracked for all active leaves, whether or not they have prospective parachains + /// enabled. + per_leaf: HashMap<Hash, ActiveLeafState>, + /// State tracked for all relay-parents backing work is ongoing for. This includes + /// all active leaves. + /// + /// Relay-parents fall into one of 3 categories. + /// 1. active leaves which do support prospective parachains + /// 2. active leaves which do not support prospective parachains + /// 3. relay-chain blocks which are ancestors of an active leaf and + /// do support prospective parachains. + /// + /// Relay-chain blocks which don't support prospective parachains are + /// never included in the fragment trees of active leaves which do. + /// + /// While it would be technically possible to support such leaves in + /// fragment trees, it only benefits the transition period when asynchronous + /// backing is being enabled and increases code complexity. + per_relay_parent: HashMap<Hash, PerRelayParentState>, + /// State tracked for all candidates relevant to the implicit view. + /// + /// This is guaranteed to have an entry for each candidate with a relay parent in the implicit + /// or explicit view for which a `Seconded` statement has been successfully imported. + per_candidate: HashMap<CandidateHash, PerCandidateState>, + /// A cloneable sender which is dispatched to background candidate validation tasks to inform + /// the main task of the result. + background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, + /// The handle to the keystore used for signing.
+ keystore: SyncCryptoStorePtr, +} + +impl State { + fn new( + background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, + keystore: SyncCryptoStorePtr, + ) -> Self { + State { + implicit_view: ImplicitView::default(), + per_leaf: HashMap::default(), + per_relay_parent: HashMap::default(), + per_candidate: HashMap::new(), + background_validation_tx, + keystore, + } + } +} + #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn run( mut ctx: Context, @@ -154,18 +312,11 @@ async fn run( metrics: Metrics, ) -> FatalResult<()> { let (background_validation_tx, mut background_validation_rx) = mpsc::channel(16); - let mut jobs = HashMap::new(); + let mut state = State::new(background_validation_tx, keystore); loop { - let res = run_iteration( - &mut ctx, - keystore.clone(), - &metrics, - &mut jobs, - background_validation_tx.clone(), - &mut background_validation_rx, - ) - .await; + let res = + run_iteration(&mut ctx, &mut state, &metrics, &mut background_validation_rx).await; match res { Ok(()) => break, @@ -179,10 +330,8 @@ async fn run( #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn run_iteration( ctx: &mut Context, - keystore: SyncCryptoStorePtr, + state: &mut State, metrics: &Metrics, - jobs: &mut HashMap>, - background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, background_validation_rx: &mut mpsc::Receiver<(Hash, ValidatedCandidateCommand)>, ) -> Result<(), Error> { loop { @@ -191,9 +340,10 @@ async fn run_iteration( if let Some((relay_parent, command)) = validated_command { handle_validated_candidate_command( &mut *ctx, - jobs, + state, relay_parent, command, + metrics, ).await?; } else { panic!("background_validation_tx always alive at this point; qed"); @@ -201,243 +351,24 @@ async fn run_iteration( } from_overseer = ctx.recv().fuse() => { match from_overseer? { - FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => handle_active_leaves_update( - &mut *ctx, - update, - jobs, - &keystore, - &background_validation_tx, - &metrics, - ).await?, + FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { + handle_active_leaves_update( + &mut *ctx, + update, + state, + ).await?; + } FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {} FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), - FromOrchestra::Communication { msg } => handle_communication(&mut *ctx, jobs, msg).await?, + FromOrchestra::Communication { msg } => { + handle_communication(&mut *ctx, state, msg, metrics).await?; + } } } ) } } -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn handle_validated_candidate_command( - ctx: &mut Context, - jobs: &mut HashMap>, - relay_parent: Hash, - command: ValidatedCandidateCommand, -) -> Result<(), Error> { - if let Some(job) = jobs.get_mut(&relay_parent) { - job.job.handle_validated_candidate_command(&job.span, ctx, command).await?; - } else { - // simple race condition; can be ignored - this relay-parent - // is no longer relevant. 
- } - - Ok(()) -} - -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn handle_communication( - ctx: &mut Context, - jobs: &mut HashMap>, - message: CandidateBackingMessage, -) -> Result<(), Error> { - match message { - CandidateBackingMessage::Second(relay_parent, candidate, pov) => { - if let Some(job) = jobs.get_mut(&relay_parent) { - job.job.handle_second_msg(&job.span, ctx, candidate, pov).await?; - } - }, - CandidateBackingMessage::Statement(relay_parent, statement) => { - if let Some(job) = jobs.get_mut(&relay_parent) { - job.job.handle_statement_message(&job.span, ctx, statement).await?; - } - }, - CandidateBackingMessage::GetBackedCandidates(relay_parent, requested_candidates, tx) => - if let Some(job) = jobs.get_mut(&relay_parent) { - job.job.handle_get_backed_candidates_message(requested_candidates, tx)?; - }, - } - - Ok(()) -} - -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn handle_active_leaves_update( - ctx: &mut Context, - update: ActiveLeavesUpdate, - jobs: &mut HashMap>, - keystore: &SyncCryptoStorePtr, - background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>, - metrics: &Metrics, -) -> Result<(), Error> { - for deactivated in update.deactivated { - jobs.remove(&deactivated); - } - - let leaf = match update.activated { - None => return Ok(()), - Some(a) => a, - }; - - macro_rules! try_runtime_api { - ($x: expr) => { - match $x { - Ok(x) => x, - Err(e) => { - gum::warn!( - target: LOG_TARGET, - err = ?e, - "Failed to fetch runtime API data for job", - ); - - // We can't do candidate validation work if we don't have the - // requisite runtime API data. But these errors should not take - // down the node. - return Ok(()); - } - } - } - } - - let parent = leaf.hash; - let span = PerLeafSpan::new(leaf.span, "backing"); - let _span = span.child("runtime-apis"); - - let (validators, groups, session_index, cores) = futures::try_join!( - request_validators(parent, ctx.sender()).await, - request_validator_groups(parent, ctx.sender()).await, - request_session_index_for_child(parent, ctx.sender()).await, - request_from_runtime(parent, ctx.sender(), |tx| { - RuntimeApiRequest::AvailabilityCores(tx) - },) - .await, - ) - .map_err(Error::JoinMultiple)?; - - let validators: Vec<_> = try_runtime_api!(validators); - let (validator_groups, group_rotation_info) = try_runtime_api!(groups); - let session_index = try_runtime_api!(session_index); - let cores = try_runtime_api!(cores); - - drop(_span); - let _span = span.child("validator-construction"); - - let signing_context = SigningContext { parent_hash: parent, session_index }; - let validator = - match Validator::construct(&validators, signing_context.clone(), keystore.clone()).await { - Ok(v) => Some(v), - Err(util::Error::NotAValidator) => None, - Err(e) => { - gum::warn!( - target: LOG_TARGET, - err = ?e, - "Cannot participate in candidate backing", - ); - - return Ok(()) - }, - }; - - drop(_span); - let mut assignments_span = span.child("compute-assignments"); - - let mut groups = HashMap::new(); - - let n_cores = cores.len(); - - let mut assignment = None; - - for (idx, core) in cores.into_iter().enumerate() { - // Ignore prospective assignments on occupied cores for the time being. 
- if let CoreState::Scheduled(scheduled) = core { - let core_index = CoreIndex(idx as _); - let group_index = group_rotation_info.group_for_core(core_index, n_cores); - if let Some(g) = validator_groups.get(group_index.0 as usize) { - if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { - assignment = Some((scheduled.para_id, scheduled.collator)); - } - groups.insert(scheduled.para_id, g.clone()); - } - } - } - - let table_context = TableContext { groups, validators, validator }; - - let (assignment, required_collator) = match assignment { - None => { - assignments_span.add_string_tag("assigned", "false"); - (None, None) - }, - Some((assignment, required_collator)) => { - assignments_span.add_string_tag("assigned", "true"); - assignments_span.add_para_id(assignment); - (Some(assignment), required_collator) - }, - }; - - drop(assignments_span); - let _span = span.child("wait-for-job"); - - let job = CandidateBackingJob { - parent, - session_index, - assignment, - required_collator, - issued_statements: HashSet::new(), - awaiting_validation: HashSet::new(), - fallbacks: HashMap::new(), - seconded: None, - unbacked_candidates: HashMap::new(), - backed: HashSet::new(), - keystore: keystore.clone(), - table: Table::default(), - table_context, - background_validation_tx: background_validation_tx.clone(), - metrics: metrics.clone(), - _marker: std::marker::PhantomData, - }; - - jobs.insert(parent, JobAndSpan { job, span }); - - Ok(()) -} - -struct JobAndSpan { - job: CandidateBackingJob, - span: PerLeafSpan, -} - -/// Holds all data needed for candidate backing job operation. -struct CandidateBackingJob { - /// The hash of the relay parent on top of which this job is doing it's work. - parent: Hash, - /// The session index this corresponds to. - session_index: SessionIndex, - /// The `ParaId` assigned to this validator - assignment: Option, - /// The collator required to author the candidate, if any. - required_collator: Option, - /// Spans for all candidates that are not yet backable. - unbacked_candidates: HashMap, - /// We issued `Seconded`, `Valid` or `Invalid` statements on about these candidates. - issued_statements: HashSet, - /// These candidates are undergoing validation in the background. - awaiting_validation: HashSet, - /// Data needed for retrying in case of `ValidatedCandidateCommand::AttestNoPoV`. - fallbacks: HashMap)>, - /// `Some(h)` if this job has already issued `Seconded` statement for some candidate with `h` hash. - seconded: Option, - /// The candidates that are includable, by hash. Each entry here indicates - /// that we've sent the provisioner the backed candidate. - backed: HashSet, - keystore: SyncCryptoStorePtr, - table: Table, - table_context: TableContext, - background_validation_tx: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, - metrics: Metrics, - _marker: std::marker::PhantomData, -} - /// In case a backing validator does not provide a PoV, we need to retry with other backing /// validators. /// @@ -499,10 +430,10 @@ struct InvalidErasureRoot; // It looks like it's not possible to do an `impl From` given the current state of // the code. So this does the necessary conversion. 
-fn primitive_statement_to_table(s: &SignedFullStatement) -> TableSignedStatement { +fn primitive_statement_to_table(s: &SignedFullStatementWithPVD) -> TableSignedStatement { let statement = match s.payload() { - Statement::Seconded(c) => TableStatement::Seconded(c.clone()), - Statement::Valid(h) => TableStatement::Valid(h.clone()), + StatementWithPVD::Seconded(c, _) => TableStatement::Seconded(c.clone()), + StatementWithPVD::Valid(h) => TableStatement::Valid(h.clone()), }; TableSignedStatement { @@ -586,21 +517,17 @@ async fn store_available_data( // // This will compute the erasure root internally and compare it to the expected erasure root. // This returns `Err()` iff there is an internal error. Otherwise, it returns either `Ok(Ok(()))` or `Ok(Err(_))`. - async fn make_pov_available( sender: &mut impl overseer::CandidateBackingSenderTrait, n_validators: usize, pov: Arc<PoV>, candidate_hash: CandidateHash, - validation_data: polkadot_primitives::v2::PersistedValidationData, + validation_data: PersistedValidationData, expected_erasure_root: Hash, - span: Option<&jaeger::Span>, ) -> Result<Result<(), InvalidErasureRoot>, Error> { let available_data = AvailableData { pov, validation_data }; { - let _span = span.as_ref().map(|s| s.child("erasure-coding").with_candidate(candidate_hash)); - let chunks = erasure_coding::obtain_chunks_v1(n_validators, &available_data)?; let branches = erasure_coding::branches(chunks.as_ref()); @@ -612,8 +539,6 @@ async fn make_pov_available( } { - let _span = span.as_ref().map(|s| s.child("store-data").with_candidate(candidate_hash)); - store_available_data(sender, n_validators as u32, candidate_hash, available_data).await?; } @@ -644,13 +569,17 @@ async fn request_pov( async fn request_candidate_validation( sender: &mut impl overseer::CandidateBackingSenderTrait, + pvd: PersistedValidationData, + code: ValidationCode, candidate_receipt: CandidateReceipt, pov: Arc<PoV>, ) -> Result<ValidationResult, Error> { let (tx, rx) = oneshot::channel(); sender - .send_message(CandidateValidationMessage::ValidateFromChainState( + .send_message(CandidateValidationMessage::ValidateFromExhaustive( + pvd, + code, candidate_receipt, pov, BACKING_EXECUTION_TIMEOUT, @@ -661,21 +590,26 @@ match rx.await { Ok(Ok(validation_result)) => Ok(validation_result), Ok(Err(err)) => Err(Error::ValidationFailed(err)), - Err(err) => Err(Error::ValidateFromChainState(err)), + Err(err) => Err(Error::ValidateFromExhaustive(err)), } } -type BackgroundValidationResult = - Result<(CandidateReceipt, CandidateCommitments, Arc<PoV>), CandidateReceipt>; +struct BackgroundValidationOutputs { + candidate: CandidateReceipt, + commitments: CandidateCommitments, + persisted_validation_data: PersistedValidationData, +} + +type BackgroundValidationResult = Result<BackgroundValidationOutputs, CandidateReceipt>; struct BackgroundValidationParams<S: overseer::CandidateBackingSenderTrait, F> { sender: S, tx_command: mpsc::Sender<(Hash, ValidatedCandidateCommand)>, candidate: CandidateReceipt, relay_parent: Hash, + persisted_validation_data: PersistedValidationData, pov: PoVData, n_validators: usize, - span: Option<jaeger::Span>, make_command: F, } @@ -690,16 +624,33 @@ async fn validate_and_make_available( mut tx_command, candidate, relay_parent, + persisted_validation_data, pov, n_validators, - span, make_command, } = params; + let validation_code = { + let validation_code_hash = candidate.descriptor().validation_code_hash; + let (tx, rx) = oneshot::channel(); + sender + .send_message(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::ValidationCodeByHash(validation_code_hash, tx), + )) + .await; + + let code =
rx.await.map_err(Error::RuntimeApiUnavailable)?; + match code { + Err(e) => return Err(Error::FetchValidationCode(validation_code_hash, e)), + Ok(None) => return Err(Error::NoValidationCode(validation_code_hash)), + Ok(Some(c)) => c, + } + }; + let pov = match pov { PoVData::Ready(pov) => pov, - PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => { - let _span = span.as_ref().map(|s| s.child("request-pov")); + PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => match request_pov(&mut sender, relay_parent, from_validator, candidate_hash, pov_hash) .await { @@ -715,17 +666,18 @@ async fn validate_and_make_available( }, Err(err) => return Err(err), Ok(pov) => pov, - } - }, + }, }; let v = { - let _span = span.as_ref().map(|s| { - s.child("request-validation") - .with_pov(&pov) - .with_para_id(candidate.descriptor().para_id) - }); - request_candidate_validation(&mut sender, candidate.clone(), pov.clone()).await? + request_candidate_validation( + &mut sender, + persisted_validation_data, + validation_code, + candidate.clone(), + pov.clone(), + ) + .await? }; let res = match v { @@ -741,14 +693,17 @@ async fn validate_and_make_available( n_validators, pov.clone(), candidate.hash(), - validation_data, + validation_data.clone(), candidate.descriptor.erasure_root, - span.as_ref(), ) .await?; match erasure_valid { - Ok(()) => Ok((candidate, commitments, pov.clone())), + Ok(()) => Ok(BackgroundValidationOutputs { + candidate, + commitments, + persisted_validation_data: validation_data, + }), Err(InvalidErasureRoot) => { gum::debug!( target: LOG_TARGET, @@ -786,626 +741,1279 @@ async fn validate_and_make_available( struct ValidatorIndexOutOfBounds; #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -impl CandidateBackingJob { - async fn handle_validated_candidate_command( - &mut self, - root_span: &jaeger::Span, - ctx: &mut Context, - command: ValidatedCandidateCommand, - ) -> Result<(), Error> { - let candidate_hash = command.candidate_hash(); - self.awaiting_validation.remove(&candidate_hash); - - match command { - ValidatedCandidateCommand::Second(res) => { - match res { - Ok((candidate, commitments, _)) => { - // sanity check. - if self.seconded.is_none() && - !self.issued_statements.contains(&candidate_hash) - { - self.seconded = Some(candidate_hash); - self.issued_statements.insert(candidate_hash); - self.metrics.on_candidate_seconded(); - - let statement = Statement::Seconded(CommittedCandidateReceipt { - descriptor: candidate.descriptor.clone(), - commitments, - }); - if let Some(stmt) = self - .sign_import_and_distribute_statement(ctx, statement, root_span) - .await? - { - ctx.send_message(CollatorProtocolMessage::Seconded( - self.parent, - stmt, - )) - .await; - } - } - }, - Err(candidate) => { - ctx.send_message(CollatorProtocolMessage::Invalid(self.parent, candidate)) - .await; - }, - } - }, - ValidatedCandidateCommand::Attest(res) => { - // We are done - avoid new validation spawns: - self.fallbacks.remove(&candidate_hash); - // sanity check. 
- if !self.issued_statements.contains(&candidate_hash) { - if res.is_ok() { - let statement = Statement::Valid(candidate_hash); - self.sign_import_and_distribute_statement(ctx, statement, &root_span) - .await?; - } - self.issued_statements.insert(candidate_hash); - } - }, - ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => { - if let Some((attesting, span)) = self.fallbacks.get_mut(&candidate_hash) { - if let Some(index) = attesting.backing.pop() { - attesting.from_validator = index; - // Ok, another try: - let c_span = span.as_ref().map(|s| s.child("try")); - let attesting = attesting.clone(); - self.kick_off_validation_work(ctx, attesting, c_span).await? - } - } else { - gum::warn!( - target: LOG_TARGET, - "AttestNoPoV was triggered without fallback being available." - ); - debug_assert!(false); - } +async fn handle_communication( + ctx: &mut Context, + state: &mut State, + message: CandidateBackingMessage, + metrics: &Metrics, +) -> Result<(), Error> { + match message { + CandidateBackingMessage::Second(_relay_parent, candidate, pvd, pov) => { + handle_second_message(ctx, state, candidate, pvd, pov, metrics).await?; + }, + CandidateBackingMessage::Statement(relay_parent, statement) => { + handle_statement_message(ctx, state, relay_parent, statement, metrics).await?; + }, + CandidateBackingMessage::GetBackedCandidates(relay_parent, requested_candidates, tx) => + if let Some(rp_state) = state.per_relay_parent.get(&relay_parent) { + handle_get_backed_candidates_message(rp_state, requested_candidates, tx, metrics)?; }, - } - - Ok(()) } - async fn background_validate_and_make_available( - &mut self, - ctx: &mut Context, - params: BackgroundValidationParams< - impl overseer::CandidateBackingSenderTrait, - impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Send + 'static + Sync, - >, - ) -> Result<(), Error> { - let candidate_hash = params.candidate.hash(); - if self.awaiting_validation.insert(candidate_hash) { - // spawn background task. - let bg = async move { - if let Err(e) = validate_and_make_available(params).await { - if let Error::BackgroundValidationMpsc(error) = e { - gum::debug!( - target: LOG_TARGET, - ?error, - "Mpsc background validation mpsc died during validation- leaf no longer active?" - ); - } else { - gum::error!( + Ok(()) +} + +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn prospective_parachains_mode( + ctx: &mut Context, + leaf_hash: Hash, +) -> Result { + // TODO: call a Runtime API once staging version is available + // https://github.com/paritytech/substrate/discussions/11338 + + let (tx, rx) = oneshot::channel(); + ctx.send_message(RuntimeApiMessage::Request(leaf_hash, RuntimeApiRequest::Version(tx))) + .await; + + let version = rx + .await + .map_err(Error::RuntimeApiUnavailable)? + .map_err(Error::FetchRuntimeApiVersion)?; + + if version == 3 { + Ok(ProspectiveParachainsMode::Enabled) + } else { + if version != 2 { + gum::warn!( + target: LOG_TARGET, + "Runtime API version is {}, expected 2 or 3. 
Prospective parachains are disabled", + version + ); + } + Ok(ProspectiveParachainsMode::Disabled) + } +} + +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn handle_active_leaves_update( + ctx: &mut Context, + update: ActiveLeavesUpdate, + state: &mut State, +) -> Result<(), Error> { + enum LeafHasProspectiveParachains { + Enabled(Result<Vec<Hash>, ImplicitViewFetchError>), + Disabled, + } + + // Activate in the implicit view before deactivating; per the docs + // on `ImplicitView`, this is more efficient. + let res = if let Some(leaf) = update.activated { + // Only activate in implicit view if prospective + // parachains are enabled. + let mode = prospective_parachains_mode(ctx, leaf.hash).await?; + + let leaf_hash = leaf.hash; + Some(( + leaf, + match mode { + ProspectiveParachainsMode::Disabled => LeafHasProspectiveParachains::Disabled, + ProspectiveParachainsMode::Enabled => LeafHasProspectiveParachains::Enabled( + state.implicit_view.activate_leaf(ctx.sender(), leaf_hash).await, + ), + }, + )) + } else { + None + }; + + for deactivated in update.deactivated { + state.per_leaf.remove(&deactivated); + state.implicit_view.deactivate_leaf(deactivated); + } + + // clean up `per_relay_parent` according to ancestry + // of leaves. we do this so we can clean up candidates right after + // as a result. + // + // when prospective parachains are disabled, the implicit view is empty, + // which means we'll clean up everything. This is correct. + { + let remaining: HashSet<_> = state.implicit_view.all_allowed_relay_parents().collect(); + state.per_relay_parent.retain(|r, _| remaining.contains(&r)); + } + + // clean up `per_candidate` according to which relay-parents + // are known. + // + // when prospective parachains are disabled, we clean up all candidates + // because we've cleaned up all relay parents. this is correct. + state + .per_candidate + .retain(|_, pc| state.per_relay_parent.contains_key(&pc.relay_parent)); + + // Get relay parents which might be fresh, but might also already be known, + // whether explicit or implicit from the new active leaf. + let fresh_relay_parents = match res { + None => return Ok(()), + Some((leaf, LeafHasProspectiveParachains::Disabled)) => { + // defensive in this case - for enabled, this manifests as an error. + if state.per_leaf.contains_key(&leaf.hash) { + return Ok(()) + } + + state.per_leaf.insert( + leaf.hash, + ActiveLeafState { + prospective_parachains_mode: ProspectiveParachainsMode::Disabled, + // This is empty because the only allowed relay-parent and depth + // when prospective parachains are disabled is the leaf hash and 0, + // respectively. We've just learned about the leaf hash, so we cannot + // have any candidates seconded with it as a relay-parent yet. + seconded_at_depth: BTreeMap::new(), + }, + ); + + vec![leaf.hash] + }, + Some((leaf, LeafHasProspectiveParachains::Enabled(Ok(_)))) => { + let fresh_relay_parents = + state.implicit_view.known_allowed_relay_parents_under(&leaf.hash, None); + + // At this point, all candidates outside of the implicit view + // have been cleaned up. For all that remain and which we've seconded, + // we ask the prospective parachains subsystem where they land in the fragment + // tree for the given active leaf. This comprises our `seconded_at_depth`.
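[Editor's note: the hunk below relies on `FuturesOrdered` to keep responses in one-to-one correspondence with the requests that produced them. A minimal, self-contained sketch of that pattern using the `futures` crate, illustrative only and not part of this patch:]

```rust
use futures::{channel::oneshot, stream::FuturesOrdered, StreamExt};

/// Polls a set of oneshot receivers in the order they were pushed, so the
/// i-th answer corresponds to the i-th request even though the underlying
/// futures may complete out of order.
async fn gather_in_order(
    receivers: Vec<oneshot::Receiver<u32>>,
) -> Vec<Result<u32, oneshot::Canceled>> {
    let mut ordered = FuturesOrdered::new();
    for rx in receivers {
        ordered.push(rx);
    }

    let mut answers = Vec::new();
    while let Some(res) = ordered.next().await {
        answers.push(res);
    }
    answers
}
```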
+ + let remaining_seconded = state + .per_candidate + .iter() + .filter(|(_, cd)| cd.seconded_locally) + .map(|(c_hash, cd)| (*c_hash, cd.para_id)); + + // one-to-one correspondence to remaining_seconded + let mut membership_answers = FuturesOrdered::new(); + + for (candidate_hash, para_id) in remaining_seconded { + let (tx, rx) = oneshot::channel(); + membership_answers.push(rx.map_ok(move |membership| (candidate_hash, membership))); + + ctx.send_message(ProspectiveParachainsMessage::GetTreeMembership( + para_id, + candidate_hash, + tx, + )) + .await; + } + + let mut seconded_at_depth = BTreeMap::new(); + while let Some(response) = membership_answers.next().await { + match response { + Err(oneshot::Canceled) => { + gum::warn!( target: LOG_TARGET, + "Prospective parachains subsystem unreachable for membership request", + ); + + continue + }, + Ok((candidate_hash, membership)) => { + // This request gives membership in all fragment trees. We have some + // wasted data here, and it can be optimized if it proves + // relevant to performance. + if let Some((_, depths)) = + membership.into_iter().find(|(leaf_hash, _)| leaf_hash == &leaf.hash) + { + for depth in depths { + seconded_at_depth.insert(depth, candidate_hash); + } + } + }, } } + + state.per_leaf.insert( + leaf.hash, + ActiveLeafState { + prospective_parachains_mode: ProspectiveParachainsMode::Enabled, + seconded_at_depth, + }, + ); + + match fresh_relay_parents { + Some(f) => f.to_vec(), + None => { + gum::warn!( + target: LOG_TARGET, + leaf_hash = ?leaf.hash, + "Implicit view gave no relay-parents" + ); + + vec![leaf.hash] + }, + } + }, + Some((leaf, LeafHasProspectiveParachains::Enabled(Err(e)))) => { + gum::debug!( + target: LOG_TARGET, + leaf_hash = ?leaf.hash, + err = ?e, + "Failed to load implicit view for leaf." + ); + + return Ok(()) + }, + }; + // Add entries in `per_relay_parent` for all new relay-parents. + for maybe_new in fresh_relay_parents { + if state.per_relay_parent.contains_key(&maybe_new) { + continue } + + let mode = match state.per_leaf.get(&maybe_new) { + None => { + // If the relay-parent isn't a leaf itself, + // then it is guaranteed by the prospective parachains + // subsystem that it is an ancestor of a leaf which + // has prospective parachains enabled, and that the + // block itself does. + ProspectiveParachainsMode::Enabled + }, + Some(l) => l.prospective_parachains_mode, + }; + + // construct a `PerRelayParent` from the runtime API + // and insert it. + let per = construct_per_relay_parent_state(ctx, maybe_new, &state.keystore, mode).await?; + + if let Some(per) = per { + state.per_relay_parent.insert(maybe_new, per); + } } + + Ok(()) +} + +/// Load the data necessary to do backing work on top of a relay-parent.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn construct_per_relay_parent_state( + ctx: &mut Context, + relay_parent: Hash, + keystore: &SyncCryptoStorePtr, + mode: ProspectiveParachainsMode, +) -> Result<Option<PerRelayParentState>, Error> { + macro_rules! try_runtime_api { + ($x: expr) => { + match $x { + Ok(x) => x, + Err(e) => { + gum::warn!( + target: LOG_TARGET, + err = ?e, + "Failed to fetch runtime API data for job", + ); + + // We can't do candidate validation work if we don't have the + // requisite runtime API data. But these errors should not take + // down the node. + return Ok(None); + } + } + } + } + let parent = relay_parent; + let (validators, groups, session_index, cores) = futures::try_join!( + request_validators(parent, ctx.sender()).await, + request_validator_groups(parent, ctx.sender()).await, + request_session_index_for_child(parent, ctx.sender()).await, + request_from_runtime(parent, ctx.sender(), |tx| { + RuntimeApiRequest::AvailabilityCores(tx) + },) + .await, + ) + .map_err(Error::JoinMultiple)?; + let validators: Vec<_> = try_runtime_api!(validators); + let (validator_groups, group_rotation_info) = try_runtime_api!(groups); + let session_index = try_runtime_api!(session_index); + let cores = try_runtime_api!(cores); + let signing_context = SigningContext { parent_hash: parent, session_index }; + let validator = + match Validator::construct(&validators, signing_context.clone(), keystore.clone()).await { + Ok(v) => Some(v), + Err(util::Error::NotAValidator) => None, + Err(e) => { + gum::warn!( + target: LOG_TARGET, + err = ?e, + "Cannot participate in candidate backing", + ); + + return Ok(None) + }, + }; + + let mut groups = HashMap::new(); + let n_cores = cores.len(); + let mut assignment = None; + for (idx, core) in cores.into_iter().enumerate() { + // Ignore prospective assignments on occupied cores for the time being. + if let CoreState::Scheduled(scheduled) = core { + let core_index = CoreIndex(idx as _); + let group_index = group_rotation_info.group_for_core(core_index, n_cores); + if let Some(g) = validator_groups.get(group_index.0 as usize) { + if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { + assignment = Some((scheduled.para_id, scheduled.collator)); + } + groups.insert(scheduled.para_id, g.clone()); + } + } + } + + let table_context = TableContext { groups, validators, validator }; + let table_config = TableConfig { + allow_multiple_seconded: match mode { + ProspectiveParachainsMode::Enabled => true, + ProspectiveParachainsMode::Disabled => false, + }, + }; + + // TODO [now]: I've removed the `required_collator` more broadly, + // because it's not used in practice and was intended for parathreads.
+ // + // We should attempt parathreads another way, I think, so it makes sense + // to remove. + let assignment = assignment.map(|(a, _required_collator)| a); + + Ok(Some(PerRelayParentState { + prospective_parachains_mode: mode, + parent, + session_index, + assignment, + backed: HashSet::new(), + table: Table::new(table_config), + table_context, + issued_statements: HashSet::new(), + awaiting_validation: HashSet::new(), + fallbacks: HashMap::new(), + })) +} + +enum SecondingAllowed { + No, + Yes(Vec<(Hash, Vec<usize>)>), +} + +/// Checks whether a candidate can be seconded based on its hypothetical +/// depths in the fragment tree and what we've already seconded in all +/// active leaves. +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn seconding_sanity_check( + ctx: &mut Context, + active_leaves: &HashMap<Hash, ActiveLeafState>, + implicit_view: &ImplicitView, + candidate_hash: CandidateHash, + candidate_para: ParaId, + parent_head_data_hash: Hash, + head_data_hash: Hash, + candidate_relay_parent: Hash, +) -> SecondingAllowed { + // Note that `GetHypotheticalDepths` doesn't account for recursion, + // i.e. candidates can appear at multiple depths in the tree and in fact + // at all depths, and we don't know what depths a candidate will ultimately occupy + // because that's dependent on other candidates we haven't yet received. + // + // The only way to effectively rule this out is to have candidate receipts + // directly commit to the parachain block number or some other incrementing + // counter. That requires a major primitives format upgrade, so for now + // we just rule out trivial cycles. + if parent_head_data_hash == head_data_hash { + return SecondingAllowed::No } + + let mut membership = Vec::new(); + let mut responses = FuturesOrdered::<BoxFuture<'_, Result<_, oneshot::Canceled>>>::new(); + + for (head, leaf_state) in active_leaves { + if leaf_state.prospective_parachains_mode.is_enabled() { + // Check that the candidate relay parent is allowed for para, skip the + // leaf otherwise. + let allowed_parents_for_para = + implicit_view.known_allowed_relay_parents_under(head, Some(candidate_para)); + if !allowed_parents_for_para.unwrap_or_default().contains(&candidate_relay_parent) { + continue + } + + let (tx, rx) = oneshot::channel(); + ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalDepth( + HypotheticalDepthRequest { + candidate_hash, + candidate_para, + parent_head_data_hash, + candidate_relay_parent, + fragment_tree_relay_parent: *head, + }, + tx, + )) + .await; + responses.push(rx.map_ok(move |depths| (depths, head, leaf_state)).boxed()); + } else { + if head == &candidate_relay_parent { + if leaf_state.seconded_at_depth.contains_key(&0) { + // The leaf is already occupied. + return SecondingAllowed::No + } + responses.push(futures::future::ok((vec![0], head, leaf_state)).boxed()); + } + } + }
- fn issue_new_misbehaviors(&mut self, sender: &mut impl overseer::CandidateBackingSenderTrait) { - // collect the misbehaviors to avoid double mutable self borrow issues - let misbehaviors: Vec<_> = self.table.drain_misbehaviors().collect(); - for (validator_id, report) in misbehaviors { - // The provisioner waits on candidate-backing, which means - // that we need to send unbounded messages to avoid cycles. - // - // Misbehaviors are bounded by the number of validators and - // the block production protocol. - sender.send_unbounded_message(ProvisionerMessage::ProvisionableData( - self.parent, - ProvisionableData::MisbehaviorReport(self.parent, validator_id, report), - )); - } + if responses.is_empty() { + return SecondingAllowed::No } - /// Import a statement into the statement table and return the summary of the import. - async fn import_statement( - &mut self, - ctx: &mut Context, - statement: &SignedFullStatement, - root_span: &jaeger::Span, - ) -> Result, Error> { - gum::debug!( - target: LOG_TARGET, - statement = ?statement.payload().to_compact(), - validator_index = statement.validator_index().0, - "Importing statement", - ); + while let Some(response) = responses.next().await { + match response { + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + "Failed to reach prospective parachains subsystem for hypothetical depths", + ); - let candidate_hash = statement.payload().candidate_hash(); - let import_statement_span = { - // create a span only for candidates we're already aware of. - self.get_unbacked_statement_child( - root_span, - candidate_hash, - statement.validator_index(), - ) - }; + return SecondingAllowed::No + }, + Ok((depths, head, leaf_state)) => { + for depth in &depths { + if leaf_state.seconded_at_depth.contains_key(&depth) { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + depth, + leaf_hash = ?head, + "Refusing to second candidate at depth - already occupied." + ); - if let Err(ValidatorIndexOutOfBounds) = self - .dispatch_new_statement_to_dispute_coordinator(ctx.sender(), candidate_hash, &statement) - .await - { - gum::warn!( - target: LOG_TARGET, - session_index = ?self.session_index, - relay_parent = ?self.parent, - validator_index = statement.validator_index().0, - "Supposedly 'Signed' statement has validator index out of bounds." - ); + return SecondingAllowed::No + } + } - return Ok(None) + membership.push((*head, depths)); + }, } + } - let stmt = primitive_statement_to_table(statement); + // At this point we've checked the depths of the candidate against all active + // leaves. 
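[Editor's note: a hedged sketch of the per-leaf occupancy rule enforced above, using the same `seconded_at_depth: BTreeMap<usize, CandidateHash>` shape as the `ActiveLeafState` struct in this patch; the free function itself is illustrative and not part of the diff:]

```rust
use std::collections::BTreeMap;

/// Illustrative stand-in for `polkadot_primitives::v2::CandidateHash`.
type CandidateHash = [u8; 32];

/// A candidate may only be seconded under a leaf if every depth it would
/// occupy in that leaf's fragment tree is currently unoccupied.
fn may_second_at(
    seconded_at_depth: &BTreeMap<usize, CandidateHash>,
    depths: &[usize],
) -> bool {
    depths.iter().all(|d| !seconded_at_depth.contains_key(d))
}
```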
+ SecondingAllowed::Yes(membership) +} - let summary = self.table.import_statement(&self.table_context, stmt); +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn handle_validated_candidate_command( + ctx: &mut Context, + state: &mut State, + relay_parent: Hash, + command: ValidatedCandidateCommand, + metrics: &Metrics, +) -> Result<(), Error> { + match state.per_relay_parent.get_mut(&relay_parent) { + Some(rp_state) => { + let candidate_hash = command.candidate_hash(); + rp_state.awaiting_validation.remove(&candidate_hash); + + match command { + ValidatedCandidateCommand::Second(res) => match res { + Ok(outputs) => { + let BackgroundValidationOutputs { + candidate, + commitments, + persisted_validation_data, + } = outputs; + + if rp_state.issued_statements.contains(&candidate_hash) { + return Ok(()) + } - let unbacked_span = if let Some(attested) = summary - .as_ref() - .and_then(|s| self.table.attested_candidate(&s.candidate, &self.table_context)) - { - let candidate_hash = attested.candidate.hash(); - // `HashSet::insert` returns true if the thing wasn't in there already. - if self.backed.insert(candidate_hash) { - let span = self.remove_unbacked_span(&candidate_hash); + // sanity check that we're allowed to second the candidate + // and that it doesn't conflict with other candidates we've + // seconded. + let fragment_tree_membership = match seconding_sanity_check( + ctx, + &state.per_leaf, + &state.implicit_view, + candidate_hash, + candidate.descriptor().para_id, + persisted_validation_data.parent_head.hash(), + commitments.head_data.hash(), + candidate.descriptor().relay_parent, + ) + .await + { + SecondingAllowed::No => return Ok(()), + SecondingAllowed::Yes(membership) => membership, + }; - if let Some(backed) = table_attested_to_backed(attested, &self.table_context) { - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?candidate_hash, - relay_parent = ?self.parent, - para_id = %backed.candidate.descriptor.para_id, - "Candidate backed", - ); + let statement = StatementWithPVD::Seconded( + CommittedCandidateReceipt { + descriptor: candidate.descriptor.clone(), + commitments, + }, + persisted_validation_data, + ); - // The provisioner waits on candidate-backing, which means - // that we need to send unbounded messages to avoid cycles. - // - // Backed candidates are bounded by the number of validators, - // parachains, and the block production rate of the relay chain. - let message = ProvisionerMessage::ProvisionableData( - self.parent, - ProvisionableData::BackedCandidate(backed.receipt()), - ); - ctx.send_unbounded_message(message); + // If we get an Error::RejectedByProspectiveParachains, + // then the statement has not been distributed or imported into + // the table. + let res = sign_import_and_distribute_statement( + ctx, + rp_state, + &mut state.per_candidate, + statement, + state.keystore.clone(), + metrics, + ) + .await; + + if let Err(Error::RejectedByProspectiveParachains) = res { + let candidate_hash = candidate.hash(); + gum::debug!( + target: LOG_TARGET, + relay_parent = ?candidate.descriptor().relay_parent, + ?candidate_hash, + "Attempted to second candidate but was rejected by prospective parachains", + ); + + // Ensure the collator is reported. + ctx.send_message(CollatorProtocolMessage::Invalid( + candidate.descriptor().relay_parent, + candidate, + )) + .await; - span.as_ref().map(|s| s.child("backed")); - span - } else { - None - } - } else { - None - } - } else { - None - }; + return Ok(()) + } + + if let Some(stmt) = res? 
{ + match state.per_candidate.get_mut(&candidate_hash) { + None => { + gum::warn!( + target: LOG_TARGET, + ?candidate_hash, + "Missing `per_candidate` for seconded candidate.", + ); + }, + Some(p) => p.seconded_locally = true, + } + + // update seconded depths in active leaves. + for (leaf, depths) in fragment_tree_membership { + let leaf_data = match state.per_leaf.get_mut(&leaf) { + None => { + gum::warn!( + target: LOG_TARGET, + leaf_hash = ?leaf, + "Missing `per_leaf` for known active leaf." + ); + + continue + }, + Some(d) => d, + }; + + for depth in depths { + leaf_data.seconded_at_depth.insert(depth, candidate_hash); + } + } + + rp_state.issued_statements.insert(candidate_hash); + + metrics.on_candidate_seconded(); + ctx.send_message(CollatorProtocolMessage::Seconded( + rp_state.parent, + StatementWithPVD::drop_pvd_from_signed(stmt), + )) + .await; + } + }, + Err(candidate) => { + ctx.send_message(CollatorProtocolMessage::Invalid( + rp_state.parent, + candidate, + )) + .await; + }, + }, + ValidatedCandidateCommand::Attest(res) => { + // We are done - avoid new validation spawns: + rp_state.fallbacks.remove(&candidate_hash); + // sanity check. + if !rp_state.issued_statements.contains(&candidate_hash) { + if res.is_ok() { + let statement = StatementWithPVD::Valid(candidate_hash); + + sign_import_and_distribute_statement( + ctx, + rp_state, + &mut state.per_candidate, + statement, + state.keystore.clone(), + metrics, + ) + .await?; + } + rp_state.issued_statements.insert(candidate_hash); + } + }, + ValidatedCandidateCommand::AttestNoPoV(candidate_hash) => { + if let Some(attesting) = rp_state.fallbacks.get_mut(&candidate_hash) { + if let Some(index) = attesting.backing.pop() { + attesting.from_validator = index; + let attesting = attesting.clone(); + + // The candidate state should be available because we've + // validated it before, the relay-parent is still around, + // and candidates are pruned on the basis of relay-parents. + // + // If it's not, then no point in validating it anyway. + if let Some(pvd) = state + .per_candidate + .get(&candidate_hash) + .map(|pc| pc.persisted_validation_data.clone()) + { + kick_off_validation_work( + ctx, + rp_state, + pvd, + &state.background_validation_tx, + attesting, + ) + .await?; + } + } + } else { + gum::warn!( + target: LOG_TARGET, + "AttestNoPoV was triggered without fallback being available." + ); + debug_assert!(false); + } + }, + } + }, + None => { + // simple race condition; can be ignored - this relay-parent + // is no longer relevant. + }, + } + + Ok(()) +} + +async fn sign_statement( + rp_state: &PerRelayParentState, + statement: StatementWithPVD, + keystore: SyncCryptoStorePtr, + metrics: &Metrics, +) -> Option<SignedFullStatementWithPVD> { + let signed = rp_state + .table_context + .validator + .as_ref()? + .sign(keystore, statement) + .await + .ok() + .flatten()?; + metrics.on_statement_signed(); + Some(signed) +} + +/// The dispute coordinator keeps track of all statements by validators about every recent +/// candidate. +/// +/// When importing a statement, this should be called to access the candidate receipt either +/// from the statement itself or from the underlying statement table in order to craft +/// and dispatch the notification to the dispute coordinator. +/// +/// This also does bounds-checking on the validator index and will return an error if the +/// validator index is out of bounds for the current validator set.
It's expected that - /// this should never happen due to the interface of the candidate backing subsystem - - /// the networking component responsible for feeding statements to the backing subsystem - /// is meant to check the signature and provenance of all statements before submission. - async fn dispatch_new_statement_to_dispute_coordinator( - &self, - sender: &mut impl overseer::CandidateBackingSenderTrait, - candidate_hash: CandidateHash, - statement: &SignedFullStatement, - ) -> Result<(), ValidatorIndexOutOfBounds> { - // Dispatch the statement to the dispute coordinator. - let validator_index = statement.validator_index(); - let signing_context = - SigningContext { parent_hash: self.parent, session_index: self.session_index }; - - let validator_public = match self.table_context.validators.get(validator_index.0 as usize) { - None => return Err(ValidatorIndexOutOfBounds), - Some(v) => v, - }; + Ok(()) +} - let maybe_candidate_receipt = match statement.payload() { - Statement::Seconded(receipt) => Some(receipt.to_plain()), - Statement::Valid(candidate_hash) => { - // Valid statements are only supposed to be imported - // once we've seen at least one `Seconded` statement. - self.table.get_candidate(&candidate_hash).map(|c| c.to_plain()) - }, - }; +async fn sign_statement( + rp_state: &PerRelayParentState, + statement: StatementWithPVD, + keystore: SyncCryptoStorePtr, + metrics: &Metrics, +) -> Option { + let signed = rp_state + .table_context + .validator + .as_ref()? + .sign(keystore, statement) + .await + .ok() + .flatten()?; + metrics.on_statement_signed(); + Some(signed) +} - let maybe_signed_dispute_statement = SignedDisputeStatement::from_backing_statement( - statement.as_unchecked(), - signing_context, - validator_public.clone(), - ) - .ok(); +/// The dispute coordinator keeps track of all statements by validators about every recent +/// candidate. +/// +/// When importing a statement, this should be called access the candidate receipt either +/// from the statement itself or from the underlying statement table in order to craft +/// and dispatch the notification to the dispute coordinator. +/// +/// This also does bounds-checking on the validator index and will return an error if the +/// validator index is out of bounds for the current validator set. It's expected that +/// this should never happen due to the interface of the candidate backing subsystem - +/// the networking component responsible for feeding statements to the backing subsystem +/// is meant to check the signature and provenance of all statements before submission. +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn dispatch_new_statement_to_dispute_coordinator( + ctx: &mut Context, + rp_state: &PerRelayParentState, + candidate_hash: CandidateHash, + statement: &SignedFullStatementWithPVD, +) -> Result<(), ValidatorIndexOutOfBounds> { + // Dispatch the statement to the dispute coordinator. 
+	let validator_index = statement.validator_index();
+	let signing_context =
+		SigningContext { parent_hash: rp_state.parent, session_index: rp_state.session_index };
+
+	let validator_public = match rp_state.table_context.validators.get(validator_index.0 as usize) {
+		None => return Err(ValidatorIndexOutOfBounds),
+		Some(v) => v,
+	};
-		if let (Some(candidate_receipt), Some(dispute_statement)) =
-			(maybe_candidate_receipt, maybe_signed_dispute_statement)
-		{
-			sender
-				.send_message(DisputeCoordinatorMessage::ImportStatements {
-					candidate_hash,
-					candidate_receipt,
-					session: self.session_index,
-					statements: vec![(dispute_statement, validator_index)],
-					pending_confirmation: None,
-				})
+	let maybe_candidate_receipt = match statement.payload() {
+		StatementWithPVD::Seconded(receipt, _) => Some(receipt.to_plain()),
+		StatementWithPVD::Valid(candidate_hash) => {
+			// Valid statements are only supposed to be imported
+			// once we've seen at least one `Seconded` statement.
+			rp_state.table.get_candidate(&candidate_hash).map(|c| c.to_plain())
+		},
+	};
+
+	let maybe_signed_dispute_statement = SignedDisputeStatement::from_backing_statement(
+		statement.as_unchecked(),
+		signing_context,
+		validator_public.clone(),
+	)
+	.ok();
+
+	if let (Some(candidate_receipt), Some(dispute_statement)) =
+		(maybe_candidate_receipt, maybe_signed_dispute_statement)
+	{
+		ctx.send_message(DisputeCoordinatorMessage::ImportStatements {
+			candidate_hash,
+			candidate_receipt,
+			session: rp_state.session_index,
+			statements: vec![(dispute_statement, validator_index)],
+			pending_confirmation: None,
+		})
+		.await;
+	}
+
+	Ok(())
+}
+
+/// Import a statement into the statement table and return the summary of the import.
+///
+/// This will fail with `Error::RejectedByProspectiveParachains` if the statement is
+/// `Seconded`, the candidate is fresh (previously unknown), and any of the following are true:
+/// 1. There is no `PersistedValidationData` attached.
+/// 2. Prospective parachains are enabled for the relay parent and the prospective
+///    parachains subsystem returned an empty `FragmentTreeMembership`, i.e. it did not
+///    recognize the candidate as being applicable to any of the active leaves.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn import_statement(
+	ctx: &mut Context,
+	rp_state: &mut PerRelayParentState,
+	per_candidate: &mut HashMap<CandidateHash, PerCandidateState>,
+	statement: &SignedFullStatementWithPVD,
+) -> Result<Option<TableSummary>, Error> {
+	gum::debug!(
+		target: LOG_TARGET,
+		statement = ?statement.payload().to_compact(),
+		validator_index = statement.validator_index().0,
+		"Importing statement",
+	);
+
+	let candidate_hash = statement.payload().candidate_hash();
+
+	// If this is a new candidate (statement is 'seconded' and candidate is unknown),
+	// we need to create an entry in the `PerCandidateState` map.
+	//
+	// If the relay parent supports prospective parachains, we also need
+	// to inform the prospective parachains subsystem of the seconded candidate.
+	// If `ProspectiveParachainsMessage::CandidateSeconded` fails, then we return
+	// `Error::RejectedByProspectiveParachains`.
+	//
+	// Persisted Validation Data should be available - it may already be available
+	// if this is a candidate we are seconding.
+	//
+	// We should also not accept any candidates which have no valid depths under any of
+	// our active leaves.
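+	//
+	// As an illustration (not normative): a membership answer has the shape
+	// `Vec<(Hash, Vec<usize>)>`, e.g.
+	//
+	//   let membership = vec![(leaf_a, vec![0]), (leaf_b, vec![0, 1])];
+	//   assert!(!membership.is_empty()); // the candidate fits somewhere
+	//
+	// An empty vector means no active leaf has a spot for the candidate.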
+ if let StatementWithPVD::Seconded(candidate, pvd) = statement.payload() { + if !per_candidate.contains_key(&candidate_hash) { + if rp_state.prospective_parachains_mode.is_enabled() { + let (tx, rx) = oneshot::channel(); + ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded( + candidate.descriptor().para_id, + candidate.clone(), + pvd.clone(), + tx, + )) .await; + + match rx.await { + Err(oneshot::Canceled) => { + gum::warn!( + target: LOG_TARGET, + "Could not reach the Prospective Parachains subsystem." + ); + + return Err(Error::RejectedByProspectiveParachains) + }, + Ok(membership) => + if membership.is_empty() { + return Err(Error::RejectedByProspectiveParachains) + }, + } + } + + // Only save the candidate if it was approved by prospective parachains. + per_candidate.insert( + candidate_hash, + PerCandidateState { + persisted_validation_data: pvd.clone(), + // This is set after importing when seconding locally. + seconded_locally: false, + para_id: candidate.descriptor().para_id, + relay_parent: candidate.descriptor().relay_parent, + }, + ); } + } + + if let Err(ValidatorIndexOutOfBounds) = + dispatch_new_statement_to_dispute_coordinator(ctx, rp_state, candidate_hash, statement) + .await + { + gum::warn!( + target: LOG_TARGET, + session_index = ?rp_state.session_index, + relay_parent = ?rp_state.parent, + validator_index = statement.validator_index().0, + "Supposedly 'Signed' statement has validator index out of bounds." + ); - Ok(()) + return Ok(None) } - async fn handle_second_msg( - &mut self, - root_span: &jaeger::Span, - ctx: &mut Context, - candidate: CandidateReceipt, - pov: PoV, - ) -> Result<(), Error> { - let _timer = self.metrics.time_process_second(); - - let candidate_hash = candidate.hash(); - let span = root_span - .child("second") - .with_stage(jaeger::Stage::CandidateBacking) - .with_pov(&pov) - .with_candidate(candidate_hash) - .with_relay_parent(self.parent); - - // Sanity check that candidate is from our assignment. - if Some(candidate.descriptor().para_id) != self.assignment { - gum::debug!( - target: LOG_TARGET, - our_assignment = ?self.assignment, - collation = ?candidate.descriptor().para_id, - "Subsystem asked to second for para outside of our assignment", - ); + let stmt = primitive_statement_to_table(statement); - return Ok(()) - } + let summary = rp_state.table.import_statement(&rp_state.table_context, stmt); - // If the message is a `CandidateBackingMessage::Second`, sign and dispatch a - // Seconded statement only if we have not seconded any other candidate and - // have not signed a Valid statement for the requested candidate. - if self.seconded.is_none() { - // This job has not seconded a candidate yet. + if let Some(attested) = summary + .as_ref() + .and_then(|s| rp_state.table.attested_candidate(&s.candidate, &rp_state.table_context)) + { + // `HashSet::insert` returns true if the thing wasn't in there already. + if rp_state.backed.insert(candidate_hash) { + if let Some(backed) = table_attested_to_backed(attested, &rp_state.table_context) { + let para_id = backed.candidate.descriptor.para_id; + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?candidate_hash, + relay_parent = ?rp_state.parent, + %para_id, + "Candidate backed", + ); - if !self.issued_statements.contains(&candidate_hash) { - let pov = Arc::new(pov); - self.validate_and_second(&span, &root_span, ctx, &candidate, pov).await?; + // Inform the prospective parachains subsystem + // that the candidate is now backed. 
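+			// (Prospective parachains can then treat the candidate as backable
+			// when answering requests for candidates to build upon.)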
+			if rp_state.prospective_parachains_mode.is_enabled() {
+				ctx.send_message(ProspectiveParachainsMessage::CandidateBacked(
+					para_id,
+					candidate_hash,
+				))
+				.await;
+			}
+
+			// The provisioner waits on candidate-backing, which means
+			// that we need to send unbounded messages to avoid cycles.
+			//
+			// Backed candidates are bounded by the number of validators,
+			// parachains, and the block production rate of the relay chain.
+			let message = ProvisionerMessage::ProvisionableData(
+				rp_state.parent,
+				ProvisionableData::BackedCandidate(backed.receipt()),
+			);
+			ctx.send_unbounded_message(message);
 			}
 		}
+	}
+
+	issue_new_misbehaviors(ctx, rp_state.parent, &mut rp_state.table);
+
+	Ok(summary)
+}
-
+/// Check if there have happened any new misbehaviors and issue necessary messages.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+fn issue_new_misbehaviors(
+	ctx: &mut Context,
+	relay_parent: Hash,
+	table: &mut Table<TableContext>,
+) {
+	// collect the misbehaviors to avoid double mutable self borrow issues
+	let misbehaviors: Vec<_> = table.drain_misbehaviors().collect();
+	for (validator_id, report) in misbehaviors {
+		// The provisioner waits on candidate-backing, which means
+		// that we need to send unbounded messages to avoid cycles.
+		//
+		// Misbehaviors are bounded by the number of validators and
+		// the block production protocol.
+		ctx.send_unbounded_message(ProvisionerMessage::ProvisionableData(
+			relay_parent,
+			ProvisionableData::MisbehaviorReport(relay_parent, validator_id, report),
+		));
 	}
+}
-
-	async fn handle_statement_message(
-		&mut self,
-		root_span: &jaeger::Span,
-		ctx: &mut Context,
-		statement: SignedFullStatement,
-	) -> Result<(), Error> {
-		let _timer = self.metrics.time_process_statement();
-		let _span = root_span
-			.child("statement")
-			.with_stage(jaeger::Stage::CandidateBacking)
-			.with_candidate(statement.payload().candidate_hash())
-			.with_relay_parent(self.parent);
-
-		match self.maybe_validate_and_import(&root_span, ctx, statement).await {
-			Err(Error::ValidationFailed(_)) => Ok(()),
-			Err(e) => Err(e),
-			Ok(()) => Ok(()),
-		}
+/// Sign, import, and distribute a statement.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn sign_import_and_distribute_statement(
+	ctx: &mut Context,
+	rp_state: &mut PerRelayParentState,
+	per_candidate: &mut HashMap<CandidateHash, PerCandidateState>,
+	statement: StatementWithPVD,
+	keystore: SyncCryptoStorePtr,
+	metrics: &Metrics,
+) -> Result<Option<SignedFullStatementWithPVD>, Error> {
+	if let Some(signed_statement) = sign_statement(&*rp_state, statement, keystore, metrics).await {
+		import_statement(ctx, rp_state, per_candidate, &signed_statement).await?;
+
+		let smsg = StatementDistributionMessage::Share(
+			rp_state.parent,
+			StatementWithPVD::drop_pvd_from_signed(signed_statement.clone()),
+		);
+		ctx.send_unbounded_message(smsg);
+
+		Ok(Some(signed_statement))
+	} else {
+		Ok(None)
+	}
+}
+
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn background_validate_and_make_available(
+	ctx: &mut Context,
+	rp_state: &mut PerRelayParentState,
+	params: BackgroundValidationParams<
+		impl overseer::CandidateBackingSenderTrait,
+		impl Fn(BackgroundValidationResult) -> ValidatedCandidateCommand + Send + 'static + Sync,
+	>,
+) -> Result<(), Error> {
+	let candidate_hash = params.candidate.hash();
+	if rp_state.awaiting_validation.insert(candidate_hash) {
+		// spawn background task.
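+		// (Dedup via `HashSet::insert`, which reports whether the value was newly
+		// inserted, e.g.:
+		//
+		//   let mut set = std::collections::HashSet::new();
+		//   assert!(set.insert(1));  // first time: spawn the task
+		//   assert!(!set.insert(1)); // already validating: skip
+		// )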
+		let bg = async move {
+			if let Err(e) = validate_and_make_available(params).await {
+				if let Error::BackgroundValidationMpsc(error) = e {
+					gum::debug!(
+						target: LOG_TARGET,
+						?error,
+						"Background validation mpsc died during validation - leaf no longer active?"
+					);
+				} else {
+					gum::error!(
+						target: LOG_TARGET,
+						"Failed to validate and make available: {:?}",
+						e
+					);
+				}
+			}
+		};
+
+		ctx.spawn("backing-validation", bg.boxed())
+			.map_err(|_| Error::FailedToSpawnBackgroundTask)?;
 	}

-	fn handle_get_backed_candidates_message(
-		&mut self,
-		requested_candidates: Vec<CandidateHash>,
-		tx: oneshot::Sender<Vec<BackedCandidate>>,
-	) -> Result<(), Error> {
-		let _timer = self.metrics.time_get_backed_candidates();
+	Ok(())
+}

-		let backed = requested_candidates
-			.into_iter()
-			.filter_map(|hash| {
-				self.table
-					.attested_candidate(&hash, &self.table_context)
-					.and_then(|attested| table_attested_to_backed(attested, &self.table_context))
-			})
-			.collect();
-
-		tx.send(backed).map_err(|data| Error::Send(data))?;
-		Ok(())
+/// Kick off validation work and distribute the result as a signed statement.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn kick_off_validation_work(
+	ctx: &mut Context,
+	rp_state: &mut PerRelayParentState,
+	persisted_validation_data: PersistedValidationData,
+	background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
+	attesting: AttestingData,
+) -> Result<(), Error> {
+	let candidate_hash = attesting.candidate.hash();
+	if rp_state.issued_statements.contains(&candidate_hash) {
+		return Ok(())
 	}

-	/// Kick off validation work and distribute the result as a signed statement.
-	async fn kick_off_validation_work(
-		&mut self,
-		ctx: &mut Context,
-		attesting: AttestingData,
-		span: Option<jaeger::Span>,
-	) -> Result<(), Error> {
-		let candidate_hash = attesting.candidate.hash();
-		if self.issued_statements.contains(&candidate_hash) {
+	gum::debug!(
+		target: LOG_TARGET,
+		candidate_hash = ?candidate_hash,
+		candidate_receipt = ?attesting.candidate,
+		"Kicking off validation",
+	);
+
+	let bg_sender = ctx.sender().clone();
+	let pov = PoVData::FetchFromValidator {
+		from_validator: attesting.from_validator,
+		candidate_hash,
+		pov_hash: attesting.pov_hash,
+	};
+
+	background_validate_and_make_available(
+		ctx,
+		rp_state,
+		BackgroundValidationParams {
+			sender: bg_sender,
+			tx_command: background_validation_tx.clone(),
+			candidate: attesting.candidate,
+			relay_parent: rp_state.parent,
+			persisted_validation_data,
+			pov,
+			n_validators: rp_state.table_context.validators.len(),
+			make_command: ValidatedCandidateCommand::Attest,
+		},
+	)
+	.await
+}
+
+/// Import the statement and kick off validation work if it is a part of our assignment.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn maybe_validate_and_import(
+	ctx: &mut Context,
+	state: &mut State,
+	relay_parent: Hash,
+	statement: SignedFullStatementWithPVD,
+) -> Result<(), Error> {
+	let rp_state = match state.per_relay_parent.get_mut(&relay_parent) {
+		Some(r) => r,
+		None => {
+			gum::trace!(
+				target: LOG_TARGET,
+				?relay_parent,
+				"Received statement for unknown relay-parent"
+			);
+
+			return Ok(())
-		}
+		},
+	};

-		let descriptor = attesting.candidate.descriptor().clone();
+	let res = import_statement(ctx, rp_state, &mut state.per_candidate, &statement).await;

+	// If we get an `Error::RejectedByProspectiveParachains`,
+	// we will do nothing.
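+	// (Rejection is not a misbehavior here: it typically just means no active
+	// leaf accepts the candidate any longer, e.g. because the view advanced
+	// while the statement was in flight, so the statement is simply dropped.)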
+ if let Err(Error::RejectedByProspectiveParachains) = res { gum::debug!( target: LOG_TARGET, - candidate_hash = ?candidate_hash, - candidate_receipt = ?attesting.candidate, - "Kicking off validation", + ?relay_parent, + "Statement rejected by prospective parachains." ); - // Check that candidate is collated by the right collator. - if self.required_collator.as_ref().map_or(false, |c| c != &descriptor.collator) { - // If not, we've got the statement in the table but we will - // not issue validation work for it. - // - // Act as though we've issued a statement. - self.issued_statements.insert(candidate_hash); + return Ok(()) + } + + if let Some(summary) = res? { + // import_statement already takes care of communicating with the + // prospective parachains subsystem. At this point, the candidate + // has already been accepted into the fragment trees. + + let candidate_hash = summary.candidate; + + if Some(summary.group_id) != rp_state.assignment { return Ok(()) } + let attesting = match statement.payload() { + StatementWithPVD::Seconded(receipt, _) => { + let attesting = AttestingData { + candidate: rp_state + .table + .get_candidate(&candidate_hash) + .ok_or(Error::CandidateNotFound)? + .to_plain(), + pov_hash: receipt.descriptor.pov_hash, + from_validator: statement.validator_index(), + backing: Vec::new(), + }; + rp_state.fallbacks.insert(summary.candidate, attesting.clone()); + attesting + }, + StatementWithPVD::Valid(candidate_hash) => { + if let Some(attesting) = rp_state.fallbacks.get_mut(candidate_hash) { + let our_index = rp_state.table_context.validator.as_ref().map(|v| v.index()); + if our_index == Some(statement.validator_index()) { + return Ok(()) + } - let bg_sender = ctx.sender().clone(); - let pov = PoVData::FetchFromValidator { - from_validator: attesting.from_validator, - candidate_hash, - pov_hash: attesting.pov_hash, - }; - self.background_validate_and_make_available( - ctx, - BackgroundValidationParams { - sender: bg_sender, - tx_command: self.background_validation_tx.clone(), - candidate: attesting.candidate, - relay_parent: self.parent, - pov, - n_validators: self.table_context.validators.len(), - span, - make_command: ValidatedCandidateCommand::Attest, + if rp_state.awaiting_validation.contains(candidate_hash) { + // Job already running: + attesting.backing.push(statement.validator_index()); + return Ok(()) + } else { + // No job, so start another with current validator: + attesting.from_validator = statement.validator_index(); + attesting.clone() + } + } else { + return Ok(()) + } }, - ) - .await + }; + + // After `import_statement` succeeds, the candidate entry is guaranteed + // to exist. + if let Some(pvd) = state + .per_candidate + .get(&candidate_hash) + .map(|pc| pc.persisted_validation_data.clone()) + { + kick_off_validation_work( + ctx, + rp_state, + pvd, + &state.background_validation_tx, + attesting, + ) + .await?; + } } + Ok(()) +} - /// Import the statement and kick off validation work if it is a part of our assignment. - async fn maybe_validate_and_import( - &mut self, - root_span: &jaeger::Span, - ctx: &mut Context, - statement: SignedFullStatement, - ) -> Result<(), Error> { - if let Some(summary) = self.import_statement(ctx, &statement, root_span).await? 
{
-			if Some(summary.group_id) != self.assignment {
-				return Ok(())
-			}
-			let (attesting, span) = match statement.payload() {
-				Statement::Seconded(receipt) => {
-					let candidate_hash = summary.candidate;
-
-					let span = self.get_unbacked_validation_child(
-						root_span,
-						summary.candidate,
-						summary.group_id,
-					);
+/// Kick off background validation with intent to second.
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn validate_and_second(
+	ctx: &mut Context,
+	rp_state: &mut PerRelayParentState,
+	persisted_validation_data: PersistedValidationData,
+	candidate: &CandidateReceipt,
+	pov: Arc<PoV>,
+	background_validation_tx: &mpsc::Sender<(Hash, ValidatedCandidateCommand)>,
+) -> Result<(), Error> {
+	let candidate_hash = candidate.hash();
+
+	gum::debug!(
+		target: LOG_TARGET,
+		candidate_hash = ?candidate_hash,
+		candidate_receipt = ?candidate,
+		"Validate and second candidate",
+	);
+
+	let bg_sender = ctx.sender().clone();
+	background_validate_and_make_available(
+		ctx,
+		rp_state,
+		BackgroundValidationParams {
+			sender: bg_sender,
+			tx_command: background_validation_tx.clone(),
+			candidate: candidate.clone(),
+			relay_parent: rp_state.parent,
+			persisted_validation_data,
+			pov: PoVData::Ready(pov),
+			n_validators: rp_state.table_context.validators.len(),
+			make_command: ValidatedCandidateCommand::Second,
+		},
+	)
+	.await?;

-					let attesting = AttestingData {
-						candidate: self
-							.table
-							.get_candidate(&candidate_hash)
-							.ok_or(Error::CandidateNotFound)?
-							.to_plain(),
-						pov_hash: receipt.descriptor.pov_hash,
-						from_validator: statement.validator_index(),
-						backing: Vec::new(),
-					};
-					let child = span.as_ref().map(|s| s.child("try"));
-					self.fallbacks.insert(summary.candidate, (attesting.clone(), span));
-					(attesting, child)
-				},
-				Statement::Valid(candidate_hash) => {
-					if let Some((attesting, span)) = self.fallbacks.get_mut(candidate_hash) {
-						let our_index = self.table_context.validator.as_ref().map(|v| v.index());
-						if our_index == Some(statement.validator_index()) {
-							return Ok(())
-						}
+	Ok(())
+}

-						if self.awaiting_validation.contains(candidate_hash) {
-							// Job already running:
-							attesting.backing.push(statement.validator_index());
-							return Ok(())
-						} else {
-							// No job, so start another with current validator:
-							attesting.from_validator = statement.validator_index();
-							(attesting.clone(), span.as_ref().map(|s| s.child("try")))
-						}
-					} else {
-						return Ok(())
-					}
-				},
-			};
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn handle_second_message(
+	ctx: &mut Context,
+	state: &mut State,
+	candidate: CandidateReceipt,
+	persisted_validation_data: PersistedValidationData,
+	pov: PoV,
+	metrics: &Metrics,
+) -> Result<(), Error> {
+	let _timer = metrics.time_process_second();

-			self.kick_off_validation_work(ctx, attesting, span).await?;
-		}
-		Ok(())
+	let candidate_hash = candidate.hash();
+	let relay_parent = candidate.descriptor().relay_parent;
+
+	if candidate.descriptor().persisted_validation_data_hash != persisted_validation_data.hash() {
+		gum::warn!(
+			target: LOG_TARGET,
+			?candidate_hash,
+			"Candidate backing was asked to second candidate with wrong PVD",
+		);
+
+		return Ok(())
 	}

-	async fn sign_statement(&mut self, statement: Statement) -> Option<SignedFullStatement> {
-		let signed = self
-			.table_context
-			.validator
-			.as_ref()?
-			.sign(self.keystore.clone(), statement)
-			.await
-			.ok()
-			.flatten()?;
-		self.metrics.on_statement_signed();
-		Some(signed)
+	let rp_state = match state.per_relay_parent.get_mut(&relay_parent) {
+		None => {
+			gum::trace!(
+				target: LOG_TARGET,
+				?relay_parent,
+				?candidate_hash,
+				"We were asked to second a candidate outside of our view."
+			);
+
+			return Ok(())
+		},
+		Some(r) => r,
+	};
+
+	// Sanity check that candidate is from our assignment.
+	if Some(candidate.descriptor().para_id) != rp_state.assignment {
+		gum::debug!(
+			target: LOG_TARGET,
+			our_assignment = ?rp_state.assignment,
+			collation = ?candidate.descriptor().para_id,
+			"Subsystem asked to second for para outside of our assignment",
+		);
+
+		return Ok(())
 	}

-	/// Insert or get the unbacked-span for the given candidate hash.
-	fn insert_or_get_unbacked_span(
-		&mut self,
-		parent_span: &jaeger::Span,
-		hash: CandidateHash,
-		para_id: Option<ParaId>,
-	) -> Option<&jaeger::Span> {
-		if !self.backed.contains(&hash) {
-			// only add if we don't consider this backed.
-			let span = self.unbacked_candidates.entry(hash).or_insert_with(|| {
-				let s = parent_span.child("unbacked-candidate").with_candidate(hash);
-				if let Some(para_id) = para_id {
-					s.with_para_id(para_id)
-				} else {
-					s
-				}
-			});
-			Some(span)
-		} else {
-			None
-		}
+	// If the message is a `CandidateBackingMessage::Second`, sign and dispatch a
+	// Seconded statement only if we have not signed a Valid statement for the requested candidate.
+	//
+	// The actual logic of issuing the signed statement checks that this isn't
+	// conflicting with other seconded candidates. Not doing that check here
+	// gives other subsystems the ability to get us to execute arbitrary candidates,
+	// but no more.
+	if !rp_state.issued_statements.contains(&candidate_hash) {
+		let pov = Arc::new(pov);
+
+		validate_and_second(
+			ctx,
+			rp_state,
+			persisted_validation_data,
+			&candidate,
+			pov,
+			&state.background_validation_tx,
+		)
+		.await?;
 	}

-	fn get_unbacked_validation_child(
-		&mut self,
-		parent_span: &jaeger::Span,
-		hash: CandidateHash,
-		para_id: ParaId,
-	) -> Option<jaeger::Span> {
-		self.insert_or_get_unbacked_span(parent_span, hash, Some(para_id)).map(|span| {
-			span.child("validation")
-				.with_candidate(hash)
-				.with_stage(Stage::CandidateBacking)
-		})
+	Ok(())
+}
+
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)]
+async fn handle_statement_message(
+	ctx: &mut Context,
+	state: &mut State,
+	relay_parent: Hash,
+	statement: SignedFullStatementWithPVD,
+	metrics: &Metrics,
+) -> Result<(), Error> {
+	let _timer = metrics.time_process_statement();
+
+	match maybe_validate_and_import(ctx, state, relay_parent, statement).await {
+		Err(Error::ValidationFailed(_)) => Ok(()),
+		Err(e) => Err(e),
+		Ok(()) => Ok(()),
 	}
+}

-	fn get_unbacked_statement_child(
-		&mut self,
-		parent_span: &jaeger::Span,
-		hash: CandidateHash,
-		validator: ValidatorIndex,
-	) -> Option<jaeger::Span> {
-		self.insert_or_get_unbacked_span(parent_span, hash, None).map(|span| {
-			span.child("import-statement")
-				.with_candidate(hash)
-				.with_validator_index(validator)
+fn handle_get_backed_candidates_message(
+	rp_state: &PerRelayParentState,
+	requested_candidates: Vec<CandidateHash>,
+	tx: oneshot::Sender<Vec<BackedCandidate>>,
+	metrics: &Metrics,
+) -> Result<(), Error> {
+	let _timer = metrics.time_get_backed_candidates();
+
+	let backed = requested_candidates
+		.into_iter()
+		.filter_map(|hash| {
+			rp_state
+				.table
+				.attested_candidate(&hash, &rp_state.table_context)
+				.and_then(|attested| table_attested_to_backed(attested, &rp_state.table_context))
 		})
-	}
+		.collect();

-	fn remove_unbacked_span(&mut self, hash: &CandidateHash) -> Option<jaeger::Span> {
-		self.unbacked_candidates.remove(hash)
-	}
+	tx.send(backed).map_err(|data| Error::Send(data))?;
+	Ok(())
 }
diff --git a/node/core/backing/src/tests.rs b/node/core/backing/src/tests/mod.rs
similarity index 71%
rename from node/core/backing/src/tests.rs
rename to node/core/backing/src/tests/mod.rs
index 0243c68c7c4c..402462913749 100644
--- a/node/core/backing/src/tests.rs
+++ b/node/core/backing/src/tests/mod.rs
@@ -17,17 +17,18 @@
 use super::*;
 use ::test_helpers::{
 	dummy_candidate_receipt_bad_sig, dummy_collator, dummy_collator_signature,
-	dummy_committed_candidate_receipt, dummy_hash, dummy_validation_code,
+	dummy_committed_candidate_receipt, dummy_hash,
 };
 use assert_matches::assert_matches;
 use futures::{future, Future};
-use polkadot_node_primitives::{BlockData, InvalidCandidate};
+use polkadot_node_primitives::{BlockData, InvalidCandidate, SignedFullStatement, Statement};
 use polkadot_node_subsystem::{
+	jaeger,
 	messages::{
 		AllMessages, CollatorProtocolMessage, RuntimeApiMessage, RuntimeApiRequest,
 		ValidationFailed,
 	},
-	ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, LeafStatus, OverseerSignal,
+	ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, LeafStatus, OverseerSignal, TimeoutExt,
 };
 use polkadot_node_subsystem_test_helpers as test_helpers;
 use polkadot_primitives::v2::{
@@ -41,6 +42,10 @@ use sp_tracing as _;
 use statement_table::v2::Misbehavior;
 use std::collections::HashMap;

+mod prospective_parachains;
+
+const API_VERSION_PROSPECTIVE_DISABLED: u32 = 2;
+
 fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec<ValidatorId> {
 	val_ids.iter().map(|v| v.public().into()).collect()
 }
@@ -53,6 +58,15 @@ fn table_statement_to_primitive(statement: TableStatement) -> Statement {
 	}
 }

+fn dummy_pvd() -> PersistedValidationData {
+	PersistedValidationData {
+		parent_head: HeadData(vec![7, 8, 9]),
+		relay_parent_number: 0_u32.into(),
+		max_pov_size: 1024,
+		relay_parent_storage_root: dummy_hash(),
+	}
+}
+
 struct TestState {
 	chain_ids: Vec<ParaId>,
 	keystore: SyncCryptoStorePtr,
@@ -175,21 +189,22 @@ fn test_harness<T: Future<Output = VirtualOverseer>>(
 	));
 }

-fn make_erasure_root(test: &TestState, pov: PoV) -> Hash {
-	let available_data =
-		AvailableData { validation_data: test.validation_data.clone(), pov: Arc::new(pov) };
+fn make_erasure_root(test: &TestState, pov: PoV, validation_data: PersistedValidationData) -> Hash {
+	let available_data = AvailableData { validation_data, pov: Arc::new(pov) };

 	let chunks = erasure_coding::obtain_chunks_v1(test.validators.len(), &available_data).unwrap();
 	erasure_coding::branches(&chunks).root()
 }

-#[derive(Default)]
+#[derive(Default, Clone)]
 struct TestCandidateBuilder {
 	para_id: ParaId,
 	head_data: HeadData,
 	pov_hash: Hash,
 	relay_parent: Hash,
 	erasure_root: Hash,
+	persisted_validation_data_hash: Hash,
+	validation_code: Vec<u8>,
 }

 impl TestCandidateBuilder {
@@ -203,8 +218,8 @@ impl TestCandidateBuilder {
 			collator: dummy_collator(),
 			signature: dummy_collator_signature(),
 			para_head: dummy_hash(),
-			validation_code_hash: dummy_validation_code().hash(),
-			persisted_validation_data_hash: dummy_hash(),
+			validation_code_hash: ValidationCode(self.validation_code).hash(),
+			persisted_validation_data_hash: self.persisted_validation_data_hash,
 		},
 		commitments: CandidateCommitments {
 			head_data: self.head_data,
@@ -232,6 +247,17 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestState) {
 	))))
 	.await;

+	// Prospective parachains mode is temporarily defined by the Runtime API version.
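+	// (Replying with version 2, `API_VERSION_PROSPECTIVE_DISABLED`, keeps the
+	// subsystem on the legacy code path; version 3 enables prospective
+	// parachains, as exercised in `tests/prospective_parachains.rs`.)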
+ // Disable it for the test leaf. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == test_state.relay_parent => { + tx.send(Ok(API_VERSION_PROSPECTIVE_DISABLED)).unwrap(); + } + ); + // Check that subsystem job issues a request for a validator set. assert_matches!( virtual_overseer.recv().await, @@ -310,6 +336,8 @@ fn backing_second_works() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); @@ -319,7 +347,9 @@ fn backing_second_works() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -327,31 +357,50 @@ fn backing_second_works() { let second = CandidateBackingMessage::Second( test_state.relay_parent, candidate.to_plain(), + pvd.clone(), pov.clone(), ); virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, candidate_receipt, - pov, + _pov, timeout, tx, - ) - ) if pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && candidate.commitments.hash() == candidate_receipt.commitments_hash => { - tx.send(Ok( - ValidationResult::Valid(CandidateCommitments { + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate.commitments.hash() == candidate_receipt.commitments_hash => + { + tx.send(Ok(ValidationResult::Valid( + CandidateCommitments { head_data: expected_head_data.clone(), horizontal_messages: Vec::new(), upward_messages: Vec::new(), new_validation_code: None, processed_downward_messages: 0, hrmp_watermark: 0, - }, test_state.validation_data.clone()), - )).unwrap(); + }, + test_state.validation_data.clone(), + ))) + .unwrap(); } ); @@ -407,6 +456,8 @@ fn backing_works() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let pov_hash = pov.hash(); @@ -417,7 +468,8 @@ fn backing_works() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -440,9 +492,9 @@ fn backing_works() { .await .expect("Insert key into keystore"); - let signed_a = SignedFullStatement::sign( + let 
signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate_a.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -452,9 +504,9 @@ fn backing_works() { .flatten() .expect("should be signed"); - let signed_b = SignedFullStatement::sign( + let signed_b = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate_a_hash), + StatementWithPVD::Valid(candidate_a_hash), &test_state.signing_context, ValidatorIndex(5), &public1.into(), @@ -477,6 +529,15 @@ fn backing_works() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + // Sending a `Statement::Seconded` for our assignment will start // validation process. The first thing requested is the PoV. assert_matches!( @@ -497,13 +558,20 @@ fn backing_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, tx, - ) - ) if pov == pov && c.descriptor() == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && c.commitments_hash == candidate_a_commitments_hash=> { + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate_a_commitments_hash == candidate_receipt.commitments_hash => + { tx.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), @@ -584,6 +652,8 @@ fn backing_works_while_validation_ongoing() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let pov_hash = pov.hash(); @@ -594,7 +664,8 @@ fn backing_works_while_validation_ongoing() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -624,9 +695,9 @@ fn backing_works_while_validation_ongoing() { .await .expect("Insert key into keystore"); - let signed_a = SignedFullStatement::sign( + let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate_a.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -636,9 +707,9 @@ fn backing_works_while_validation_ongoing() { .flatten() .expect("should be signed"); - let signed_b = SignedFullStatement::sign( + let signed_b = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate_a_hash), + StatementWithPVD::Valid(candidate_a_hash), &test_state.signing_context, ValidatorIndex(5), &public1.into(), @@ -648,9 +719,9 @@ fn backing_works_while_validation_ongoing() { .flatten() .expect("should be signed"); - let signed_c = SignedFullStatement::sign( + let signed_c = SignedFullStatementWithPVD::sign( &test_state.keystore, - 
Statement::Valid(candidate_a_hash), + StatementWithPVD::Valid(candidate_a_hash), &test_state.signing_context, ValidatorIndex(3), &public3.into(), @@ -672,6 +743,15 @@ fn backing_works_while_validation_ongoing() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + // Sending a `Statement::Seconded` for our assignment will start // validation process. The first thing requested is PoV from the // `PoVDistribution`. @@ -693,13 +773,20 @@ fn backing_works_while_validation_ongoing() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, tx, - ) - ) if pov == pov && c.descriptor() == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && candidate_a_commitments_hash == c.commitments_hash => { + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate_a_commitments_hash == candidate_receipt.commitments_hash => + { // we never validate the candidate. our local node // shouldn't issue any statements. std::mem::forget(tx); @@ -793,6 +880,8 @@ fn backing_misbehavior_works() { let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; let pov_hash = pov.hash(); + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); @@ -800,8 +889,9 @@ fn backing_misbehavior_works() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), head_data: expected_head_data.clone(), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -816,9 +906,9 @@ fn backing_misbehavior_works() { ) .await .expect("Insert key into keystore"); - let seconded_2 = SignedFullStatement::sign( + let seconded_2 = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate_a.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -828,9 +918,9 @@ fn backing_misbehavior_works() { .flatten() .expect("should be signed"); - let valid_2 = SignedFullStatement::sign( + let valid_2 = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate_a_hash), + StatementWithPVD::Valid(candidate_a_hash), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -853,6 +943,15 @@ fn backing_misbehavior_works() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityDistribution( @@ -869,13 +968,20 @@ fn backing_misbehavior_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - 
CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, tx, - ) - ) if pov == pov && c.descriptor() == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && candidate_a_commitments_hash == c.commitments_hash => { + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate_a.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate_a_commitments_hash == candidate_receipt.commitments_hash => + { tx.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), @@ -991,8 +1097,17 @@ fn backing_dont_second_invalid() { test_startup(&mut virtual_overseer, &test_state).await; let pov_block_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = dummy_pvd(); + let validation_code_a = ValidationCode(vec![1, 2, 3]); let pov_block_b = PoV { block_data: BlockData(vec![45, 46, 47]) }; + let pvd_b = { + let mut pvd_b = pvd_a.clone(); + pvd_b.parent_head = HeadData(vec![14, 15, 16]); + pvd_b.max_pov_size = pvd_a.max_pov_size / 2; + pvd_b + }; + let validation_code_b = ValidationCode(vec![4, 5, 6]); let pov_hash_a = pov_block_a.hash(); let pov_hash_b = pov_block_b.hash(); @@ -1003,7 +1118,9 @@ fn backing_dont_second_invalid() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash: pov_hash_a, - erasure_root: make_erasure_root(&test_state, pov_block_a.clone()), + erasure_root: make_erasure_root(&test_state, pov_block_a.clone(), pvd_a.clone()), + persisted_validation_data_hash: pvd_a.hash(), + validation_code: validation_code_a.0.clone(), ..Default::default() } .build(); @@ -1012,8 +1129,10 @@ fn backing_dont_second_invalid() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash: pov_hash_b, - erasure_root: make_erasure_root(&test_state, pov_block_b.clone()), + erasure_root: make_erasure_root(&test_state, pov_block_b.clone(), pvd_b.clone()), head_data: expected_head_data.clone(), + persisted_validation_data_hash: pvd_b.hash(), + validation_code: validation_code_b.0.clone(), ..Default::default() } .build(); @@ -1021,21 +1140,38 @@ fn backing_dont_second_invalid() { let second = CandidateBackingMessage::Second( test_state.relay_parent, candidate_a.to_plain(), + pvd_a.clone(), pov_block_a.clone(), ); virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code_a.hash() => { + tx.send(Ok(Some(validation_code_a.clone()))).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, tx, - ) - ) if pov == pov && c.descriptor() == candidate_a.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT => { + ), + ) if _pvd == pvd_a && + _validation_code == validation_code_a && + *_pov == pov_block_a && &candidate_receipt.descriptor == candidate_a.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate_a.commitments.hash() == candidate_receipt.commitments_hash => + { tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); } ); @@ -1050,21 
+1186,38 @@ fn backing_dont_second_invalid() { let second = CandidateBackingMessage::Second( test_state.relay_parent, candidate_b.to_plain(), + pvd_b.clone(), pov_block_b.clone(), ); virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code_b.hash() => { + tx.send(Ok(Some(validation_code_b.clone()))).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, tx, - ) - ) if pov == pov && c.descriptor() == candidate_b.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT => { + ), + ) if _pvd == pvd_b && + _validation_code == validation_code_b && + *_pov == pov_block_b && &candidate_receipt.descriptor == candidate_b.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate_b.commitments.hash() == candidate_receipt.commitments_hash => + { tx.send(Ok( ValidationResult::Valid(CandidateCommitments { head_data: expected_head_data.clone(), @@ -1073,7 +1226,7 @@ fn backing_dont_second_invalid() { new_validation_code: None, processed_downward_messages: 0, hrmp_watermark: 0, - }, test_state.validation_data.clone()), + }, pvd_b.clone()), )).unwrap(); } ); @@ -1125,6 +1278,8 @@ fn backing_second_after_first_fails_works() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let pov_hash = pov.hash(); @@ -1132,7 +1287,9 @@ fn backing_second_after_first_fails_works() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -1145,9 +1302,9 @@ fn backing_second_after_first_fails_works() { .await .expect("Insert key into keystore"); - let signed_a = SignedFullStatement::sign( + let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate.clone()), + StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(2), &validator2.into(), @@ -1171,6 +1328,15 @@ fn backing_second_after_first_fails_works() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + // Subsystem requests PoV and requests validation. 
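+	// (The fetch goes to availability-distribution first; candidate-validation is
+	// only asked to validate once the PoV has actually arrived.)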
assert_matches!( virtual_overseer.recv().await, @@ -1189,13 +1355,20 @@ fn backing_second_after_first_fails_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, tx, - ) - ) if pov == pov && c.descriptor() == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && c.commitments_hash == candidate.commitments.hash() => { + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate.commitments.hash() == candidate_receipt.commitments_hash => + { tx.send(Ok(ValidationResult::Invalid(InvalidCandidate::BadReturn))).unwrap(); } ); @@ -1205,12 +1378,15 @@ fn backing_second_after_first_fails_works() { let second = CandidateBackingMessage::Second( test_state.relay_parent, candidate.to_plain(), + pvd.clone(), pov.clone(), ); virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; let pov_to_second = PoV { block_data: BlockData(vec![3, 2, 1]) }; + let pvd_to_second = dummy_pvd(); + let validation_code_to_second = ValidationCode(vec![5, 6, 7]); let pov_hash = pov_to_second.hash(); @@ -1218,7 +1394,13 @@ fn backing_second_after_first_fails_works() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov_to_second.clone()), + erasure_root: make_erasure_root( + &test_state, + pov_to_second.clone(), + pvd_to_second.clone(), + ), + persisted_validation_data_hash: pvd_to_second.hash(), + validation_code: validation_code_to_second.0.clone(), ..Default::default() } .build(); @@ -1226,6 +1408,7 @@ fn backing_second_after_first_fails_works() { let second = CandidateBackingMessage::Second( test_state.relay_parent, candidate_to_second.to_plain(), + pvd_to_second.clone(), pov_to_second.clone(), ); @@ -1234,15 +1417,19 @@ fn backing_second_after_first_fails_works() { // triggered on the prev step. 
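+	// (The first candidate failed validation and was never seconded, so the
+	// subsystem is still free to second a different candidate at this relay
+	// parent; validation for the new candidate should start normally.)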
virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code_to_second.hash() => { + tx.send(Ok(Some(validation_code_to_second.clone()))).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - _, - pov, - _, - _, - ) + CandidateValidationMessage::ValidateFromExhaustive(_, _, _, pov, ..), ) => { assert_eq!(&*pov, &pov_to_second); } @@ -1260,6 +1447,8 @@ fn backing_works_after_failed_validation() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let pov_hash = pov.hash(); @@ -1267,7 +1456,8 @@ fn backing_works_after_failed_validation() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -1279,9 +1469,9 @@ fn backing_works_after_failed_validation() { ) .await .expect("Insert key into keystore"); - let signed_a = SignedFullStatement::sign( + let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate.clone()), + StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -1305,6 +1495,15 @@ fn backing_works_after_failed_validation() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + // Subsystem requests PoV and requests validation. assert_matches!( virtual_overseer.recv().await, @@ -1323,13 +1522,20 @@ fn backing_works_after_failed_validation() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, tx, - ) - ) if pov == pov && c.descriptor() == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && c.commitments_hash == candidate.commitments.hash() => { + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate.commitments.hash() == candidate_receipt.commitments_hash => + { tx.send(Err(ValidationFailed("Internal test error".into()))).unwrap(); } ); @@ -1352,6 +1558,7 @@ fn backing_works_after_failed_validation() { // Test that a `CandidateBackingMessage::Second` issues validation work // and in case validation is successful issues a `StatementDistributionMessage`. #[test] +#[ignore] // `required_collator` is disabled. 
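+// (Kept for reference: this test can be revived if a `required_collator`
+// restriction is reintroduced into candidate backing.)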
fn backing_doesnt_second_wrong_collator() { let mut test_state = TestState::default(); test_state.availability_cores[0] = CoreState::Scheduled(ScheduledCore { @@ -1363,6 +1570,8 @@ fn backing_doesnt_second_wrong_collator() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); @@ -1372,7 +1581,9 @@ fn backing_doesnt_second_wrong_collator() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -1380,6 +1591,7 @@ fn backing_doesnt_second_wrong_collator() { let second = CandidateBackingMessage::Second( test_state.relay_parent, candidate.to_plain(), + pvd.clone(), pov.clone(), ); @@ -1403,6 +1615,7 @@ fn backing_doesnt_second_wrong_collator() { } #[test] +#[ignore] // `required_collator` is disabled. fn validation_work_ignores_wrong_collator() { let mut test_state = TestState::default(); test_state.availability_cores[0] = CoreState::Scheduled(ScheduledCore { @@ -1414,6 +1627,8 @@ fn validation_work_ignores_wrong_collator() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let pov_hash = pov.hash(); @@ -1424,7 +1639,9 @@ fn validation_work_ignores_wrong_collator() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -1436,9 +1653,9 @@ fn validation_work_ignores_wrong_collator() { ) .await .expect("Insert key into keystore"); - let seconding = SignedFullStatement::sign( + let seconding = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate_a.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -1543,6 +1760,8 @@ fn retry_works() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let pov_hash = pov.hash(); @@ -1550,7 +1769,9 @@ fn retry_works() { para_id: test_state.chain_ids[0], relay_parent: test_state.relay_parent, pov_hash, - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -1576,9 +1797,9 @@ fn retry_works() { ) .await .expect("Insert key into keystore"); - let signed_a = SignedFullStatement::sign( + let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate.clone()), + StatementWithPVD::Seconded(candidate.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -1587,9 +1808,9 @@ fn 
retry_works() { .ok() .flatten() .expect("should be signed"); - let signed_b = SignedFullStatement::sign( + let signed_b = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate.hash()), + StatementWithPVD::Valid(candidate.hash()), &test_state.signing_context, ValidatorIndex(3), &public3.into(), @@ -1598,9 +1819,9 @@ fn retry_works() { .ok() .flatten() .expect("should be signed"); - let signed_c = SignedFullStatement::sign( + let signed_c = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate.hash()), + StatementWithPVD::Valid(candidate.hash()), &test_state.signing_context, ValidatorIndex(5), &public5.into(), @@ -1623,6 +1844,15 @@ fn retry_works() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + // Subsystem requests PoV and requests validation. // We cancel - should mean retry on next backing statement. assert_matches!( @@ -1651,7 +1881,7 @@ fn retry_works() { .await; // Not deterministic which message comes first: - for _ in 0u32..2 { + for _ in 0u32..3 { match virtual_overseer.recv().await { AllMessages::Provisioner(ProvisionerMessage::ProvisionableData( _, @@ -1664,6 +1894,12 @@ fn retry_works() { ) if relay_parent == test_state.relay_parent => { std::mem::drop(tx); }, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::ValidationCodeByHash(hash, tx), + )) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + }, msg => { assert!(false, "Unexpected message: {:?}", msg); }, @@ -1682,6 +1918,15 @@ fn retry_works() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::AvailabilityDistribution( @@ -1700,13 +1945,19 @@ fn retry_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::CandidateValidation( - CandidateValidationMessage::ValidateFromChainState( - c, - pov, + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, timeout, - _tx, - ) - ) if pov == pov && c.descriptor() == candidate.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && c.commitments_hash == candidate.commitments.hash() + .. 
+ ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate.commitments.hash() == candidate_receipt.commitments_hash ); virtual_overseer }); @@ -1720,6 +1971,8 @@ fn observes_backing_even_if_not_validator() { test_startup(&mut virtual_overseer, &test_state).await; let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); let pov_hash = pov.hash(); @@ -1730,7 +1983,9 @@ fn observes_backing_even_if_not_validator() { relay_parent: test_state.relay_parent, pov_hash, head_data: expected_head_data.clone(), - erasure_root: make_erasure_root(&test_state, pov.clone()), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), ..Default::default() } .build(); @@ -1760,9 +2015,9 @@ fn observes_backing_even_if_not_validator() { // Produce a 3-of-5 quorum on the candidate. - let signed_a = SignedFullStatement::sign( + let signed_a = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Seconded(candidate_a.clone()), + StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), &test_state.signing_context, ValidatorIndex(0), &public0.into(), @@ -1772,9 +2027,9 @@ fn observes_backing_even_if_not_validator() { .flatten() .expect("should be signed"); - let signed_b = SignedFullStatement::sign( + let signed_b = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate_a_hash), + StatementWithPVD::Valid(candidate_a_hash), &test_state.signing_context, ValidatorIndex(5), &public1.into(), @@ -1784,9 +2039,9 @@ fn observes_backing_even_if_not_validator() { .flatten() .expect("should be signed"); - let signed_c = SignedFullStatement::sign( + let signed_c = SignedFullStatementWithPVD::sign( &test_state.keystore, - Statement::Valid(candidate_a_hash), + StatementWithPVD::Valid(candidate_a_hash), &test_state.signing_context, ValidatorIndex(2), &public2.into(), @@ -1847,3 +2102,183 @@ fn observes_backing_even_if_not_validator() { virtual_overseer }); } + +// Tests that it's impossible to second multiple candidates per relay parent +// without prospective parachains. 
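+// (Without prospective parachains only depth 0 exists per relay parent, so once
+// one candidate occupies it, a second candidate is validated but never
+// seconded; see the timeout assertion at the end of the test.)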
+#[test] +fn cannot_second_multiple_candidates_per_parent() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + test_startup(&mut virtual_overseer, &test_state).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + + let pov_hash = pov.hash(); + let candidate_builder = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + }; + let candidate = candidate_builder.clone().build(); + + let second = CandidateBackingMessage::Second( + test_state.relay_parent, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, + timeout, + tx, + ), + ) if _pvd == pvd && + _validation_code == validation_code && + *_pov == pov && &candidate_receipt.descriptor == candidate.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate.commitments.hash() == candidate_receipt.commitments_hash => + { + tx.send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: expected_head_data.clone(), + horizontal_messages: Vec::new(), + upward_messages: Vec::new(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + test_state.validation_data.clone(), + ))) + .unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } + ) if candidate_hash == candidate.hash() => { + tx.send(Ok(())).unwrap(); + } + ); + + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate.hash(), + test_state.session(), + vec![ValidatorIndex(0)], + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == test_state.relay_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(test_state.relay_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + // Try to second candidate with the same relay parent again. + + // Make sure the candidate hash is different. 
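// Why a different validation code suffices: the candidate hash commits to the
// whole descriptor, so altering any descriptor field yields a new hash. A
// hedged, std-only illustration (the real implementation hashes the
// SCALE-encoded receipt with BLAKE2, not `DefaultHasher`):
fn descriptor_fingerprint(pov_hash: u64, validation_code: &[u8]) -> u64 {
	use std::collections::hash_map::DefaultHasher;
	use std::hash::{Hash, Hasher};
	let mut h = DefaultHasher::new();
	pov_hash.hash(&mut h);
	validation_code.hash(&mut h);
	h.finish()
}
// `descriptor_fingerprint(1, &[1, 2, 3]) != descriptor_fingerprint(1, &[4, 5, 6])`
// holds with overwhelming probability, mirroring the distinct candidate below.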
+ let validation_code = ValidationCode(vec![4, 5, 6]); + let mut candidate_builder = candidate_builder; + candidate_builder.validation_code = validation_code.0.clone(); + let candidate = candidate_builder.build(); + + let second = CandidateBackingMessage::Second( + test_state.relay_parent, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + // The validation is still requested. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(_, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive(.., tx), + ) => { + tx.send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: expected_head_data.clone(), + horizontal_messages: Vec::new(), + upward_messages: Vec::new(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + test_state.validation_data.clone(), + ))) + .unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } + ) if candidate_hash == candidate.hash() => { + tx.send(Ok(())).unwrap(); + } + ); + + // Validation done, but the candidate is rejected cause of 0-depth being already occupied. + + assert!(virtual_overseer + .recv() + .timeout(std::time::Duration::from_millis(50)) + .await + .is_none()); + + virtual_overseer + }); +} diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs new file mode 100644 index 000000000000..5be62b344980 --- /dev/null +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -0,0 +1,1352 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tests for the backing subsystem with enabled prospective parachains. + +use polkadot_node_subsystem::{messages::ChainApiMessage, TimeoutExt}; +use polkadot_primitives::v2::{BlockNumber, Header}; + +use super::*; + +const API_VERSION_PROSPECTIVE_ENABLED: u32 = 3; + +struct TestLeaf { + activated: ActivatedLeaf, + min_relay_parents: Vec<(ParaId, u32)>, +} + +fn get_parent_hash(hash: Hash) -> Hash { + Hash::from_low_u64_be(hash.to_low_u64_be() + 1) +} + +async fn activate_leaf( + virtual_overseer: &mut VirtualOverseer, + leaf: TestLeaf, + test_state: &TestState, + seconded_in_view: usize, +) { + let TestLeaf { activated, min_relay_parents } = leaf; + let leaf_hash = activated.hash; + let leaf_number = activated.number; + // Start work on some new parent. 
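// Note on the synthetic ancestry used throughout these tests:
// `get_parent_hash` bumps the low u64 of the child hash, so hashes ascend
// numerically while block numbers descend. The same chain over plain u64s
// (illustrative sketch only):
fn ancestry(leaf: u64, len: usize) -> Vec<u64> {
	std::iter::successors(Some(leaf), |h| Some(h + 1)).take(len).collect()
}
// For a leaf of 130 and len 4 this yields [130, 131, 132, 133], which the
// activation helper zips with the descending numbers `min_min..=leaf_number`.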
+ virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + activated, + )))) + .await; + + // Prospective parachains mode is temporarily defined by the Runtime API version. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + ) if parent == leaf_hash => { + tx.send(Ok(API_VERSION_PROSPECTIVE_ENABLED)).unwrap(); + } + ); + + let min_min = *min_relay_parents + .iter() + .map(|(_, block_num)| block_num) + .min() + .unwrap_or(&leaf_number); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx) + ) if parent == leaf_hash => { + tx.send(min_relay_parents).unwrap(); + } + ); + + let ancestry_len = leaf_number + 1 - min_min; + + let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) + .take(ancestry_len as usize); + let ancestry_numbers = (min_min..=leaf_number).rev(); + let mut ancestry_iter = ancestry_hashes.clone().zip(ancestry_numbers).peekable(); + + let mut next_overseer_message = None; + // How many blocks were actually requested. + let mut requested_len = 0; + loop { + let (hash, number) = match ancestry_iter.next() { + Some((hash, number)) => (hash, number), + None => break, + }; + + // May be `None` for the last element. + let parent_hash = + ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash)); + + let msg = virtual_overseer.recv().await; + // It may happen that some blocks were cached by implicit view, + // reuse the message. + if !matches!(&msg, AllMessages::ChainApi(ChainApiMessage::BlockHeader(..))) { + next_overseer_message.replace(msg); + break + } + + assert_matches!( + msg, + AllMessages::ChainApi( + ChainApiMessage::BlockHeader(_hash, tx) + ) if _hash == hash => { + let header = Header { + parent_hash, + number, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + }; + + tx.send(Ok(Some(header))).unwrap(); + } + ); + requested_len += 1; + } + + for _ in 0..seconded_in_view { + let msg = match next_overseer_message.take() { + Some(msg) => msg, + None => virtual_overseer.recv().await, + }; + assert_matches!( + msg, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetTreeMembership(.., tx), + ) => { + tx.send(Vec::new()).unwrap(); + } + ); + } + + for hash in ancestry_hashes.take(requested_len) { + // Check that subsystem job issues a request for a validator set. + let msg = match next_overseer_message.take() { + Some(msg) => msg, + None => virtual_overseer.recv().await, + }; + assert_matches!( + msg, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.validator_public.clone())).unwrap(); + } + ); + + // Check that subsystem job issues a request for the validator groups. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + } + ); + + // Check that subsystem job issues a request for the session index for child. 
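// Each ancestor iterated here was fetched as a header first; the span is the
// inclusive range from the smallest minimum relay parent to the leaf, hence
// `leaf_number + 1 - min_min` blocks. Sketch of the arithmetic, assuming
// inclusive bounds:
fn expected_ancestry_len(leaf_number: u32, min_min: u32) -> u32 {
	debug_assert!(min_min <= leaf_number);
	leaf_number + 1 - min_min
}
// E.g. a leaf at 100 with minimum relay parent 97 spans 4 blocks: 97..=100.
// The session-index request below is answered per ancestor hash, exactly
// like the validators and validator-groups requests above.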
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.signing_context.session_index)).unwrap(); + } + ); + + // Check that subsystem job issues a request for the availability cores. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + } + ); + } +} + +async fn assert_validate_seconded_candidate( + virtual_overseer: &mut VirtualOverseer, + relay_parent: Hash, + candidate: &CommittedCandidateReceipt, + pov: &PoV, + pvd: &PersistedValidationData, + validation_code: &ValidationCode, + expected_head_data: &HeadData, + fetch_pov: bool, +) { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidationCodeByHash(hash, tx)) + ) if parent == relay_parent && hash == validation_code.hash() => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + } + ); + + if fetch_pov { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityDistribution( + AvailabilityDistributionMessage::FetchPoV { + relay_parent: hash, + tx, + .. + } + ) if hash == relay_parent => { + tx.send(pov.clone()).unwrap(); + } + ); + } + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CandidateValidation(CandidateValidationMessage::ValidateFromExhaustive( + _pvd, + _validation_code, + candidate_receipt, + _pov, + timeout, + tx, + )) if &_pvd == pvd && + &_validation_code == validation_code && + &*_pov == pov && + &candidate_receipt.descriptor == candidate.descriptor() && + timeout == BACKING_EXECUTION_TIMEOUT && + candidate.commitments.hash() == candidate_receipt.commitments_hash => + { + tx.send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: expected_head_data.clone(), + horizontal_messages: Vec::new(), + upward_messages: Vec::new(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + pvd.clone(), + ))) + .unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::AvailabilityStore( + AvailabilityStoreMessage::StoreAvailableData { candidate_hash, tx, .. } + ) if candidate_hash == candidate.hash() => { + tx.send(Ok(())).unwrap(); + } + ); +} + +async fn assert_hypothetical_depth_requests( + virtual_overseer: &mut VirtualOverseer, + mut expected_requests: Vec<(HypotheticalDepthRequest, Vec)>, +) { + // Requests come with no particular order. + let requests_num = expected_requests.len(); + + for _ in 0..requests_num { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx), + ) => { + let idx = match expected_requests.iter().position(|r| r.0 == request) { + Some(idx) => idx, + None => panic!( + "unexpected hypothetical depth request, no match found for {:?}", + request + ), + }; + let resp = std::mem::take(&mut expected_requests[idx].1); + tx.send(resp).unwrap(); + + expected_requests.remove(idx); + } + ); + } +} + +// Test that `seconding_sanity_check` works when a candidate is allowed +// for all leaves. 
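// `assert_hypothetical_depth_requests` above tolerates arbitrary arrival
// order by locating each incoming request in the expectation list and
// removing it once answered. A std-only sketch of that match-and-remove
// pattern (`u32` stands in for the real request type):
fn match_unordered(expected: &mut Vec<(u32, Vec<usize>)>, incoming: u32) -> Vec<usize> {
	let idx = expected
		.iter()
		.position(|(req, _)| *req == incoming)
		.unwrap_or_else(|| panic!("unexpected request {:?}", incoming));
	expected.remove(idx).1
}
// The allowed/disallowed tests below both drive their expectations through
// this helper.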
+#[test] +fn seconding_sanity_check_allowed() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_DEPTH: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_b_hash = Hash::from_low_u64_be(128); + // `a` is grandparent of `b`. + let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = ActivatedLeaf { + hash: leaf_a_hash, + number: LEAF_A_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_DEPTH)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; + const LEAF_B_DEPTH: BlockNumber = 4; + + let activated = ActivatedLeaf { + hash: leaf_b_hash, + number: LEAF_B_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_DEPTH)]; + let test_leaf_b = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state, 0).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let expected_request_a = HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: leaf_a_parent, + fragment_tree_relay_parent: leaf_a_hash, + }; + let expected_request_b = HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: leaf_a_parent, + fragment_tree_relay_parent: leaf_b_hash, + }; + assert_hypothetical_depth_requests( + &mut virtual_overseer, + vec![(expected_request_a, vec![0, 1, 2, 3]), (expected_request_b, vec![3])], + ) + .await; + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded( + candidate_para, + candidate_receipt, + _pvd, + tx, + ), + ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + // Any non-empty response will do. 
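// What "non-empty" means here: the response is fragment-tree membership,
// `Vec<(leaf_hash, depths)>`. An entry with at least one depth keeps the
// candidate in the view; an empty vector (see the rejection test later in
// this file) drops it. Hedged sketch of that check:
fn is_member_somewhere(membership: &[(u64, Vec<usize>)]) -> bool {
	membership.iter().any(|(_leaf, depths)| !depths.is_empty())
}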
+ tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap(); + } + ); + + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate.hash(), + test_state.session(), + vec![ValidatorIndex(0)], + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == leaf_a_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(leaf_a_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + virtual_overseer + }); +} + +// Test that `seconding_sanity_check` works when a candidate is disallowed +// for at least one leaf. +#[test] +fn seconding_sanity_check_disallowed() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_DEPTH: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_b_hash = Hash::from_low_u64_be(128); + // `a` is grandparent of `b`. + let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = ActivatedLeaf { + hash: leaf_a_hash, + number: LEAF_A_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_DEPTH)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; + const LEAF_B_DEPTH: BlockNumber = 4; + + let activated = ActivatedLeaf { + hash: leaf_b_hash, + number: LEAF_B_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_DEPTH)]; + let test_leaf_b = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let expected_request_a = HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: leaf_a_parent, + fragment_tree_relay_parent: leaf_a_hash, + }; + assert_hypothetical_depth_requests( + &mut virtual_overseer, + vec![(expected_request_a, vec![0, 1, 2, 3])], + ) + .await; + // 
Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded( + candidate_para, + candidate_receipt, + _pvd, + tx, + ), + ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + // Any non-empty response will do. + tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap(); + } + ); + + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate.hash(), + test_state.session(), + vec![ValidatorIndex(0)], + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == leaf_a_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(leaf_a_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + // A seconded candidate occupies a depth, try to second another one. + // It is allowed in a new leaf but not allowed in the old one. + // Expect it to be rejected. + activate_leaf(&mut virtual_overseer, test_leaf_b, &test_state, 1).await; + let leaf_a_grandparent = get_parent_hash(leaf_a_parent); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_grandparent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_grandparent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let expected_request_a = HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: leaf_a_grandparent, + fragment_tree_relay_parent: leaf_a_hash, + }; + let expected_request_b = HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: leaf_a_grandparent, + fragment_tree_relay_parent: leaf_b_hash, + }; + assert_hypothetical_depth_requests( + &mut virtual_overseer, + vec![ + (expected_request_a, vec![3]), // All depths are occupied. + (expected_request_b, vec![1]), + ], + ) + .await; + + assert!(virtual_overseer + .recv() + .timeout(std::time::Duration::from_millis(50)) + .await + .is_none()); + + virtual_overseer + }); +} + +// Test that a seconded candidate which is not approved by prospective parachains +// subsystem doesn't change the view. +#[test] +fn prospective_parachains_reject_candidate() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. 
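// The rule exercised by the preceding test, sketched: seconding is allowed
// only if, for every active leaf whose fragment tree admits the candidate,
// at least one returned hypothetical depth is not already occupied by a
// candidate this validator seconded earlier (illustrative types; `u64`
// stands in for leaf hashes):
fn seconding_allowed(
	hypothetical: &[(u64, Vec<usize>)], // leaf -> admissible depths
	seconded_at: &std::collections::HashMap<u64, std::collections::HashSet<usize>>,
) -> bool {
	hypothetical.iter().all(|(leaf, depths)| {
		let occupied = seconded_at.get(leaf);
		depths.iter().any(|d| occupied.map_or(true, |s| !s.contains(d)))
	})
}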
+ const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_DEPTH: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = ActivatedLeaf { + hash: leaf_a_hash, + number: LEAF_A_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_DEPTH)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let expected_request_a = vec![( + HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: leaf_a_parent, + fragment_tree_relay_parent: leaf_a_hash, + }, + vec![0, 1, 2, 3], + )]; + assert_hypothetical_depth_requests(&mut virtual_overseer, expected_request_a.clone()).await; + + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded( + candidate_para, + candidate_receipt, + _pvd, + tx, + ), + ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + // Reject it. + tx.send(Vec::new()).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Invalid( + relay_parent, + candidate_receipt, + )) if candidate_receipt.descriptor() == candidate.descriptor() && + candidate_receipt.commitments_hash == candidate.commitments.hash() && + relay_parent == leaf_a_parent + ); + + // Try seconding the same candidate. + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + assert_hypothetical_depth_requests(&mut virtual_overseer, expected_request_a).await; + // Prospective parachains are notified. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded( + candidate_para, + candidate_receipt, + _pvd, + tx, + ), + ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + // Any non-empty response will do. + tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap(); + } + ); + + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate.hash(), + test_state.session(), + vec![ValidatorIndex(0)], + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == leaf_a_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(leaf_a_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + virtual_overseer + }); +} + +// Test that a validator can second multiple candidates per single relay parent. +#[test] +fn second_multiple_candidates_per_relay_parent() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate `a` is seconded in a parent of the activated `leaf`. + const LEAF_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_DEPTH: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_hash = Hash::from_low_u64_be(130); + let leaf_parent = get_parent_hash(leaf_hash); + let leaf_grandparent = get_parent_hash(leaf_parent); + let activated = ActivatedLeaf { + hash: leaf_hash, + number: LEAF_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_DEPTH)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate_a = TestCandidateBuilder { + para_id, + relay_parent: leaf_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + }; + let mut candidate_b = candidate_a.clone(); + candidate_b.relay_parent = leaf_grandparent; + + // With depths. 
+ let candidate_a = (candidate_a.build(), 1); + let candidate_b = (candidate_b.build(), 2); + + for candidate in &[candidate_a, candidate_b] { + let (candidate, depth) = candidate; + let second = CandidateBackingMessage::Second( + leaf_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + candidate.descriptor().relay_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let expected_request_a = vec![( + HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: candidate.descriptor().relay_parent, + fragment_tree_relay_parent: leaf_hash, + }, + vec![*depth], + )]; + assert_hypothetical_depth_requests(&mut virtual_overseer, expected_request_a.clone()) + .await; + + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded( + candidate_para, + candidate_receipt, + _pvd, + tx, + ), + ) if &candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + // Any non-empty response will do. + tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + } + ); + + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate.hash(), + test_state.session(), + vec![ValidatorIndex(0)], + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == candidate.descriptor().relay_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(candidate.descriptor().relay_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + } + + virtual_overseer + }); +} + +// Test that the candidate reaches quorum successfully. +#[test] +fn backing_works() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate `a` is seconded in a parent of the activated `leaf`. 
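// Quorum context for this test: legacy backing requires `min(2, group_len)`
// validators, so one incoming `Seconded` plus this validator's own `Valid`
// statement is enough to back the candidate. Hedged sketch of that
// historical threshold (the runtime is authoritative):
fn minimum_backing_votes(group_len: usize) -> usize {
	std::cmp::min(2, group_len)
}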
+ const LEAF_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_DEPTH: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_hash = Hash::from_low_u64_be(130); + let leaf_parent = get_parent_hash(leaf_hash); + let activated = ActivatedLeaf { + hash: leaf_hash, + number: LEAF_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_DEPTH)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + + let candidate_a = TestCandidateBuilder { + para_id, + relay_parent: leaf_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + validation_code: validation_code.0.clone(), + persisted_validation_data_hash: pvd.hash(), + ..Default::default() + } + .build(); + + let candidate_a_hash = candidate_a.hash(); + + let public1 = CryptoStore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[5].to_seed()), + ) + .await + .expect("Insert key into keystore"); + let public2 = CryptoStore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), + ) + .await + .expect("Insert key into keystore"); + + // Signing context should have a parent hash candidate is based on. + let signing_context = + SigningContext { parent_hash: leaf_parent, session_index: test_state.session() }; + let signed_a = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate_a.clone(), pvd.clone()), + &signing_context, + ValidatorIndex(2), + &public2.into(), + ) + .await + .ok() + .flatten() + .expect("should be signed"); + + let signed_b = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Valid(candidate_a_hash), + &signing_context, + ValidatorIndex(5), + &public1.into(), + ) + .await + .ok() + .flatten() + .expect("should be signed"); + + let statement = CandidateBackingMessage::Statement(leaf_parent, signed_a.clone()); + + virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + + // Prospective parachains are notified about candidate seconded first. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded( + candidate_para, + candidate_receipt, + _pvd, + tx, + ), + ) if candidate_receipt == candidate_a && candidate_para == para_id && pvd == _pvd => { + // Any non-empty response will do. + tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + } + ); + + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate_a_hash, + test_state.session(), + vec![ValidatorIndex(2)], + ) + .await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + candidate_a.descriptor().relay_parent, + &candidate_a, + &pov, + &pvd, + &validation_code, + expected_head_data, + true, + ) + .await; + + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate_a_hash, + test_state.session(), + vec![ValidatorIndex(0)], + ) + .await; + // Prospective parachains are notified about candidate backed. 
+ assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateBacked( + candidate_para_id, candidate_hash + ), + ) if candidate_a_hash == candidate_hash && candidate_para_id == para_id + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(candidate_receipt) + ) + ) => { + assert_eq!(candidate_receipt, candidate_a.to_plain()); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share(hash, _stmt) + ) => { + assert_eq!(leaf_parent, hash); + } + ); + + let statement = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); + + virtual_overseer.send(FromOrchestra::Communication { msg: statement }).await; + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate_a_hash, + test_state.session(), + vec![ValidatorIndex(5)], + ) + .await; + virtual_overseer + }); +} + +// Tests that validators start work on consecutive prospective parachain blocks. +#[test] +fn concurrent_dependent_candidates() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate `a` is seconded in a grandparent of the activated `leaf`, + // candidate `b` -- in parent. + const LEAF_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_DEPTH: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + let leaf_hash = Hash::from_low_u64_be(130); + let leaf_parent = get_parent_hash(leaf_hash); + let leaf_grandparent = get_parent_hash(leaf_parent); + let activated = ActivatedLeaf { + hash: leaf_hash, + number: LEAF_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_DEPTH)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + + let head_data = &[ + HeadData(vec![10, 20, 30]), // Before `a`. + HeadData(vec![11, 21, 31]), // After `a`. + HeadData(vec![12, 22]), // After `b`. 
+ ]; + + let pov_a = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd_a = PersistedValidationData { + parent_head: head_data[0].clone(), + relay_parent_number: LEAF_BLOCK_NUMBER - 2, + relay_parent_storage_root: Hash::zero(), + max_pov_size: 1024, + }; + + let pov_b = PoV { block_data: BlockData(vec![22, 14, 100]) }; + let pvd_b = PersistedValidationData { + parent_head: head_data[1].clone(), + relay_parent_number: LEAF_BLOCK_NUMBER - 1, + relay_parent_storage_root: Hash::zero(), + max_pov_size: 1024, + }; + let validation_code = ValidationCode(vec![1, 2, 3]); + + let candidate_a = TestCandidateBuilder { + para_id, + relay_parent: leaf_grandparent, + pov_hash: pov_a.hash(), + head_data: head_data[1].clone(), + erasure_root: make_erasure_root(&test_state, pov_a.clone(), pvd_a.clone()), + persisted_validation_data_hash: pvd_a.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + } + .build(); + let candidate_b = TestCandidateBuilder { + para_id, + relay_parent: leaf_parent, + pov_hash: pov_b.hash(), + head_data: head_data[2].clone(), + erasure_root: make_erasure_root(&test_state, pov_b.clone(), pvd_b.clone()), + persisted_validation_data_hash: pvd_b.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + } + .build(); + let candidate_a_hash = candidate_a.hash(); + let candidate_b_hash = candidate_b.hash(); + + let public1 = CryptoStore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[5].to_seed()), + ) + .await + .expect("Insert key into keystore"); + let public2 = CryptoStore::sr25519_generate_new( + &*test_state.keystore, + ValidatorId::ID, + Some(&test_state.validators[2].to_seed()), + ) + .await + .expect("Insert key into keystore"); + + // Signing context should have a parent hash candidate is based on. + let signing_context = + SigningContext { parent_hash: leaf_grandparent, session_index: test_state.session() }; + let signed_a = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate_a.clone(), pvd_a.clone()), + &signing_context, + ValidatorIndex(2), + &public2.into(), + ) + .await + .ok() + .flatten() + .expect("should be signed"); + + let signing_context = + SigningContext { parent_hash: leaf_parent, session_index: test_state.session() }; + let signed_b = SignedFullStatementWithPVD::sign( + &test_state.keystore, + StatementWithPVD::Seconded(candidate_b.clone(), pvd_b.clone()), + &signing_context, + ValidatorIndex(5), + &public1.into(), + ) + .await + .ok() + .flatten() + .expect("should be signed"); + + let statement_a = CandidateBackingMessage::Statement(leaf_grandparent, signed_a.clone()); + let statement_b = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); + + virtual_overseer.send(FromOrchestra::Communication { msg: statement_a }).await; + // At this point the subsystem waits for response, the previous message is received, + // send a second one without blocking. + let _ = virtual_overseer + .tx + .start_send_unpin(FromOrchestra::Communication { msg: statement_b }); + + let mut valid_statements = HashSet::new(); + + loop { + let msg = virtual_overseer + .recv() + .timeout(std::time::Duration::from_secs(1)) + .await + .expect("overseer recv timed out"); + + // Order is not guaranteed since we have 2 statements being handled concurrently. 
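// Completion condition for the unordered drain below: keep answering
// whatever arrives and exit only once a `Valid` statement has been shared
// for both candidates. Sketch over plain hashes:
fn all_seen(valid: &std::collections::HashSet<u64>, expected: &[u64]) -> bool {
	expected.iter().all(|h| valid.contains(h))
}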
+ match msg { + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded(.., tx), + ) => { + tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + }, + AllMessages::DisputeCoordinator(DisputeCoordinatorMessage::ImportStatements { + .. + }) => {}, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::ValidationCodeByHash(_, tx), + )) => { + tx.send(Ok(Some(validation_code.clone()))).unwrap(); + }, + AllMessages::AvailabilityDistribution( + AvailabilityDistributionMessage::FetchPoV { candidate_hash, tx, .. }, + ) => { + let pov = if candidate_hash == candidate_a_hash { + &pov_a + } else if candidate_hash == candidate_b_hash { + &pov_b + } else { + panic!("unknown candidate hash") + }; + tx.send(pov.clone()).unwrap(); + }, + AllMessages::CandidateValidation( + CandidateValidationMessage::ValidateFromExhaustive(.., candidate, _, _, tx), + ) => { + let candidate_hash = candidate.hash(); + let (head_data, pvd) = if candidate_hash == candidate_a_hash { + (&head_data[1], &pvd_a) + } else if candidate_hash == candidate_b_hash { + (&head_data[2], &pvd_b) + } else { + panic!("unknown candidate hash") + }; + tx.send(Ok(ValidationResult::Valid( + CandidateCommitments { + head_data: head_data.clone(), + horizontal_messages: Vec::new(), + upward_messages: Vec::new(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }, + pvd.clone(), + ))) + .unwrap(); + }, + AllMessages::AvailabilityStore(AvailabilityStoreMessage::StoreAvailableData { + tx, + .. + }) => { + tx.send(Ok(())).unwrap(); + }, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateBacked(..), + ) => {}, + AllMessages::Provisioner(ProvisionerMessage::ProvisionableData(..)) => {}, + AllMessages::StatementDistribution(StatementDistributionMessage::Share( + _, + statement, + )) => { + assert_eq!(statement.validator_index(), ValidatorIndex(0)); + let payload = statement.payload(); + assert_matches!( + payload.clone(), + Statement::Valid(hash) + if hash == candidate_a_hash || hash == candidate_b_hash => + { + assert!(valid_statements.insert(hash)); + } + ); + + if valid_statements.len() == 2 { + break + } + }, + _ => panic!("unexpected message received from overseer: {:?}", msg), + } + } + + assert!( + valid_statements.contains(&candidate_a_hash) && + valid_statements.contains(&candidate_b_hash) + ); + + virtual_overseer + }); +} diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 9972b60490a1..ab9d678f77b0 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -62,7 +62,7 @@ use polkadot_node_subsystem_util::inclusion_emulator::staging::{ ConstraintModifications, Constraints, Fragment, ProspectiveCandidate, RelayChainBlockInfo, }; use polkadot_primitives::vstaging::{ - BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, + BlockNumber, CandidateHash, CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, PersistedValidationData, }; @@ -158,6 +158,17 @@ impl CandidateStorage { }) } + /// Get head-data by hash. + pub(crate) fn head_data_by_hash(&self, hash: &Hash) -> Option<&HeadData> { + // Get some candidate which has a parent-head with the same hash as requested. + let a_candidate_hash = self.by_parent_head.get(hash).and_then(|m| m.iter().next())?; + + // Extract the full parent head from that candidate's `PersistedValidationData`. 
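// The lookup spelled out: `by_parent_head` maps a head-data hash to the
// candidates built on that head, and each candidate entry carries the
// `PersistedValidationData` whose `parent_head` is the preimage we want.
// Std-only sketch of the same two-step chain:
fn preimage_by_hash<'a>(
	by_parent_head: &std::collections::HashMap<u64, std::collections::HashSet<u64>>,
	by_candidate: &'a std::collections::HashMap<u64, Vec<u8>>, // candidate -> parent head
	head_hash: &u64,
) -> Option<&'a Vec<u8>> {
	let a_candidate = by_parent_head.get(head_hash).and_then(|set| set.iter().next())?;
	by_candidate.get(a_candidate)
}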
+ self.by_candidate_hash + .get(a_candidate_hash) + .map(|e| &e.candidate.persisted_validation_data.parent_head) + } + fn iter_para_children<'a>( &'a self, parent_head_hash: &Hash, @@ -271,13 +282,19 @@ impl Scope { .unwrap_or_else(|| self.relay_parent.clone()) } - fn ancestor_by_hash(&self, hash: &Hash) -> Option { + /// Get the ancestor of the fragment tree by hash. + pub fn ancestor_by_hash(&self, hash: &Hash) -> Option { if hash == &self.relay_parent.hash { return Some(self.relay_parent.clone()) } self.ancestors_by_hash.get(hash).map(|info| info.clone()) } + + /// Get the base constraints of the scope + pub fn base_constraints(&self) -> &Constraints { + &self.base_constraints + } } // We use indices into a flat vector to refer to nodes in the tree. diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 963c99e0a743..f90d23e92ad7 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -34,7 +34,8 @@ use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest, - ProspectiveParachainsMessage, RuntimeApiMessage, RuntimeApiRequest, + ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage, + RuntimeApiRequest, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; @@ -137,8 +138,10 @@ async fn run_iteration(ctx: &mut Context, view: &mut View) -> Result<() answer_hypothetical_depths_request(&view, request, tx), ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) => answer_tree_membership_request(&view, para, candidate, tx), - ProspectiveParachainsMessage::GetMinimumRelayParent(para, relay_parent, tx) => - answer_minimum_relay_parent_request(&view, para, relay_parent, tx), + ProspectiveParachainsMessage::GetMinimumRelayParents(relay_parent, tx) => + answer_minimum_relay_parents_request(&view, relay_parent, tx), + ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx) => + answer_prospective_validation_data_request(&view, request, tx), }, } } @@ -160,6 +163,9 @@ async fn handle_active_leaves_update( } for activated in update.activated.into_iter() { + // TODO [now]: skip leaves which don't have prospective parachains + // enabled. This should be a runtime API version check. + let hash = activated.hash; let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?; @@ -331,7 +337,7 @@ async fn handle_candidate_backed( target: LOG_TARGET, para_id = ?para, ?candidate_hash, - "Received instructio to back candidate", + "Received instruction to back candidate", ); return Ok(()) @@ -461,19 +467,75 @@ fn answer_tree_membership_request( let _ = tx.send(membership); } -fn answer_minimum_relay_parent_request( +fn answer_minimum_relay_parents_request( view: &View, - para: ParaId, relay_parent: Hash, - tx: oneshot::Sender>, + tx: oneshot::Sender>, +) { + let mut v = Vec::new(); + if let Some(leaf_data) = view.active_leaves.get(&relay_parent) { + for (para_id, fragment_tree) in &leaf_data.fragment_trees { + v.push((*para_id, fragment_tree.scope().earliest_relay_parent().number)); + } + } + + let _ = tx.send(v); +} + +fn answer_prospective_validation_data_request( + view: &View, + request: ProspectiveValidationDataRequest, + tx: oneshot::Sender>, ) { - let res = view + // 1. Try to get the head-data from the candidate store if known. + // 2. 
Otherwise, it might exist as the base in some relay-parent and we can find it by + // iterating fragment trees. + // 3. Otherwise, it is unknown. + // 4. Also try to find the relay parent block info by scanning + // fragment trees. + // 5. If head data and relay parent block info are found - success. Otherwise, failure. + + let storage = match view.candidate_storage.get(&request.para_id) { + None => { + let _ = tx.send(None); + return + }, + Some(s) => s, + }; + + let mut head_data = + storage.head_data_by_hash(&request.parent_head_data_hash).map(|x| x.clone()); + let mut relay_parent_info = None; + + for fragment_tree in view .active_leaves - .get(&relay_parent) - .and_then(|data| data.fragment_trees.get(¶)) - .map(|tree| tree.scope().earliest_relay_parent().number); + .values() + .filter_map(|x| x.fragment_trees.get(&request.para_id)) + { + if head_data.is_some() && relay_parent_info.is_some() { + break + } + if relay_parent_info.is_none() { + relay_parent_info = + fragment_tree.scope().ancestor_by_hash(&request.candidate_relay_parent); + } + if head_data.is_none() { + let required_parent = &fragment_tree.scope().base_constraints().required_parent; + if required_parent.hash() == request.parent_head_data_hash { + head_data = Some(required_parent.clone()); + } + } + } - let _ = tx.send(res); + let _ = tx.send(match (head_data, relay_parent_info) { + (Some(h), Some(i)) => Some(PersistedValidationData { + parent_head: h, + relay_parent_number: i.number, + relay_parent_storage_root: i.storage_root, + max_pov_size: request.max_pov_size, + }), + _ => None, + }); } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] diff --git a/node/malus/src/variants/suggest_garbage_candidate.rs b/node/malus/src/variants/suggest_garbage_candidate.rs index b8aaaa18c10d..ddcf6b3c98b0 100644 --- a/node/malus/src/variants/suggest_garbage_candidate.rs +++ b/node/malus/src/variants/suggest_garbage_candidate.rs @@ -30,7 +30,6 @@ use polkadot_cli::{ ProvideRuntimeApi, }, }; -use polkadot_node_core_candidate_validation::find_validation_data; use polkadot_node_primitives::{AvailableData, BlockData, PoV}; use polkadot_primitives::v2::{CandidateDescriptor, CandidateHash}; @@ -88,7 +87,13 @@ where ) -> Option> { match msg { FromOrchestra::Communication { - msg: CandidateBackingMessage::Second(relay_parent, candidate, _pov), + msg: + CandidateBackingMessage::Second( + relay_parent, + candidate, + persisted_validation_data, + _pov, + ), } => { gum::debug!( target: MALUS, @@ -103,7 +108,7 @@ where let mut new_sender = subsystem_sender.clone(); let _candidate = candidate.clone(); self.spawner.spawn_blocking( - "malus-get-validation-data", + "malus-get-n-validators", Some("malus"), Box::pin(async move { gum::trace!(target: MALUS, "Requesting validators"); @@ -114,25 +119,16 @@ where .unwrap() .len(); gum::trace!(target: MALUS, "Validators {}", n_validators); - match find_validation_data(&mut new_sender, &_candidate.descriptor()).await - { - Ok(Some((validation_data, validation_code))) => { - sender - .send((validation_data, validation_code, n_validators)) - .expect("channel is still open"); - }, - _ => { - panic!("Unable to fetch validation data"); - }, - } + sender.send(n_validators).expect("channel is still open"); }), ); - let (validation_data, validation_code, n_validators) = receiver.recv().unwrap(); + let n_validators = receiver.recv().unwrap(); - let validation_data_hash = validation_data.hash(); - let validation_code_hash = validation_code.hash(); - let validation_data_relay_parent_number = 
validation_data.relay_parent_number; + let validation_data_hash = persisted_validation_data.hash(); + let validation_code_hash = candidate.descriptor.validation_code_hash; + let validation_data_relay_parent_number = + persisted_validation_data.relay_parent_number; gum::trace!( target: MALUS, @@ -142,11 +138,13 @@ where ?validation_data_hash, ?validation_code_hash, ?validation_data_relay_parent_number, - "Fetched validation data." + "Fetched current validators set" ); - let malicious_available_data = - AvailableData { pov: Arc::new(pov.clone()), validation_data }; + let malicious_available_data = AvailableData { + pov: Arc::new(pov.clone()), + validation_data: persisted_validation_data.clone(), + }; let pov_hash = pov.hash(); let erasure_root = { @@ -209,7 +207,12 @@ where .insert(malicious_candidate_hash, candidate.hash()); let message = FromOrchestra::Communication { - msg: CandidateBackingMessage::Second(relay_parent, malicious_candidate, pov), + msg: CandidateBackingMessage::Second( + relay_parent, + malicious_candidate, + persisted_validation_data, + pov, + ), }; Some(message) diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index 592feaf9124a..30ff333b40fb 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -53,7 +53,10 @@ use polkadot_node_subsystem::{ overseer, FromOrchestra, OverseerSignal, PerLeafSpan, SubsystemSender, }; use polkadot_node_subsystem_util::metrics::{self, prometheus}; -use polkadot_primitives::v2::{CandidateReceipt, CollatorId, Hash, Id as ParaId}; +use polkadot_primitives::v2::{ + CandidateReceipt, CollatorId, Hash, Id as ParaId, OccupiedCoreAssumption, + PersistedValidationData, +}; use crate::error::Result; @@ -1307,6 +1310,39 @@ async fn dequeue_next_collation_and_fetch( } } +#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] +async fn request_persisted_validation_data( + ctx: &mut Context, + relay_parent: Hash, + para_id: ParaId, +) -> Option { + // TODO [https://github.com/paritytech/polkadot/issues/5054] + // + // As of https://github.com/paritytech/polkadot/pull/5557 the + // `Second` message requires the `PersistedValidationData` to be + // supplied. + // + // Without asynchronous backing, this can be easily fetched from the + // chain state. + // + // This assumes the core is _scheduled_, in keeping with the effective + // current behavior. If the core is occupied, we simply don't return + // anything. Likewise with runtime API errors, which are rare. + let res = polkadot_node_subsystem_util::request_persisted_validation_data( + relay_parent, + para_id, + OccupiedCoreAssumption::Free, + ctx.sender(), + ) + .await + .await; + + match res { + Ok(Ok(Some(pvd))) => Some(pvd), + _ => None, + } +} + /// Handle a fetched collation result. 
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] async fn handle_collation_fetched_result( @@ -1351,13 +1387,31 @@ async fn handle_collation_fetched_result( if let Entry::Vacant(entry) = state.pending_candidates.entry(relay_parent) { collation_event.1.commitments_hash = Some(candidate_receipt.commitments_hash); - ctx.sender() - .send_message(CandidateBackingMessage::Second( + + if let Some(pvd) = request_persisted_validation_data( + ctx, + candidate_receipt.descriptor().relay_parent, + candidate_receipt.descriptor().para_id, + ) + .await + { + // TODO [https://github.com/paritytech/polkadot/issues/5054] + // + // If PVD isn't available (core occupied) then we'll silently + // just not second this. But prior to asynchronous backing + // we wouldn't second anyway because the core is occupied. + // + // The proper refactoring would be to accept declares from collators + // but not even fetch from them if the core is occupied. Given 5054, + // there's no reason to do this right now. + ctx.send_message(CandidateBackingMessage::Second( relay_parent.clone(), candidate_receipt, + pvd, pov, )) .await; + } entry.insert(collation_event); } else { diff --git a/node/network/collator-protocol/src/validator_side/tests.rs b/node/network/collator-protocol/src/validator_side/tests.rs index 77c209361422..3c45a675aa48 100644 --- a/node/network/collator-protocol/src/validator_side/tests.rs +++ b/node/network/collator-protocol/src/validator_side/tests.rs @@ -32,8 +32,8 @@ use polkadot_node_subsystem::messages::{AllMessages, RuntimeApiMessage, RuntimeA use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::v2::{ - CollatorPair, CoreState, GroupIndex, GroupRotationInfo, OccupiedCore, ScheduledCore, - ValidatorId, ValidatorIndex, + CollatorPair, CoreState, GroupIndex, GroupRotationInfo, HeadData, OccupiedCore, + PersistedValidationData, ScheduledCore, ValidatorId, ValidatorIndex, }; use polkadot_primitives_test_helpers::{ dummy_candidate_descriptor, dummy_candidate_receipt_bad_sig, dummy_hash, @@ -245,15 +245,45 @@ async fn assert_candidate_backing_second( expected_para_id: ParaId, expected_pov: &PoV, ) -> CandidateReceipt { + // TODO [https://github.com/paritytech/polkadot/issues/5054] + // + // While collator protocol isn't updated, it's expected to receive + // a Runtime API request for persisted validation data. 
+ let pvd = PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + relay_parent_number: 5, + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + }; + assert_matches!( overseer_recv(virtual_overseer).await, - AllMessages::CandidateBacking(CandidateBackingMessage::Second(relay_parent, candidate_receipt, incoming_pov) - ) => { - assert_eq!(expected_relay_parent, relay_parent); - assert_eq!(expected_para_id, candidate_receipt.descriptor.para_id); - assert_eq!(*expected_pov, incoming_pov); - candidate_receipt - }) + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx), + )) => { + assert_eq!(expected_relay_parent, hash); + assert_eq!(expected_para_id, para_id); + assert_eq!(OccupiedCoreAssumption::Free, assumption); + tx.send(Ok(Some(pvd.clone()))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::CandidateBacking(CandidateBackingMessage::Second( + relay_parent, + candidate_receipt, + received_pvd, + incoming_pov, + )) => { + assert_eq!(expected_relay_parent, relay_parent); + assert_eq!(expected_para_id, candidate_receipt.descriptor.para_id); + assert_eq!(*expected_pov, incoming_pov); + assert_eq!(pvd, received_pvd); + candidate_receipt + } + ) } /// Assert that a collator got disconnected. diff --git a/node/network/statement-distribution/src/error.rs b/node/network/statement-distribution/src/error.rs index 01b2efd53b86..f91b0980c966 100644 --- a/node/network/statement-distribution/src/error.rs +++ b/node/network/statement-distribution/src/error.rs @@ -18,9 +18,11 @@ //! Error handling related code and Error/Result definitions. use polkadot_node_network_protocol::PeerId; -use polkadot_node_subsystem::SubsystemError; +use polkadot_node_subsystem::{RuntimeApiError, SubsystemError}; use polkadot_node_subsystem_util::runtime; -use polkadot_primitives::v2::{CandidateHash, Hash}; +use polkadot_primitives::v2::{CandidateHash, Hash, Id as ParaId}; + +use futures::channel::oneshot; use crate::LOG_TARGET; @@ -56,6 +58,12 @@ pub enum Error { #[error("Error while accessing runtime information")] Runtime(#[from] runtime::Error), + #[error("RuntimeAPISubsystem channel closed before receipt")] + RuntimeApiUnavailable(#[source] oneshot::Canceled), + + #[error("Fetching persisted validation data for para {0:?}, {1:?}")] + FetchPersistedValidationData(ParaId, RuntimeApiError), + #[error("Relay parent could not be found in active heads")] NoSuchHead(Hash), diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 2abb765f392b..ee5f2869e8a0 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -33,7 +33,9 @@ use polkadot_node_network_protocol::{ v1::{self as protocol_v1, StatementMetadata}, IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; -use polkadot_node_primitives::{SignedFullStatement, Statement, UncheckedSignedFullStatement}; +use polkadot_node_primitives::{ + SignedFullStatement, Statement, StatementWithPVD, UncheckedSignedFullStatement, +}; use polkadot_node_subsystem_util::{self as util, rand, MIN_GOSSIP_PEERS}; use polkadot_node_subsystem::{ @@ -43,12 +45,12 @@ use polkadot_node_subsystem::{ StatementDistributionMessage, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem, - SubsystemError, + StatementDistributionSenderTrait, SubsystemError, }; use 
polkadot_primitives::v2::{ AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, Hash, - SignedStatement, SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex, - ValidatorSignature, + Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, SignedStatement, SigningContext, + UncheckedSignedStatement, ValidatorId, ValidatorIndex, ValidatorSignature, }; use futures::{ @@ -657,6 +659,8 @@ enum DeniedStatement { struct ActiveHeadData { /// All candidates we are aware of for this head, keyed by hash. candidates: HashSet, + /// Persisted validation data cache. + cached_validation_data: HashMap, /// Stored statements for circulation to peers. /// /// These are iterable in insertion order, and `Seconded` statements are always @@ -682,6 +686,7 @@ impl ActiveHeadData { ) -> Self { ActiveHeadData { candidates: Default::default(), + cached_validation_data: Default::default(), statements: Default::default(), waiting_large_statements: Default::default(), validators, @@ -691,6 +696,37 @@ impl ActiveHeadData { } } + async fn fetch_persisted_validation_data( + &mut self, + sender: &mut Sender, + relay_parent: Hash, + para_id: ParaId, + ) -> Result> + where + Sender: StatementDistributionSenderTrait, + { + if let Entry::Vacant(entry) = self.cached_validation_data.entry(para_id) { + let persisted_validation_data = + polkadot_node_subsystem_util::request_persisted_validation_data( + relay_parent, + para_id, + OccupiedCoreAssumption::Free, + sender, + ) + .await + .await + .map_err(Error::RuntimeApiUnavailable)? + .map_err(|err| Error::FetchPersistedValidationData(para_id, err))?; + + match persisted_validation_data { + Some(pvd) => entry.insert(pvd), + None => return Ok(None), + }; + } + + Ok(self.cached_validation_data.get(¶_id)) + } + /// Note the given statement. /// /// If it was not already known and can be accepted, returns `NotedStatement::Fresh`, @@ -1554,6 +1590,45 @@ async fn handle_incoming_message<'a, Context>( Ok(false) => {}, } + // TODO [https://github.com/paritytech/polkadot/issues/5055] + // + // For `Seconded` statements `None` or `Err` means we couldn't fetch the PVD, which + // means the statement shouldn't be accepted. + // + // In case of `Valid` we should have it cached prior, therefore this performs + // no Runtime API calls and always returns `Ok(Some(_))`. + if let Statement::Seconded(receipt) = statement.payload() { + let para_id = receipt.descriptor.para_id; + // Either call the Runtime API or check that validation data is cached. + let result = active_head + .fetch_persisted_validation_data(ctx.sender(), relay_parent, para_id) + .await; + if !matches!(result, Ok(Some(_))) { + return None + } + } + + // Extend the payload with persisted validation data required by the backing + // subsystem. + // + // Do it in advance before noting the statement because we don't want to borrow active + // head mutable and use the cache. 
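// The conversion below leans on an invariant worth spelling out (a sketch
// using the `EncodeAs<CompactStatement>` impls introduced by this patch):
// attaching the PVD must not change the compact encoding that was actually
// signed, i.e. for any `candidate_hash`,
//
//     assert_eq!(
//         Statement::Valid(candidate_hash).encode_as(),
//         StatementWithPVD::Valid(candidate_hash).encode_as(),
//     );
//
// which is exactly why `convert_to_superpayload_with` can carry the original
// signature over unchanged.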
+ let statement_with_pvd = statement + .clone() + .convert_to_superpayload_with(|statement| match statement { + Statement::Seconded(receipt) => { + let para_id = &receipt.descriptor.para_id; + let persisted_validation_data = active_head + .cached_validation_data + .get(para_id) + .cloned() + .expect("pvd is ensured to be cached above; qed"); + StatementWithPVD::Seconded(receipt, persisted_validation_data) + }, + Statement::Valid(candidate_hash) => StatementWithPVD::Valid(candidate_hash), + }) + .expect("payload was checked with conversion from compact; qed"); + // Note: `peer_data.receive` already ensures that the statement is not an unbounded equivocation // or unpinned to a seconded candidate. So it is safe to place it into the storage. match active_head.note_statement(statement) { @@ -1567,11 +1642,8 @@ async fn handle_incoming_message<'a, Context>( // When we receive a new message from a peer, we forward it to the // candidate backing subsystem. - ctx.send_message(CandidateBackingMessage::Statement( - relay_parent, - statement.statement.clone(), - )) - .await; + ctx.send_message(CandidateBackingMessage::Statement(relay_parent, statement_with_pvd)) + .await; Some((relay_parent, statement)) }, diff --git a/node/network/statement-distribution/src/tests.rs b/node/network/statement-distribution/src/tests.rs index 9f5b4f6de326..8635dcec3a17 100644 --- a/node/network/statement-distribution/src/tests.rs +++ b/node/network/statement-distribution/src/tests.rs @@ -26,14 +26,16 @@ use polkadot_node_network_protocol::{ }, view, ObservedRole, }; -use polkadot_node_primitives::{Statement, UncheckedSignedFullStatement}; +use polkadot_node_primitives::{ + SignedFullStatementWithPVD, Statement, UncheckedSignedFullStatement, +}; use polkadot_node_subsystem::{ jaeger, messages::{network_bridge_event, AllMessages, RuntimeApiMessage, RuntimeApiRequest}, ActivatedLeaf, LeafStatus, }; use polkadot_node_subsystem_test_helpers::mock::make_ferdie_keystore; -use polkadot_primitives::v2::{Hash, SessionInfo, ValidationCode}; +use polkadot_primitives::v2::{Hash, HeadData, SessionInfo, ValidationCode}; use polkadot_primitives_test_helpers::{ dummy_committed_candidate_receipt, dummy_hash, AlwaysZeroRng, }; @@ -44,6 +46,27 @@ use sp_keyring::Sr25519Keyring; use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr}; use std::{iter::FromIterator as _, sync::Arc, time::Duration}; +fn dummy_pvd() -> PersistedValidationData { + PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + relay_parent_number: 5, + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + } +} + +fn extend_statement_with_pvd( + statement: SignedFullStatement, + pvd: PersistedValidationData, +) -> SignedFullStatementWithPVD { + statement + .convert_to_superpayload_with(|statement| match statement { + Statement::Seconded(receipt) => StatementWithPVD::Seconded(receipt, pvd), + Statement::Valid(candidate_hash) => StatementWithPVD::Valid(candidate_hash), + }) + .unwrap() +} + #[test] fn active_head_accepts_only_2_seconded_per_validator() { let validators = vec![ @@ -699,12 +722,14 @@ fn circulated_statement_goes_to_all_peers_with_view() { #[test] fn receiving_from_one_sends_to_another_and_to_candidate_backing() { + const PARA_ID: ParaId = ParaId::new(1); let hash_a = Hash::repeat_byte(1); + let pvd = dummy_pvd(); let candidate = { let mut c = dummy_committed_candidate_receipt(dummy_hash()); c.descriptor.relay_parent = hash_a; - c.descriptor.para_id = 1.into(); + c.descriptor.para_id = PARA_ID; c }; @@ -845,18 +870,32 
@@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { }) .await; + let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone()); + + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx), + )) if para_id == PARA_ID && + assumption == OccupiedCoreAssumption::Free && + hash == hash_a => + { + tx.send(Ok(Some(pvd))).unwrap(); + } + ); + assert_matches!( handle.recv().await, AllMessages::NetworkBridge( NetworkBridgeMessage::ReportPeer(p, r) ) if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => {} ); - assert_matches!( handle.recv().await, AllMessages::CandidateBacking( CandidateBackingMessage::Statement(r, s) - ) if r == hash_a && s == statement => {} + ) if r == hash_a && s == statement_with_pvd => {} ); assert_matches!( @@ -885,6 +924,9 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { #[test] fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing() { + const PARA_ID: ParaId = ParaId::new(1); + let pvd = dummy_pvd(); + sp_tracing::try_init_simple(); let hash_a = Hash::repeat_byte(1); let hash_b = Hash::repeat_byte(2); @@ -892,7 +934,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( let candidate = { let mut c = dummy_committed_candidate_receipt(dummy_hash()); c.descriptor.relay_parent = hash_a; - c.descriptor.para_id = 1.into(); + c.descriptor.para_id = PARA_ID; c.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3])); c }; @@ -1274,6 +1316,20 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( ) if p == peer_c && r == BENEFIT_VALID_RESPONSE => {} ); + let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone()); + + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx), + )) if para_id == PARA_ID && + assumption == OccupiedCoreAssumption::Free && + hash == hash_a => + { + tx.send(Ok(Some(pvd))).unwrap(); + } + ); assert_matches!( handle.recv().await, AllMessages::NetworkBridge( @@ -1285,7 +1341,7 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( handle.recv().await, AllMessages::CandidateBacking( CandidateBackingMessage::Statement(r, s) - ) if r == hash_a && s == statement => {} + ) if r == hash_a && s == statement_with_pvd => {} ); // Now messages should go out: @@ -1887,6 +1943,7 @@ fn peer_cant_flood_with_large_statements() { #[test] fn handle_multiple_seconded_statements() { let relay_parent_hash = Hash::repeat_byte(1); + let pvd = dummy_pvd(); let candidate = dummy_committed_candidate_receipt(relay_parent_hash); let candidate_hash = candidate.hash(); @@ -2086,6 +2143,18 @@ fn handle_multiple_seconded_statements() { }) .await; + let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone()); + + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::PersistedValidationData(_, assumption, tx), + )) if assumption == OccupiedCoreAssumption::Free => { + tx.send(Ok(Some(pvd.clone()))).unwrap(); + } + ); + assert_matches!( handle.recv().await, AllMessages::NetworkBridge( @@ -2103,7 +2172,7 @@ fn handle_multiple_seconded_statements() { CandidateBackingMessage::Statement(r, s) ) => { assert_eq!(r, relay_parent_hash); - assert_eq!(s, statement); + 
assert_eq!(s, statement_with_pvd); } ); @@ -2189,6 +2258,10 @@ fn handle_multiple_seconded_statements() { }) .await; + let statement_with_pvd = extend_statement_with_pvd(statement.clone(), pvd.clone()); + + // Persisted validation data is cached. + assert_matches!( handle.recv().await, AllMessages::NetworkBridge( @@ -2205,7 +2278,7 @@ fn handle_multiple_seconded_statements() { CandidateBackingMessage::Statement(r, s) ) => { assert_eq!(r, relay_parent_hash); - assert_eq!(s, statement); + assert_eq!(s, statement_with_pvd); } ); diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 28c4e6c03fbe..9e95ca8af03e 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -467,12 +467,14 @@ pub struct Overseer { #[subsystem(CandidateBackingMessage, sends: [ CandidateValidationMessage, CollatorProtocolMessage, + ChainApiMessage, AvailabilityDistributionMessage, AvailabilityStoreMessage, StatementDistributionMessage, ProvisionerMessage, RuntimeApiMessage, DisputeCoordinatorMessage, + ProspectiveParachainsMessage, ])] candidate_backing: CandidateBacking, diff --git a/node/primitives/src/disputes/mod.rs b/node/primitives/src/disputes/mod.rs index 4b2d636dc10e..01293d2b64f0 100644 --- a/node/primitives/src/disputes/mod.rs +++ b/node/primitives/src/disputes/mod.rs @@ -19,10 +19,10 @@ use parity_scale_codec::{Decode, Encode}; use sp_application_crypto::AppKey; use sp_keystore::{CryptoStore, Error as KeystoreError, SyncCryptoStorePtr}; -use super::{Statement, UncheckedSignedFullStatement}; use polkadot_primitives::v2::{ - CandidateHash, CandidateReceipt, DisputeStatement, InvalidDisputeStatementKind, SessionIndex, - SigningContext, ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, + CandidateHash, CandidateReceipt, CompactStatement, DisputeStatement, EncodeAs, + InvalidDisputeStatementKind, SessionIndex, SigningContext, UncheckedSigned, + ValidDisputeStatementKind, ValidatorId, ValidatorIndex, ValidatorSignature, }; /// `DisputeMessage` and related types. @@ -174,19 +174,23 @@ impl SignedDisputeStatement { /// along with the signing context. /// /// This does signature checks again with the data provided. - pub fn from_backing_statement( - backing_statement: &UncheckedSignedFullStatement, + pub fn from_backing_statement( + backing_statement: &UncheckedSigned, signing_context: SigningContext, validator_public: ValidatorId, - ) -> Result { - let (statement_kind, candidate_hash) = match backing_statement.unchecked_payload() { - Statement::Seconded(candidate) => ( + ) -> Result + where + for<'a> &'a T: Into, + T: EncodeAs, + { + let (statement_kind, candidate_hash) = match backing_statement.unchecked_payload().into() { + CompactStatement::Seconded(candidate_hash) => ( ValidDisputeStatementKind::BackingSeconded(signing_context.parent_hash), - candidate.hash(), + candidate_hash, ), - Statement::Valid(candidate_hash) => ( + CompactStatement::Valid(candidate_hash) => ( ValidDisputeStatementKind::BackingValid(signing_context.parent_hash), - *candidate_hash, + candidate_hash, ), }; diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index 882b75a0e81f..6f76732e35c4 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -194,6 +194,76 @@ impl EncodeAs for Statement { } } +/// A statement, exactly the same as [`Statement`] but where seconded messages carry +/// the [`PersistedValidationData`]. +#[derive(Clone, PartialEq, Eq)] +pub enum StatementWithPVD { + /// A statement that a validator seconds a candidate. 
+ Seconded(CommittedCandidateReceipt, PersistedValidationData), + /// A statement that a validator has deemed a candidate valid. + Valid(CandidateHash), +} + +impl std::fmt::Debug for StatementWithPVD { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + StatementWithPVD::Seconded(seconded, _) => + write!(f, "Seconded: {:?}", seconded.descriptor), + StatementWithPVD::Valid(hash) => write!(f, "Valid: {:?}", hash), + } + } +} + +impl StatementWithPVD { + /// Get the candidate hash referenced by this statement. + /// + /// If this is a `Statement::Seconded`, this does hash the candidate receipt, which may be expensive + /// for large candidates. + pub fn candidate_hash(&self) -> CandidateHash { + match *self { + StatementWithPVD::Valid(ref h) => *h, + StatementWithPVD::Seconded(ref c, _) => c.hash(), + } + } + + /// Transform this statement into its compact version, which references only the hash + /// of the candidate. + pub fn to_compact(&self) -> CompactStatement { + match *self { + StatementWithPVD::Seconded(ref c, _) => CompactStatement::Seconded(c.hash()), + StatementWithPVD::Valid(hash) => CompactStatement::Valid(hash), + } + } + + /// Drop the [`PersistedValidationData`] from the statement. + pub fn drop_pvd(self) -> Statement { + match self { + StatementWithPVD::Seconded(c, _) => Statement::Seconded(c), + StatementWithPVD::Valid(c_h) => Statement::Valid(c_h), + } + } + + /// Drop the [`PersistedValidationData`] from the statement in a signed + /// variant. + pub fn drop_pvd_from_signed(signed: SignedFullStatementWithPVD) -> SignedFullStatement { + signed + .convert_to_superpayload_with(|s| s.drop_pvd()) + .expect("persisted_validation_data doesn't affect encoded_as; qed") + } +} + +impl From<&'_ StatementWithPVD> for CompactStatement { + fn from(stmt: &StatementWithPVD) -> Self { + stmt.to_compact() + } +} + +impl EncodeAs for StatementWithPVD { + fn encode_as(&self) -> Vec { + self.to_compact().encode() + } +} + /// A statement, the corresponding signature, and the index of the sender. /// /// Signing context and validator set should be apparent from context. @@ -205,6 +275,13 @@ pub type SignedFullStatement = Signed; /// Variant of `SignedFullStatement` where the signature has not yet been verified. pub type UncheckedSignedFullStatement = UncheckedSigned; +/// A statement, the corresponding signature, and the index of the sender. +/// +/// Seconded statements are accompanied by the [`PersistedValidationData`] +/// +/// Signing context and validator set should be apparent from context. 
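// A small usage sketch (with hypothetical `receipt` and `pvd` values): the
// PVD rides along with `Seconded` but never influences the candidate hash or
// the compact form, and `drop_pvd` recovers the plain statement:
//
//     let stmt = StatementWithPVD::Seconded(receipt.clone(), pvd);
//     assert_eq!(stmt.candidate_hash(), receipt.hash());
//     assert_eq!(stmt.to_compact(), CompactStatement::Seconded(receipt.hash()));
//     let _plain: Statement = stmt.drop_pvd();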
+pub type SignedFullStatementWithPVD = Signed; + /// Candidate invalidity details #[derive(Debug)] pub enum InvalidCandidate { diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index db2bd89286b7..9652cff20ba7 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -36,7 +36,7 @@ use polkadot_node_primitives::{ approval::{BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote}, AvailableData, BabeEpoch, BlockWeight, CandidateVotes, CollationGenerationConfig, CollationSecondedSignal, DisputeMessage, ErasureChunk, PoV, SignedDisputeStatement, - SignedFullStatement, ValidationResult, + SignedFullStatement, SignedFullStatementWithPVD, ValidationResult, }; use polkadot_primitives::{ v2::{ @@ -75,17 +75,17 @@ pub enum CandidateBackingMessage { GetBackedCandidates(Hash, Vec, oneshot::Sender>), /// Note that the Candidate Backing subsystem should second the given candidate in the context of the /// given relay-parent (ref. by hash). This candidate must be validated. - Second(Hash, CandidateReceipt, PoV), - /// Note a validator's statement about a particular candidate. Disagreements about validity must be escalated - /// to a broader check by Misbehavior Arbitration. Agreements are simply tallied until a quorum is reached. - Statement(Hash, SignedFullStatement), + Second(Hash, CandidateReceipt, PersistedValidationData, PoV), + /// Note a validator's statement about a particular candidate. + /// Agreements are simply tallied until a quorum is reached. + Statement(Hash, SignedFullStatementWithPVD), } impl BoundToRelayParent for CandidateBackingMessage { fn relay_parent(&self) -> Hash { match self { Self::GetBackedCandidates(hash, _, _) => *hash, - Self::Second(hash, _, _) => *hash, + Self::Second(hash, _, _, _) => *hash, Self::Statement(hash, _) => *hash, } } @@ -942,7 +942,7 @@ pub enum PvfCheckerMessage {} /// A request for the depths a hypothetical candidate would occupy within /// some fragment tree. -#[derive(Debug)] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] pub struct HypotheticalDepthRequest { /// The hash of the potential candidate. pub candidate_hash: CandidateHash, @@ -956,6 +956,21 @@ pub struct HypotheticalDepthRequest { pub fragment_tree_relay_parent: Hash, } +/// A request for the persisted validation data stored in the prospective +/// parachains subsystem. +#[derive(Debug)] +pub struct ProspectiveValidationDataRequest { + /// The para-id of the candidate. + pub para_id: ParaId, + /// The relay-parent of the candidate. + pub candidate_relay_parent: Hash, + /// The parent head-data hash. + pub parent_head_data_hash: Hash, + /// The maximum POV size expected of this candidate. This should be + /// the maximum as configured during the session. + pub max_pov_size: u32, +} + /// Indicates the relay-parents whose fragment tree a candidate /// is present in and the depths of that tree the candidate is present in. pub type FragmentTreeMembership = Vec<(Hash, Vec)>; @@ -992,11 +1007,25 @@ pub enum ProspectiveParachainsMessage { GetHypotheticalDepth(HypotheticalDepthRequest, oneshot::Sender>), /// Get the membership of the candidate in all fragment trees. GetTreeMembership(ParaId, CandidateHash, oneshot::Sender), - /// Get the minimum accepted relay-parent number in the fragment tree - /// for the given relay-parent and para-id. + /// Get the minimum accepted relay-parent number for each para in the fragment tree + /// for the given relay-chain block hash. 
+ /// + /// That is, if the block hash is known and is an active leaf, this returns the + /// minimum relay-parent block number in the same branch of the relay chain which + /// is accepted in the fragment tree for each para-id. + /// + /// If the block hash is not an active leaf, this will return an empty vector. /// - /// That is, if the relay-parent is known and there's a fragment tree for it, - /// in this para-id, this returns the minimum relay-parent block number in the - /// same chain which is accepted in the fragment tree for the para-id. - GetMinimumRelayParent(ParaId, Hash, oneshot::Sender>), + /// Para-IDs which are omitted from this list can be assumed to have no + /// valid candidate relay-parents under the given relay-chain block hash. + /// + /// Para-IDs are returned in no particular order. + GetMinimumRelayParents(Hash, oneshot::Sender>), + /// Get the validation data of some prospective candidate. The candidate doesn't need + /// to be part of any fragment tree, but this only succeeds if the parent head-data and + /// relay-parent are part of some fragment tree. + GetProspectiveValidationData( + ProspectiveValidationDataRequest, + oneshot::Sender>, + ), } diff --git a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs new file mode 100644 index 000000000000..dc10efe519fe --- /dev/null +++ b/node/subsystem-util/src/backing_implicit_view.rs @@ -0,0 +1,687 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use futures::channel::oneshot; +use polkadot_node_subsystem::{ + errors::ChainApiError, + messages::{ChainApiMessage, ProspectiveParachainsMessage}, + SubsystemSender, +}; +use polkadot_primitives::vstaging::{BlockNumber, Hash, Id as ParaId}; + +use std::collections::HashMap; + +// Always aim to retain 1 block before the active leaves. +const MINIMUM_RETAIN_LENGTH: BlockNumber = 2; + +/// Handles the implicit view of the relay chain derived from the immediate view, which +/// is composed of active leaves, and the minimum relay-parents allowed for +/// candidates of various parachains at those leaves. +#[derive(Default, Clone)] +pub struct View { + leaves: HashMap, + block_info_storage: HashMap, +} + +// Minimum relay parents implicitly relative to a particular block. +#[derive(Debug, Clone)] +struct AllowedRelayParents { + // minimum relay parents can only be fetched for active leaves, + // so this will be empty for all blocks that haven't ever been + // witnessed as active leaves. + minimum_relay_parents: HashMap, + // Ancestry, in descending order, starting from the block hash itself down + // to and including the minimum of `minimum_relay_parents`. 
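// Worked example (assumed numbers): for a leaf at block number 10 with
// `minimum_relay_parents[para] == 8`, this vector holds
// `[hash_of(10), hash_of(9), hash_of(8), ...]`, and `allowed_relay_parents_for`
// below returns the first `10 - 8 + 1 = 3` entries for that para.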
+ allowed_relay_parents_contiguous: Vec, +} + +impl AllowedRelayParents { + fn allowed_relay_parents_for( + &self, + para_id: Option, + base_number: BlockNumber, + ) -> &[Hash] { + let para_id = match para_id { + None => return &self.allowed_relay_parents_contiguous[..], + Some(p) => p, + }; + + let para_min = match self.minimum_relay_parents.get(¶_id) { + Some(p) => *p, + None => return &[], + }; + + if base_number < para_min { + return &[] + } + + let diff = base_number - para_min; + + // difference of 0 should lead to slice len of 1 + let slice_len = ((diff + 1) as usize).min(self.allowed_relay_parents_contiguous.len()); + &self.allowed_relay_parents_contiguous[..slice_len] + } +} + +#[derive(Debug, Clone)] +struct ActiveLeafPruningInfo { + // The minimum block in the same branch of the relay-chain that should be + // preserved. + retain_minimum: BlockNumber, +} + +#[derive(Debug, Clone)] +struct BlockInfo { + block_number: BlockNumber, + // If this was previously an active leaf, this will be `Some` + // and is useful for understanding the views of peers in the network + // which may not be in perfect synchrony with our own view. + // + // If they are ahead of us in getting a new leaf, there's nothing we + // can do as it's an unrecognized block hash. But if they're behind us, + // it's useful for us to retain some information about previous leaves' + // implicit views so we can continue to send relevant messages to them + // until they catch up. + maybe_allowed_relay_parents: Option, + parent_hash: Hash, +} + +impl View { + /// Activate a leaf in the view. + /// This will request the minimum relay parents from the + /// Prospective Parachains subsystem for each leaf and will load headers in the ancestry of each + /// leaf in the view as needed. These are the 'implicit ancestors' of the leaf. + /// + /// To maximize reuse of outdated leaves, it's best to activate new leaves before + /// deactivating old ones. + /// + /// This returns a list of para-ids which are relevant to the leaf, + /// and the allowed relay parents for these paras under this leaf can be + /// queried with [`known_allowed_relay_parents_under`]. + /// + /// No-op for known leaves. + pub async fn activate_leaf( + &mut self, + sender: &mut Sender, + leaf_hash: Hash, + ) -> Result, FetchError> + where + Sender: SubsystemSender, + Sender: SubsystemSender, + { + if self.leaves.contains_key(&leaf_hash) { + return Err(FetchError::AlreadyKnown) + } + + let res = fetch_fresh_leaf_and_insert_ancestry( + leaf_hash, + &mut self.block_info_storage, + &mut *sender, + ) + .await; + + match res { + Ok(fetched) => { + // Retain at least `MINIMUM_RETAIN_LENGTH` blocks in storage. + // This helps to avoid Chain API calls when activating leaves in the + // same chain. + let retain_minimum = std::cmp::min( + fetched.minimum_ancestor_number, + fetched.leaf_number.saturating_sub(MINIMUM_RETAIN_LENGTH), + ); + + self.leaves.insert(leaf_hash, ActiveLeafPruningInfo { retain_minimum }); + + Ok(fetched.relevant_paras) + }, + Err(e) => Err(e), + } + } + + /// Deactivate a leaf in the view. This prunes any outdated implicit ancestors as well. + pub fn deactivate_leaf(&mut self, leaf_hash: Hash) { + if self.leaves.remove(&leaf_hash).is_none() { + return + } + + // Prune everything before the minimum out of all leaves, + // pruning absolutely everything if there are no leaves (empty view) + // + // Pruning by block number does leave behind orphaned forks slightly longer + // but the memory overhead is negligible. 
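// Worked example of the retention rule (assumed numbers): a leaf at
// number 100 whose minimum ancestor is 99 records
// `retain_minimum = min(99, 100 - MINIMUM_RETAIN_LENGTH) = 98`, so while
// that leaf is active, pruning keeps blocks 98 and 99 in storage even
// though no fragment tree reaches below 99.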
+ { + let minimum = self.leaves.values().map(|l| l.retain_minimum).min(); + + self.block_info_storage + .retain(|_, i| minimum.map_or(false, |m| i.block_number >= m)); + } + } + + /// Get an iterator over all allowed relay-parents in the view with no particular order. + /// + /// **Important**: not all blocks are guaranteed to be allowed for some leaves, it may + /// happen that a block info is only kept in the view storage because of a retaining rule. + /// + /// For getting relay-parents that are valid for parachain candidates use + /// [`View::known_allowed_relay_parents_under`]. + pub fn all_allowed_relay_parents<'a>(&'a self) -> impl Iterator + 'a { + self.block_info_storage.keys() + } + + /// Get the known, allowed relay-parents that are valid for parachain candidates + /// which could be backed in a child of a given block for a given para ID. + /// + /// This is expressed as a contiguous slice of relay-chain block hashes which may + /// include the provided block hash itself. + /// + /// If `para_id` is `None`, this returns all valid relay-parents across all paras + /// for the leaf. + /// + /// `None` indicates that the block hash isn't part of the implicit view or that + /// there are no known allowed relay parents. + /// + /// This always returns `Some` for active leaves or for blocks that previously + /// were active leaves. + /// + /// This can return the empty slice, which indicates that no relay-parents are allowed + /// for the para, e.g. if the para is not scheduled at the given block hash. + pub fn known_allowed_relay_parents_under( + &self, + block_hash: &Hash, + para_id: Option, + ) -> Option<&[Hash]> { + let block_info = self.block_info_storage.get(block_hash)?; + block_info + .maybe_allowed_relay_parents + .as_ref() + .map(|mins| mins.allowed_relay_parents_for(para_id, block_info.block_number)) + } +} + +/// Errors when fetching a leaf and associated ancestry. +#[derive(Debug)] +pub enum FetchError { + /// Leaf was already known. + AlreadyKnown, + /// The prospective parachains subsystem was unavailable. + ProspectiveParachainsUnavailable, + /// A block header was unavailable. + BlockHeaderUnavailable(Hash, BlockHeaderUnavailableReason), + /// A block header was unavailable due to a chain API error. + ChainApiError(Hash, ChainApiError), + /// The chain API subsystem was unavailable. + ChainApiUnavailable, +} + +/// Reasons a block header might have been unavailable. +#[derive(Debug)] +pub enum BlockHeaderUnavailableReason { + /// Block header simply unknown. + Unknown, + /// Internal Chain API error. + Internal(ChainApiError), + /// The subsystem was unavailable. 
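// Sketch of how a consuming subsystem might handle these errors (assuming a
// `view: View`, an overseer-style `ctx`, and a subsystem-local `LOG_TARGET`;
// none of these bindings are part of this module): `AlreadyKnown` is benign,
// everything else is worth logging but need not be fatal:
//
//     match view.activate_leaf(ctx.sender(), leaf_hash).await {
//         Ok(_relevant_paras) | Err(FetchError::AlreadyKnown) => {},
//         Err(err) => gum::warn!(target: LOG_TARGET, ?err, "activating leaf failed"),
//     }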
+ SubsystemUnavailable, +} + +struct FetchSummary { + minimum_ancestor_number: BlockNumber, + leaf_number: BlockNumber, + relevant_paras: Vec, +} + +async fn fetch_fresh_leaf_and_insert_ancestry( + leaf_hash: Hash, + block_info_storage: &mut HashMap, + sender: &mut Sender, +) -> Result +where + Sender: SubsystemSender, + Sender: SubsystemSender, +{ + let min_relay_parents_raw = { + let (tx, rx) = oneshot::channel(); + sender + .send_message(ProspectiveParachainsMessage::GetMinimumRelayParents(leaf_hash, tx)) + .await; + + match rx.await { + Ok(m) => m, + Err(_) => return Err(FetchError::ProspectiveParachainsUnavailable), + } + }; + + let leaf_header = { + let (tx, rx) = oneshot::channel(); + sender.send_message(ChainApiMessage::BlockHeader(leaf_hash, tx)).await; + + match rx.await { + Ok(Ok(Some(header))) => header, + Ok(Ok(None)) => + return Err(FetchError::BlockHeaderUnavailable( + leaf_hash, + BlockHeaderUnavailableReason::Unknown, + )), + Ok(Err(e)) => + return Err(FetchError::BlockHeaderUnavailable( + leaf_hash, + BlockHeaderUnavailableReason::Internal(e), + )), + Err(_) => + return Err(FetchError::BlockHeaderUnavailable( + leaf_hash, + BlockHeaderUnavailableReason::SubsystemUnavailable, + )), + } + }; + + let min_min = min_relay_parents_raw.iter().map(|x| x.1).min().unwrap_or(leaf_header.number); + let relevant_paras = min_relay_parents_raw.iter().map(|x| x.0).collect(); + let expected_ancestry_len = (leaf_header.number.saturating_sub(min_min) as usize) + 1; + + let ancestry = if leaf_header.number > 0 { + let mut next_ancestor_number = leaf_header.number - 1; + let mut next_ancestor_hash = leaf_header.parent_hash; + + let mut ancestry = Vec::with_capacity(expected_ancestry_len); + ancestry.push(leaf_hash); + + // Ensure all ancestors up to and including `min_min` are in the + // block storage. When views advance incrementally, everything + // should already be present. + while next_ancestor_number >= min_min { + let parent_hash = if let Some(info) = block_info_storage.get(&next_ancestor_hash) { + info.parent_hash + } else { + // load the header and insert into block storage. 
+ let (tx, rx) = oneshot::channel(); + sender.send_message(ChainApiMessage::BlockHeader(next_ancestor_hash, tx)).await; + + let header = match rx.await { + Ok(Ok(Some(header))) => header, + Ok(Ok(None)) => + return Err(FetchError::BlockHeaderUnavailable( + next_ancestor_hash, + BlockHeaderUnavailableReason::Unknown, + )), + Ok(Err(e)) => + return Err(FetchError::BlockHeaderUnavailable( + next_ancestor_hash, + BlockHeaderUnavailableReason::Internal(e), + )), + Err(_) => + return Err(FetchError::BlockHeaderUnavailable( + next_ancestor_hash, + BlockHeaderUnavailableReason::SubsystemUnavailable, + )), + }; + + block_info_storage.insert( + next_ancestor_hash, + BlockInfo { + block_number: next_ancestor_number, + parent_hash: header.parent_hash, + maybe_allowed_relay_parents: None, + }, + ); + + header.parent_hash + }; + + ancestry.push(next_ancestor_hash); + if next_ancestor_number == 0 { + break + } + + next_ancestor_number -= 1; + next_ancestor_hash = parent_hash; + } + + ancestry + } else { + Vec::new() + }; + + let fetched_ancestry = FetchSummary { + minimum_ancestor_number: min_min, + leaf_number: leaf_header.number, + relevant_paras, + }; + + let allowed_relay_parents = AllowedRelayParents { + minimum_relay_parents: min_relay_parents_raw.iter().cloned().collect(), + allowed_relay_parents_contiguous: ancestry, + }; + + let leaf_block_info = BlockInfo { + parent_hash: leaf_header.parent_hash, + block_number: leaf_header.number, + maybe_allowed_relay_parents: Some(allowed_relay_parents), + }; + + block_info_storage.insert(leaf_hash, leaf_block_info); + + Ok(fetched_ancestry) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::TimeoutExt; + use assert_matches::assert_matches; + use futures::future::{join, FutureExt}; + use polkadot_node_subsystem::AllMessages; + use polkadot_node_subsystem_test_helpers::{ + make_subsystem_context, TestSubsystemContextHandle, + }; + use polkadot_overseer::SubsystemContext; + use polkadot_primitives::v2::Header; + use sp_core::testing::TaskExecutor; + use std::time::Duration; + + const PARA_A: ParaId = ParaId::new(0); + const PARA_B: ParaId = ParaId::new(1); + const PARA_C: ParaId = ParaId::new(2); + + const GENESIS_HASH: Hash = Hash::repeat_byte(0xFF); + const GENESIS_NUMBER: BlockNumber = 0; + + // Chains A and B are forks of genesis. + + const CHAIN_A: &[Hash] = + &[Hash::repeat_byte(0x01), Hash::repeat_byte(0x02), Hash::repeat_byte(0x03)]; + + const CHAIN_B: &[Hash] = &[ + Hash::repeat_byte(0x04), + Hash::repeat_byte(0x05), + Hash::repeat_byte(0x06), + Hash::repeat_byte(0x07), + Hash::repeat_byte(0x08), + Hash::repeat_byte(0x09), + ]; + + type VirtualOverseer = TestSubsystemContextHandle; + + const TIMEOUT: Duration = Duration::from_secs(2); + + async fn overseer_recv(virtual_overseer: &mut VirtualOverseer) -> AllMessages { + virtual_overseer + .recv() + .timeout(TIMEOUT) + .await + .expect("overseer `recv` timed out") + } + + fn default_header() -> Header { + Header { + parent_hash: Hash::zero(), + number: 0, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + } + } + + fn get_block_header(chain: &[Hash], hash: &Hash) -> Option
<Header>
{ + let idx = chain.iter().position(|h| h == hash)?; + let parent_hash = idx.checked_sub(1).map(|i| chain[i]).unwrap_or(GENESIS_HASH); + let number = + if *hash == GENESIS_HASH { GENESIS_NUMBER } else { GENESIS_NUMBER + idx as u32 + 1 }; + Some(Header { parent_hash, number, ..default_header() }) + } + + async fn assert_block_header_requests( + virtual_overseer: &mut VirtualOverseer, + chain: &[Hash], + blocks: &[Hash], + ) { + for block in blocks.iter().rev() { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ChainApi( + ChainApiMessage::BlockHeader(hash, tx) + ) => { + assert_eq!(*block, hash, "unexpected block header request"); + let header = if block == &GENESIS_HASH { + Header { + number: GENESIS_NUMBER, + ..default_header() + } + } else { + get_block_header(chain, block).expect("unknown block") + }; + + tx.send(Ok(Some(header))).unwrap(); + } + ); + } + } + + async fn assert_min_relay_parents_request( + virtual_overseer: &mut VirtualOverseer, + leaf: &Hash, + response: Vec<(ParaId, u32)>, + ) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetMinimumRelayParents( + leaf_hash, + tx + ) + ) => { + assert_eq!(*leaf, leaf_hash, "received unexpected leaf hash"); + tx.send(response).unwrap(); + } + ); + } + + #[test] + fn construct_fresh_view() { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool); + + let mut view = View::default(); + + // Chain B. + const PARA_A_MIN_PARENT: u32 = 4; + const PARA_B_MIN_PARENT: u32 = 3; + + let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT), (PARA_B, PARA_B_MIN_PARENT)]; + + let leaf = CHAIN_B.last().unwrap(); + let min_min_idx = (PARA_B_MIN_PARENT - GENESIS_NUMBER - 1) as usize; + + let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { + let paras = res.expect("`activate_leaf` timed out").unwrap(); + assert_eq!(paras, vec![PARA_A, PARA_B]); + }); + let overseer_fut = async { + assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_B, &CHAIN_B[min_min_idx..]).await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + for i in min_min_idx..(CHAIN_B.len() - 1) { + // No allowed relay parents constructed for ancestry. + assert!(view.known_allowed_relay_parents_under(&CHAIN_B[i], None).is_none()); + } + + let leaf_info = + view.block_info_storage.get(leaf).expect("block must be present in storage"); + assert_matches!( + leaf_info.maybe_allowed_relay_parents, + Some(ref allowed_relay_parents) => { + assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_A], PARA_A_MIN_PARENT); + assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_B], PARA_B_MIN_PARENT); + let expected_ancestry: Vec = + CHAIN_B[min_min_idx..].iter().rev().copied().collect(); + assert_eq!( + allowed_relay_parents.allowed_relay_parents_contiguous, + expected_ancestry + ); + } + ); + + // Suppose the whole test chain A is allowed up to genesis for para C. 
+ const PARA_C_MIN_PARENT: u32 = 0; + let prospective_response = vec![(PARA_C, PARA_C_MIN_PARENT)]; + let leaf = CHAIN_A.last().unwrap(); + let blocks = [&[GENESIS_HASH], CHAIN_A].concat(); + + let fut = view.activate_leaf(ctx.sender(), *leaf).timeout(TIMEOUT).map(|res| { + let paras = res.expect("`activate_leaf` timed out").unwrap(); + assert_eq!(paras, vec![PARA_C]); + }); + let overseer_fut = async { + assert_min_relay_parents_request(&mut ctx_handle, leaf, prospective_response).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_A, &blocks).await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + assert_eq!(view.leaves.len(), 2); + } + + #[test] + fn reuse_block_info_storage() { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool); + + let mut view = View::default(); + + const PARA_A_MIN_PARENT: u32 = 1; + let leaf_a_number = 3; + let leaf_a = CHAIN_B[leaf_a_number - 1]; + let min_min_idx = (PARA_A_MIN_PARENT - GENESIS_NUMBER - 1) as usize; + + let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT)]; + + let fut = view.activate_leaf(ctx.sender(), leaf_a).timeout(TIMEOUT).map(|res| { + let paras = res.expect("`activate_leaf` timed out").unwrap(); + assert_eq!(paras, vec![PARA_A]); + }); + let overseer_fut = async { + assert_min_relay_parents_request(&mut ctx_handle, &leaf_a, prospective_response).await; + assert_block_header_requests( + &mut ctx_handle, + CHAIN_B, + &CHAIN_B[min_min_idx..leaf_a_number], + ) + .await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + // Blocks up to the 3rd are present in storage. + const PARA_B_MIN_PARENT: u32 = 2; + let leaf_b_number = 5; + let leaf_b = CHAIN_B[leaf_b_number - 1]; + + let prospective_response = vec![(PARA_B, PARA_B_MIN_PARENT)]; + + let fut = view.activate_leaf(ctx.sender(), leaf_b).timeout(TIMEOUT).map(|res| { + let paras = res.expect("`activate_leaf` timed out").unwrap(); + assert_eq!(paras, vec![PARA_B]); + }); + let overseer_fut = async { + assert_min_relay_parents_request(&mut ctx_handle, &leaf_b, prospective_response).await; + assert_block_header_requests( + &mut ctx_handle, + CHAIN_B, + &CHAIN_B[leaf_a_number..leaf_b_number], // Note the expected range. + ) + .await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + // Allowed relay parents for leaf A are preserved. 
+ let leaf_a_info = + view.block_info_storage.get(&leaf_a).expect("block must be present in storage"); + assert_matches!( + leaf_a_info.maybe_allowed_relay_parents, + Some(ref allowed_relay_parents) => { + assert_eq!(allowed_relay_parents.minimum_relay_parents[&PARA_A], PARA_A_MIN_PARENT); + let expected_ancestry: Vec = + CHAIN_B[min_min_idx..leaf_a_number].iter().rev().copied().collect(); + let ancestry = view.known_allowed_relay_parents_under(&leaf_a, Some(PARA_A)).unwrap().to_vec(); + assert_eq!(ancestry, expected_ancestry); + } + ); + } + + #[test] + fn pruning() { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool); + + let mut view = View::default(); + + const PARA_A_MIN_PARENT: u32 = 3; + let leaf_a = CHAIN_B.iter().rev().nth(1).unwrap(); + let leaf_a_idx = CHAIN_B.len() - 2; + let min_a_idx = (PARA_A_MIN_PARENT - GENESIS_NUMBER - 1) as usize; + + let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT)]; + + let fut = view + .activate_leaf(ctx.sender(), *leaf_a) + .timeout(TIMEOUT) + .map(|res| res.unwrap().unwrap()); + let overseer_fut = async { + assert_min_relay_parents_request(&mut ctx_handle, &leaf_a, prospective_response).await; + assert_block_header_requests( + &mut ctx_handle, + CHAIN_B, + &CHAIN_B[min_a_idx..=leaf_a_idx], + ) + .await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + // Also activate a leaf with a lesser minimum relay parent. + const PARA_B_MIN_PARENT: u32 = 2; + let leaf_b = CHAIN_B.last().unwrap(); + let min_b_idx = (PARA_B_MIN_PARENT - GENESIS_NUMBER - 1) as usize; + + let prospective_response = vec![(PARA_B, PARA_B_MIN_PARENT)]; + // Headers will be requested for the minimum block and the leaf. + let blocks = &[CHAIN_B[min_b_idx], *leaf_b]; + + let fut = view + .activate_leaf(ctx.sender(), *leaf_b) + .timeout(TIMEOUT) + .map(|res| res.expect("`activate_leaf` timed out").unwrap()); + let overseer_fut = async { + assert_min_relay_parents_request(&mut ctx_handle, &leaf_b, prospective_response).await; + assert_block_header_requests(&mut ctx_handle, CHAIN_B, blocks).await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + // Prune implicit ancestor (no-op). + let block_info_len = view.block_info_storage.len(); + view.deactivate_leaf(CHAIN_B[leaf_a_idx - 1]); + assert_eq!(block_info_len, view.block_info_storage.len()); + + // Prune a leaf with a greater minimum relay parent. + view.deactivate_leaf(*leaf_b); + for hash in CHAIN_B.iter().take(PARA_B_MIN_PARENT as usize) { + assert!(!view.block_info_storage.contains_key(hash)); + } + + // Prune the last leaf. + view.deactivate_leaf(*leaf_a); + assert!(view.block_info_storage.is_empty()); + } +} diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index ef61400eb0f9..b30742e78aba 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -64,6 +64,10 @@ pub mod reexports { pub use polkadot_overseer::gen::{SpawnedSubsystem, Spawner, Subsystem, SubsystemContext}; } +/// A utility for managing the implicit view of the relay-chain derived from active +/// leaves and the minimum allowed relay-parents that parachain candidates can have +/// and be backed in those leaves' children. +pub mod backing_implicit_view; /// An emulator for node-side code to predict the results of on-chain parachain inclusion /// and predict future constraints. 
pub mod inclusion_emulator; diff --git a/primitives/src/v2/signed.rs b/primitives/src/v2/signed.rs index 28c3b790039f..bebc2c0208c9 100644 --- a/primitives/src/v2/signed.rs +++ b/primitives/src/v2/signed.rs @@ -157,7 +157,6 @@ impl, RealPayload: Encode> Signed Result, (Self, SuperPayload)> where SuperPayload: EncodeAs, - Payload: Encode, { if claimed.encode_as() == self.0.payload.encode_as() { Ok(Signed(UncheckedSigned { @@ -170,6 +169,34 @@ impl, RealPayload: Encode> Signed( + self, + convert: F, + ) -> Result, SuperPayload> + where + F: FnOnce(Payload) -> SuperPayload, + SuperPayload: EncodeAs, + { + let expected_encode_as = self.0.payload.encode_as(); + let converted = convert(self.0.payload); + if converted.encode_as() == expected_encode_as { + Ok(Signed(UncheckedSigned { + payload: converted, + validator_index: self.0.validator_index, + signature: self.0.signature, + real_payload: sp_std::marker::PhantomData, + })) + } else { + Err(converted) + } + } } // We can't bound this on `Payload: Into` because that conversion consumes diff --git a/runtime/parachains/src/runtime_api_impl/vstaging.rs b/runtime/parachains/src/runtime_api_impl/vstaging.rs index 8715cdc53121..edde48f4d984 100644 --- a/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -25,3 +25,7 @@ pub fn get_session_disputes( ) -> Vec<(SessionIndex, CandidateHash, DisputeState)> { >::disputes() } + +// TODO [now]: implicit `validity_constraints`. Ensure that `min_relay_parent` +// never goes lower than the point at which asynchronous backing was enabled. +// Also, never cross session boundaries. diff --git a/statement-table/src/generic.rs b/statement-table/src/generic.rs index d899c54d1d53..eb5def0cef44 100644 --- a/statement-table/src/generic.rs +++ b/statement-table/src/generic.rs @@ -61,6 +61,14 @@ pub trait Context { fn requisite_votes(&self, group: &Self::GroupId) -> usize; } +/// Table configuration. +pub struct Config { + /// When this is true, the table will allow multiple seconded candidates + /// per authority. This flag means that higher-level code is responsible for + /// bounding the number of candidates. + pub allow_multiple_seconded: bool, +} + /// Statements circulated among peers. #[derive(PartialEq, Eq, Debug, Clone, Encode, Decode)] pub enum Statement { @@ -270,12 +278,12 @@ impl CandidateData { // authority metadata struct AuthorityData { - proposal: Option<(Ctx::Digest, Ctx::Signature)>, + proposals: Vec<(Ctx::Digest, Ctx::Signature)>, } impl Default for AuthorityData { fn default() -> Self { - AuthorityData { proposal: None } + AuthorityData { proposals: Vec::new() } } } @@ -290,19 +298,20 @@ pub struct Table { authority_data: HashMap>, detected_misbehavior: HashMap>>, candidate_votes: HashMap>, + config: Config, } -impl Default for Table { - fn default() -> Self { +impl Table { + /// Create a new `Table` from a `Config`. + pub fn new(config: Config) -> Self { Table { - authority_data: HashMap::new(), - detected_misbehavior: HashMap::new(), - candidate_votes: HashMap::new(), + authority_data: HashMap::default(), + detected_misbehavior: HashMap::default(), + candidate_votes: HashMap::default(), + config, } } -} -impl Table { /// Get the attested candidate for `digest`. /// /// Returns `Some(_)` if the candidate exists and is includable. @@ -393,7 +402,9 @@ impl Table { // note misbehavior. 
let existing = occ.get_mut(); - if let Some((ref old_digest, ref old_sig)) = existing.proposal { + if !self.config.allow_multiple_seconded && existing.proposals.len() == 1 { + let &(ref old_digest, ref old_sig) = &existing.proposals[0]; + if old_digest != &digest { const EXISTENCE_PROOF: &str = "when proposal first received from authority, candidate \ @@ -413,15 +424,19 @@ impl Table { })) } + false + } else if self.config.allow_multiple_seconded && + existing.proposals.iter().find(|(ref od, _)| od == &digest).is_some() + { false } else { - existing.proposal = Some((digest.clone(), signature.clone())); + existing.proposals.push((digest.clone(), signature.clone())); true } }, Entry::Vacant(vacant) => { vacant - .insert(AuthorityData { proposal: Some((digest.clone(), signature.clone())) }); + .insert(AuthorityData { proposals: vec![(digest.clone(), signature.clone())] }); true }, }; @@ -571,8 +586,12 @@ mod tests { use super::*; use std::collections::HashMap; - fn create() -> Table { - Table::default() + fn create_single_seconded() -> Table { + Table::new(Config { allow_multiple_seconded: false }) + } + + fn create_many_seconded() -> Table { + Table::new(Config { allow_multiple_seconded: true }) } #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] @@ -630,7 +649,7 @@ mod tests { } #[test] - fn submitting_two_candidates_is_misbehavior() { + fn submitting_two_candidates_can_be_misbehavior() { let context = TestContext { authorities: { let mut map = HashMap::new(); @@ -639,7 +658,7 @@ mod tests { }, }; - let mut table = create(); + let mut table = create_single_seconded(); let statement_a = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -665,6 +684,36 @@ mod tests { ); } + #[test] + fn submitting_two_candidates_can_be_allowed() { + let context = TestContext { + authorities: { + let mut map = HashMap::new(); + map.insert(AuthorityId(1), GroupId(2)); + map + }, + }; + + let mut table = create_many_seconded(); + let statement_a = SignedStatement { + statement: Statement::Seconded(Candidate(2, 100)), + signature: Signature(1), + sender: AuthorityId(1), + }; + + let statement_b = SignedStatement { + statement: Statement::Seconded(Candidate(2, 999)), + signature: Signature(1), + sender: AuthorityId(1), + }; + + table.import_statement(&context, statement_a); + assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); + + table.import_statement(&context, statement_b); + assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); + } + #[test] fn submitting_candidate_from_wrong_group_is_misbehavior() { let context = TestContext { @@ -675,7 +724,7 @@ mod tests { }, }; - let mut table = create(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -707,7 +756,7 @@ mod tests { }, }; - let mut table = create(); + let mut table = create_single_seconded(); let candidate_a = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), @@ -751,7 +800,7 @@ mod tests { }, }; - let mut table = create(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -781,7 +830,7 @@ mod tests { }, }; - let mut table = create(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -849,7 +898,7 @@ mod tests { }; // have 2/3 validity guarantors 
note validity. - let mut table = create(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -883,7 +932,7 @@ mod tests { }, }; - let mut table = create(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), @@ -910,7 +959,7 @@ mod tests { }, }; - let mut table = create(); + let mut table = create_single_seconded(); let statement = SignedStatement { statement: Statement::Seconded(Candidate(2, 100)), signature: Signature(1), diff --git a/statement-table/src/lib.rs b/statement-table/src/lib.rs index a3fbbb1fdaaa..3bd586f09da9 100644 --- a/statement-table/src/lib.rs +++ b/statement-table/src/lib.rs @@ -16,7 +16,7 @@ pub mod generic; -pub use generic::{Context, Table}; +pub use generic::{Config, Context, Table}; /// Concrete instantiations suitable for v2 primitives. pub mod v2 { From 2c9296d7dca897c84328520fd2d78778e0c68b79 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Wed, 13 Jul 2022 07:24:46 +0300 Subject: [PATCH 05/76] Track occupied depth in backing per parachain (#5778) --- node/core/backing/src/lib.rs | 39 +++- node/core/backing/src/tests/mod.rs | 14 +- .../src/tests/prospective_parachains.rs | 217 +++++++++++++++--- 3 files changed, 214 insertions(+), 56 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 9d5f521da1f8..2b619aca5a16 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -244,12 +244,13 @@ impl ProspectiveParachainsMode { struct ActiveLeafState { prospective_parachains_mode: ProspectiveParachainsMode, /// The candidates seconded at various depths under this active - /// leaf. A candidate can only be seconded when its hypothetical - /// depth under every active leaf has an empty entry in this map. + /// leaf with respect to parachain id. A candidate can only be + /// seconded when its hypothetical depth under every active leaf + /// has an empty entry in this map. /// /// When prospective parachains are disabled, the only depth /// which is allowed is 0. - seconded_at_depth: BTreeMap, + seconded_at_depth: HashMap>, } /// The state of the subsystem. @@ -869,7 +870,7 @@ async fn handle_active_leaves_update( // when prospective parachains are disabled is the leaf hash and 0, // respectively. We've just learned about the leaf hash, so we cannot // have any candidates seconded with it as a relay-parent yet. - seconded_at_depth: BTreeMap::new(), + seconded_at_depth: HashMap::new(), }, ); @@ -895,7 +896,8 @@ async fn handle_active_leaves_update( for (candidate_hash, para_id) in remaining_seconded { let (tx, rx) = oneshot::channel(); - membership_answers.push(rx.map_ok(move |membership| (candidate_hash, membership))); + membership_answers + .push(rx.map_ok(move |membership| (para_id, candidate_hash, membership))); ctx.send_message(ProspectiveParachainsMessage::GetTreeMembership( para_id, @@ -905,7 +907,7 @@ async fn handle_active_leaves_update( .await; } - let mut seconded_at_depth = BTreeMap::new(); + let mut seconded_at_depth = HashMap::new(); for response in membership_answers.next().await { match response { Err(oneshot::Canceled) => { @@ -916,15 +918,17 @@ async fn handle_active_leaves_update( continue }, - Ok((candidate_hash, membership)) => { + Ok((para_id, candidate_hash, membership)) => { // This request gives membership in all fragment trees. 
We have some // wasted data here, and it can be optimized if it proves // relevant to performance. if let Some((_, depths)) = membership.into_iter().find(|(leaf_hash, _)| leaf_hash == &leaf.hash) { + let para_entry: &mut BTreeMap = + seconded_at_depth.entry(para_id).or_default(); for depth in depths { - seconded_at_depth.insert(depth, candidate_hash); + para_entry.insert(depth, candidate_hash); } } }, @@ -1163,7 +1167,11 @@ async fn seconding_sanity_check( responses.push(rx.map_ok(move |depths| (depths, head, leaf_state)).boxed()); } else { if head == &candidate_relay_parent { - if leaf_state.seconded_at_depth.contains_key(&0) { + if leaf_state + .seconded_at_depth + .get(&candidate_para) + .map_or(false, |occupied| occupied.contains_key(&0)) + { // The leaf is already occupied. return SecondingAllowed::No } @@ -1188,7 +1196,11 @@ async fn seconding_sanity_check( }, Ok((depths, head, leaf_state)) => { for depth in &depths { - if leaf_state.seconded_at_depth.contains_key(&depth) { + if leaf_state + .seconded_at_depth + .get(&candidate_para) + .map_or(false, |occupied| occupied.contains_key(&depth)) + { gum::debug!( target: LOG_TARGET, ?candidate_hash, @@ -1323,8 +1335,13 @@ async fn handle_validated_candidate_command( Some(d) => d, }; + let seconded_at_depth = leaf_data + .seconded_at_depth + .entry(candidate.descriptor().para_id) + .or_default(); + for depth in depths { - leaf_data.seconded_at_depth.insert(depth, candidate_hash); + seconded_at_depth.insert(depth, candidate_hash); } } diff --git a/node/core/backing/src/tests/mod.rs b/node/core/backing/src/tests/mod.rs index 402462913749..59334fc179bc 100644 --- a/node/core/backing/src/tests/mod.rs +++ b/node/core/backing/src/tests/mod.rs @@ -32,8 +32,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::v2::{ - CandidateDescriptor, CollatorId, GroupRotationInfo, HeadData, PersistedValidationData, - ScheduledCore, + CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, ScheduledCore, }; use sp_application_crypto::AppKey; use sp_keyring::Sr25519Keyring; @@ -90,9 +89,8 @@ impl Default for TestState { fn default() -> Self { let chain_a = ParaId::from(1); let chain_b = ParaId::from(2); - let thread_a = ParaId::from(3); - let chain_ids = vec![chain_a, chain_b, thread_a]; + let chain_ids = vec![chain_a, chain_b]; let validators = vec![ Sr25519Keyring::Alice, @@ -114,25 +112,21 @@ impl Default for TestState { let validator_public = validator_pubkeys(&validators); - let validator_groups = vec![vec![2, 0, 3, 5], vec![1], vec![4]] + let validator_groups = vec![vec![2, 0, 3, 5], vec![1]] .into_iter() .map(|g| g.into_iter().map(ValidatorIndex).collect()) .collect(); let group_rotation_info = GroupRotationInfo { session_start_block: 0, group_rotation_frequency: 100, now: 1 }; - let thread_collator: CollatorId = Sr25519Keyring::Two.public().into(); let availability_cores = vec![ CoreState::Scheduled(ScheduledCore { para_id: chain_a, collator: None }), CoreState::Scheduled(ScheduledCore { para_id: chain_b, collator: None }), - CoreState::Scheduled(ScheduledCore { - para_id: thread_a, - collator: Some(thread_collator.clone()), - }), ]; let mut head_data = HashMap::new(); head_data.insert(chain_a, HeadData(vec![4, 5, 6])); + head_data.insert(chain_b, HeadData(vec![5, 6, 7])); let relay_parent = Hash::repeat_byte(5); diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 5be62b344980..749da6f10937 
100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -78,46 +78,49 @@ async fn activate_leaf( let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) .take(ancestry_len as usize); let ancestry_numbers = (min_min..=leaf_number).rev(); - let mut ancestry_iter = ancestry_hashes.clone().zip(ancestry_numbers).peekable(); + let ancestry_iter = ancestry_hashes.zip(ancestry_numbers).peekable(); let mut next_overseer_message = None; // How many blocks were actually requested. let mut requested_len = 0; - loop { - let (hash, number) = match ancestry_iter.next() { - Some((hash, number)) => (hash, number), - None => break, - }; + { + let mut ancestry_iter = ancestry_iter.clone(); + loop { + let (hash, number) = match ancestry_iter.next() { + Some((hash, number)) => (hash, number), + None => break, + }; + + // May be `None` for the last element. + let parent_hash = + ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash)); + + let msg = virtual_overseer.recv().await; + // It may happen that some blocks were cached by implicit view, + // reuse the message. + if !matches!(&msg, AllMessages::ChainApi(ChainApiMessage::BlockHeader(..))) { + next_overseer_message.replace(msg); + break + } - // May be `None` for the last element. - let parent_hash = - ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash)); + assert_matches!( + msg, + AllMessages::ChainApi( + ChainApiMessage::BlockHeader(_hash, tx) + ) if _hash == hash => { + let header = Header { + parent_hash, + number, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + }; - let msg = virtual_overseer.recv().await; - // It may happen that some blocks were cached by implicit view, - // reuse the message. - if !matches!(&msg, AllMessages::ChainApi(ChainApiMessage::BlockHeader(..))) { - next_overseer_message.replace(msg); - break + tx.send(Ok(Some(header))).unwrap(); + } + ); + requested_len += 1; } - - assert_matches!( - msg, - AllMessages::ChainApi( - ChainApiMessage::BlockHeader(_hash, tx) - ) if _hash == hash => { - let header = Header { - parent_hash, - number, - state_root: Hash::zero(), - extrinsics_root: Hash::zero(), - digest: Default::default(), - }; - - tx.send(Ok(Some(header))).unwrap(); - } - ); - requested_len += 1; } for _ in 0..seconded_in_view { @@ -135,7 +138,7 @@ async fn activate_leaf( ); } - for hash in ancestry_hashes.take(requested_len) { + for (hash, number) in ancestry_iter.take(requested_len) { // Check that subsystem job issues a request for a validator set. let msg = match next_overseer_message.take() { Some(msg) => msg, @@ -156,7 +159,9 @@ async fn activate_leaf( AllMessages::RuntimeApi( RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx)) ) if parent == hash => { - tx.send(Ok(test_state.validator_groups.clone())).unwrap(); + let (validator_groups, mut group_rotation_info) = test_state.validator_groups.clone(); + group_rotation_info.now = number; + tx.send(Ok((validator_groups, group_rotation_info))).unwrap(); } ); @@ -1350,3 +1355,145 @@ fn concurrent_dependent_candidates() { virtual_overseer }); } + +// Test that multiple candidates from different paras can occupy the same depth +// in a given relay parent. 
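// A hedged, self-contained sketch of the bookkeeping the test below exercises.
// The types are simplified stand-ins (plain integers for `ParaId` and
// `CandidateHash` are assumptions, not the real primitives): `seconded_at_depth`
// is now keyed by para first, so a depth-0 entry for one para no longer blocks
// a depth-0 entry for another under the same leaf.
fn _occupancy_sketch() {
    use std::collections::{BTreeMap, HashMap};

    type ParaId = u32;
    type CandidateHash = u64;

    let mut seconded_at_depth: HashMap<ParaId, BTreeMap<u32, CandidateHash>> =
        HashMap::new();
    seconded_at_depth.entry(1).or_default().insert(0, 0xaa);
    seconded_at_depth.entry(2).or_default().insert(0, 0xbb);
    // Both paras occupy depth 0 without conflict.
    assert_eq!(seconded_at_depth.len(), 2);
}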
+#[test] +fn seconding_sanity_check_occupy_same_depth() { + let test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate `a` is seconded in a parent of the activated `leaf`. + const LEAF_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_DEPTH: BlockNumber = 3; + + let para_id_a = test_state.chain_ids[0]; + let para_id_b = test_state.chain_ids[1]; + + let leaf_hash = Hash::from_low_u64_be(130); + let leaf_parent = get_parent_hash(leaf_hash); + + let activated = ActivatedLeaf { + hash: leaf_hash, + number: LEAF_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + + let min_block_number = LEAF_BLOCK_NUMBER - LEAF_DEPTH; + let min_relay_parents = vec![(para_id_a, min_block_number), (para_id_b, min_block_number)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data_a = test_state.head_data.get(¶_id_a).unwrap(); + let expected_head_data_b = test_state.head_data.get(¶_id_b).unwrap(); + + let pov_hash = pov.hash(); + let candidate_a = TestCandidateBuilder { + para_id: para_id_a, + relay_parent: leaf_parent, + pov_hash, + head_data: expected_head_data_a.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + }; + + let mut candidate_b = candidate_a.clone(); + candidate_b.para_id = para_id_b; + candidate_b.head_data = expected_head_data_b.clone(); + // A rotation happens, test validator is assigned to second para here. + candidate_b.relay_parent = leaf_hash; + + let candidate_a = (candidate_a.build(), expected_head_data_a, para_id_a); + let candidate_b = (candidate_b.build(), expected_head_data_b, para_id_b); + + for candidate in &[candidate_a, candidate_b] { + let (candidate, expected_head_data, para_id) = candidate; + let second = CandidateBackingMessage::Second( + leaf_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + candidate.descriptor().relay_parent, + &candidate, + &pov, + &pvd, + &validation_code, + *expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let expected_request_a = vec![( + HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: *para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: candidate.descriptor().relay_parent, + fragment_tree_relay_parent: leaf_hash, + }, + vec![0, 1], // Send the same membership for both candidates. + )]; + + assert_hypothetical_depth_requests(&mut virtual_overseer, expected_request_a.clone()) + .await; + + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded( + candidate_para, + candidate_receipt, + _pvd, + tx, + ), + ) if &candidate_receipt == candidate && candidate_para == *para_id && pvd == _pvd => { + // Any non-empty response will do. 
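// The value sent here is a membership vector: each element pairs a
// fragment-tree relay-parent with the depths this candidate occupies in
// that tree, and the subsystem only needs it to be non-empty.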
+ tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + } + ); + + test_dispute_coordinator_notifications( + &mut virtual_overseer, + candidate.hash(), + test_state.session(), + vec![ValidatorIndex(0)], + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == candidate.descriptor().relay_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(candidate.descriptor().relay_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + } + + virtual_overseer + }); +} From d63ecc848c57c4f94b8b5705bb6050327ef2e611 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Wed, 7 Sep 2022 00:54:59 +0300 Subject: [PATCH 06/76] provisioner: async backing changes (#5711) * Provisioner changes for async backing * Select candidates based on prospective paras mode * Revert naming * Update tests * Update TODO comment * review --- node/core/provisioner/src/error.rs | 6 + node/core/provisioner/src/lib.rs | 215 ++++++++++++++++++++++++++--- node/core/provisioner/src/tests.rs | 153 +++++++++++++++++--- node/overseer/src/lib.rs | 1 + 4 files changed, 336 insertions(+), 39 deletions(-) diff --git a/node/core/provisioner/src/error.rs b/node/core/provisioner/src/error.rs index 4589ab02cf31..c472d9c311e2 100644 --- a/node/core/provisioner/src/error.rs +++ b/node/core/provisioner/src/error.rs @@ -46,6 +46,12 @@ pub enum Error { #[error("failed to get votes on dispute")] CanceledCandidateVotes(#[source] oneshot::Canceled), + #[error("failed to get backable candidate from prospective parachains")] + CanceledBackableCandidate(#[source] oneshot::Canceled), + + #[error("failed to get Runtime API version")] + CanceledRuntimeApiVersion(#[source] oneshot::Canceled), + #[error(transparent)] ChainApi(#[from] ChainApiError), diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 66602ac60583..90b606caa7b6 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -29,8 +29,9 @@ use polkadot_node_primitives::CandidateVotes; use polkadot_node_subsystem::{ jaeger, messages::{ - CandidateBackingMessage, ChainApiMessage, DisputeCoordinatorMessage, ProvisionableData, - ProvisionerInherentData, ProvisionerMessage, + CandidateBackingMessage, ChainApiMessage, DisputeCoordinatorMessage, + ProspectiveParachainsMessage, ProvisionableData, ProvisionerInherentData, + ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, }, overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, LeafStatus, OverseerSignal, PerLeafSpan, SpawnedSubsystem, SubsystemError, @@ -38,8 +39,8 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_util::{request_availability_cores, request_persisted_validation_data}; use polkadot_primitives::v2::{ BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreState, DisputeState, - DisputeStatement, DisputeStatementSet, Hash, MultiDisputeStatementSet, OccupiedCoreAssumption, - SessionIndex, SignedAvailabilityBitfield, ValidatorIndex, + DisputeStatement, DisputeStatementSet, Hash, Id as ParaId, MultiDisputeStatementSet, + OccupiedCoreAssumption, SessionIndex, SignedAvailabilityBitfield, ValidatorIndex, }; use std::collections::{BTreeMap, HashMap, HashSet}; @@ -70,10 +71,21 @@ impl ProvisionerSubsystem { } } 
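// The enum added below makes the two provisioning strategies explicit. As the
// `prospective_parachains_mode` helper later in this patch shows, the mode is
// derived from the runtime API version at each activated leaf: version 3 (the
// staging version, used until a dedicated runtime API lands) enables
// prospective parachains, while version 2 falls back to locally tracked
// backed candidates.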
+#[derive(Debug, Clone)] +enum ProspectiveParachainsMode { + Enabled, + Disabled { + // Without prospective parachains it's necessary + // to track backed candidates to choose from when assembling + // a relay chain block. + backed_candidates: Vec, + }, +} + /// A per-relay-parent state for the provisioning subsystem. pub struct PerRelayParent { leaf: ActivatedLeaf, - backed_candidates: Vec, + prospective_parachains_mode: ProspectiveParachainsMode, signed_bitfields: Vec, is_inherent_ready: bool, awaiting_inherent: Vec>, @@ -81,12 +93,12 @@ pub struct PerRelayParent { } impl PerRelayParent { - fn new(leaf: ActivatedLeaf) -> Self { + fn new(leaf: ActivatedLeaf, prospective_parachains_mode: ProspectiveParachainsMode) -> Self { let span = PerLeafSpan::new(leaf.span.clone(), "provisioner"); Self { leaf, - backed_candidates: Vec::new(), + prospective_parachains_mode, signed_bitfields: Vec::new(), is_inherent_ready: false, awaiting_inherent: Vec::new(), @@ -141,7 +153,7 @@ async fn run_iteration( from_overseer = ctx.recv().fuse() => { match from_overseer? { FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => - handle_active_leaves_update(update, per_relay_parent, inherent_delays), + handle_active_leaves_update(ctx.sender(), update, per_relay_parent, inherent_delays).await?, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), FromOrchestra::Communication { msg } => { @@ -163,20 +175,55 @@ async fn run_iteration( } } -fn handle_active_leaves_update( +async fn prospective_parachains_mode( + sender: &mut impl overseer::ProvisionerSenderTrait, + leaf_hash: Hash, +) -> Result { + // TODO: call a Runtime API once staging version is available + // https://github.com/paritytech/substrate/discussions/11338 + // + // Implementation should probably be shared with backing. + + let (tx, rx) = oneshot::channel(); + sender + .send_message(RuntimeApiMessage::Request(leaf_hash, RuntimeApiRequest::Version(tx))) + .await; + + let version = rx.await.map_err(Error::CanceledRuntimeApiVersion)?.map_err(Error::Runtime)?; + + if version == 3 { + Ok(ProspectiveParachainsMode::Enabled) + } else { + if version != 2 { + gum::warn!( + target: LOG_TARGET, + "Runtime API version is {}, expected 2 or 3. 
Prospective parachains are disabled", + version + ); + } + Ok(ProspectiveParachainsMode::Disabled { backed_candidates: Vec::new() }) + } +} + +async fn handle_active_leaves_update( + sender: &mut impl overseer::ProvisionerSenderTrait, update: ActiveLeavesUpdate, per_relay_parent: &mut HashMap, inherent_delays: &mut InherentDelays, -) { +) -> Result<(), Error> { for deactivated in &update.deactivated { per_relay_parent.remove(deactivated); } for leaf in update.activated { + let prospective_parachains_mode = prospective_parachains_mode(sender, leaf.hash).await?; + let delay_fut = Delay::new(PRE_PROPOSE_TIMEOUT).map(move |_| leaf.hash).boxed(); - per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf)); + per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf, prospective_parachains_mode)); inherent_delays.push(delay_fut); } + + Ok(()) } #[overseer::contextbounds(Provisioner, prefix = self::overseer)] @@ -219,7 +266,7 @@ async fn send_inherent_data_bg( ) -> Result<(), Error> { let leaf = per_relay_parent.leaf.clone(); let signed_bitfields = per_relay_parent.signed_bitfields.clone(); - let backed_candidates = per_relay_parent.backed_candidates.clone(); + let prospective_parachains_mode = per_relay_parent.prospective_parachains_mode.clone(); let span = per_relay_parent.span.child("req-inherent-data"); let mut sender = ctx.sender().clone(); @@ -231,7 +278,7 @@ async fn send_inherent_data_bg( if let Err(err) = send_inherent_data( &leaf, &signed_bitfields, - &backed_candidates, + &prospective_parachains_mode, return_senders, &mut sender, &metrics, @@ -245,7 +292,6 @@ async fn send_inherent_data_bg( gum::debug!( target: LOG_TARGET, signed_bitfield_count = signed_bitfields.len(), - backed_candidates_count = backed_candidates.len(), leaf_hash = ?leaf.hash, "inherent data sent successfully" ); @@ -279,7 +325,11 @@ fn note_provisionable_data( .child("provisionable-backed") .with_candidate(candidate_hash) .with_para_id(backed_candidate.descriptor().para_id); - per_relay_parent.backed_candidates.push(backed_candidate) + if let ProspectiveParachainsMode::Disabled { backed_candidates } = + &mut per_relay_parent.prospective_parachains_mode + { + backed_candidates.push(backed_candidate) + } }, _ => {}, } @@ -307,7 +357,7 @@ type CoreAvailability = BitVec; async fn send_inherent_data( leaf: &ActivatedLeaf, bitfields: &[SignedAvailabilityBitfield], - candidates: &[CandidateReceipt], + prospective_parachains_mode: &ProspectiveParachainsMode, return_senders: Vec>, from_job: &mut impl overseer::ProvisionerSenderTrait, metrics: &Metrics, @@ -326,8 +376,14 @@ async fn send_inherent_data( select_availability_bitfields(&availability_cores, bitfields, &leaf.hash), LeafStatus::Stale => Vec::new(), }; - let candidates = - select_candidates(&availability_cores, &bitfields, candidates, leaf.hash, from_job).await?; + let candidates = select_candidates( + &availability_cores, + &bitfields, + prospective_parachains_mode, + leaf.hash, + from_job, + ) + .await?; gum::debug!( target: LOG_TARGET, @@ -422,14 +478,16 @@ fn select_availability_bitfields( selected.into_iter().map(|(_, b)| b).collect() } -/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. -async fn select_candidates( +/// Selects candidates from tracked ones to note in a relay chain block. +/// +/// Should be called when prospective parachains are disabled. 
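// Note the signature change relative to the old `select_candidates`: both
// selection paths now return `Vec<CandidateHash>`, and the corresponding
// `BackedCandidate`s are fetched afterwards in a single
// `CandidateBackingMessage::GetBackedCandidates` round trip (see
// `select_candidates` below).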
+async fn select_candidate_hashes_from_tracked( availability_cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], candidates: &[CandidateReceipt], relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, -) -> Result, Error> { +) -> Result, Error> { let block_number = get_block_number_under_construction(relay_parent, sender).await?; let mut selected_candidates = @@ -503,6 +561,100 @@ async fn select_candidates( } } + Ok(selected_candidates) +} + +/// Requests backable candidates from Prospective Parachains subsystem +/// based on core states. +/// +/// Should be called when prospective parachains are enabled. +async fn request_backable_candidates( + availability_cores: &[CoreState], + bitfields: &[SignedAvailabilityBitfield], + relay_parent: Hash, + sender: &mut impl overseer::ProvisionerSenderTrait, +) -> Result, Error> { + let block_number = get_block_number_under_construction(relay_parent, sender).await?; + + let mut selected_candidates = Vec::with_capacity(availability_cores.len()); + + for (core_idx, core) in availability_cores.iter().enumerate() { + let (para_id, required_path) = match core { + CoreState::Scheduled(scheduled_core) => { + // The core is free, pick the first eligible candidate from + // the fragment tree. + (scheduled_core.para_id, Vec::new()) + }, + CoreState::Occupied(occupied_core) => { + if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability) + { + if let Some(ref scheduled_core) = occupied_core.next_up_on_available { + // The candidate occupying the core is available, choose its + // child in the fragment tree. + // + // TODO: doesn't work for parathreads. We lean hard on the assumption + // that cores are fixed to specific parachains within a session. + // https://github.com/paritytech/polkadot/issues/5492 + (scheduled_core.para_id, vec![occupied_core.candidate_hash]) + } else { + continue + } + } else { + if occupied_core.time_out_at != block_number { + continue + } + if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out { + // Candidate's availability timed out, practically same as scheduled. + (scheduled_core.para_id, Vec::new()) + } else { + continue + } + } + }, + CoreState::Free => continue, + }; + + let candidate_hash = + get_backable_candidate(relay_parent, para_id, required_path, sender).await?; + + match candidate_hash { + Some(hash) => selected_candidates.push(hash), + None => { + gum::debug!( + target: LOG_TARGET, + leaf_hash = ?relay_parent, + core = core_idx, + "No backable candidate returned by prospective parachains", + ); + }, + } + } + + Ok(selected_candidates) +} + +/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. 
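// Per-core decision table implemented by `request_backable_candidates` above,
// restated here for clarity; each selected core yields a `(para_id,
// required_path)` pair that is turned into a `GetBackableCandidate` request:
//
//   Scheduled                         -> (para, [])
//   Occupied, availability reached    -> (next_up_on_available.para,
//                                         [occupied.candidate_hash])
//   Occupied, timed out at this block -> (next_up_on_time_out.para, [])
//   Occupied, otherwise               -> core skipped
//   Free                              -> core skipped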
+async fn select_candidates( + availability_cores: &[CoreState], + bitfields: &[SignedAvailabilityBitfield], + prospective_parachains_mode: &ProspectiveParachainsMode, + relay_parent: Hash, + sender: &mut impl overseer::ProvisionerSenderTrait, +) -> Result, Error> { + let selected_candidates = match prospective_parachains_mode { + ProspectiveParachainsMode::Enabled => + request_backable_candidates(availability_cores, bitfields, relay_parent, sender).await?, + ProspectiveParachainsMode::Disabled { backed_candidates } => + select_candidate_hashes_from_tracked( + availability_cores, + bitfields, + &backed_candidates, + relay_parent, + sender, + ) + .await?, + }; + // now get the backed candidates corresponding to these candidate receipts let (tx, rx) = oneshot::channel(); sender.send_unbounded_message(CandidateBackingMessage::GetBackedCandidates( @@ -571,6 +723,27 @@ async fn get_block_number_under_construction( } } +/// Requests backable candidate from Prospective Parachains based on +/// the given path in the fragment tree. +async fn get_backable_candidate( + relay_parent: Hash, + para_id: ParaId, + required_path: Vec, + sender: &mut impl overseer::ProvisionerSenderTrait, +) -> Result, Error> { + let (tx, rx) = oneshot::channel(); + sender + .send_message(ProspectiveParachainsMessage::GetBackableCandidate( + relay_parent, + para_id, + required_path, + tx, + )) + .await; + + rx.await.map_err(Error::CanceledBackableCandidate) +} + /// The availability bitfield for a given core is the transpose /// of a set of signed availability bitfields. It goes like this: /// diff --git a/node/core/provisioner/src/tests.rs b/node/core/provisioner/src/tests.rs index a58e22d7efc2..216d3841076e 100644 --- a/node/core/provisioner/src/tests.rs +++ b/node/core/provisioner/src/tests.rs @@ -316,13 +316,21 @@ mod select_candidates { ] } + enum TestProspectiveParachainsMode { + Enabled, + Disabled, + } + async fn mock_overseer( mut receiver: mpsc::UnboundedReceiver, expected: Vec, + prospective_parachains_mode: TestProspectiveParachainsMode, ) { use ChainApiMessage::BlockNumber; use RuntimeApiMessage::Request; + let mut candidates = expected.iter().map(BackedCandidate::hash); + while let Some(from_job) = receiver.next().await { match from_job { AllMessages::ChainApi(BlockNumber(_relay_parent, tx)) => @@ -335,11 +343,23 @@ mod select_candidates { tx.send(Ok(mock_availability_cores())).unwrap(), AllMessages::CandidateBacking(CandidateBackingMessage::GetBackedCandidates( _, - _, + hashes, sender, )) => { + let expected_hashes: Vec = + expected.iter().map(BackedCandidate::hash).collect(); + assert_eq!(expected_hashes, hashes); let _ = sender.send(expected.clone()); }, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetBackableCandidate(.., tx), + ) => match prospective_parachains_mode { + TestProspectiveParachainsMode::Enabled => { + let _ = tx.send(candidates.next()); + }, + TestProspectiveParachainsMode::Disabled => + panic!("unexpected prospective parachains request"), + }, _ => panic!("Unexpected message: {:?}", from_job), } } @@ -348,9 +368,19 @@ mod select_candidates { #[test] fn can_succeed() { test_harness( - |r| mock_overseer(r, Vec::new()), + |r| mock_overseer(r, Vec::new(), TestProspectiveParachainsMode::Disabled), |mut tx: TestSubsystemSender| async move { - select_candidates(&[], &[], &[], Default::default(), &mut tx).await.unwrap(); + let prospective_parachains_mode = + ProspectiveParachainsMode::Disabled { backed_candidates: Vec::new() }; + select_candidates( + &[], + &[], + 
&prospective_parachains_mode, + Default::default(), + &mut tx, + ) + .await + .unwrap(); }, ) } @@ -401,6 +431,8 @@ mod select_candidates { // why those particular indices? see the comments on mock_availability_cores() let expected_candidates: Vec<_> = [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + let prospective_parachains_mode = + ProspectiveParachainsMode::Disabled { backed_candidates: candidates }; let expected_backed = expected_candidates .iter() @@ -415,12 +447,17 @@ mod select_candidates { .collect(); test_harness( - |r| mock_overseer(r, expected_backed), + |r| mock_overseer(r, expected_backed, TestProspectiveParachainsMode::Disabled), |mut tx: TestSubsystemSender| async move { - let result = - select_candidates(&mock_cores, &[], &candidates, Default::default(), &mut tx) - .await - .unwrap(); + let result = select_candidates( + &mock_cores, + &[], + &prospective_parachains_mode, + Default::default(), + &mut tx, + ) + .await + .unwrap(); result.into_iter().for_each(|c| { assert!( @@ -442,9 +479,11 @@ mod select_candidates { // why those particular indices? see the comments on mock_availability_cores() // the first candidate with code is included out of [1, 4, 7, 8, 10]. - let cores = [1, 7, 10]; + let cores = [1, 4, 7, 8, 10]; let cores_with_code = [1, 4, 8]; + let expected_cores = [1, 7, 10]; + let committed_receipts: Vec<_> = (0..mock_cores.len()) .map(|i| { let mut descriptor = dummy_candidate_descriptor(dummy_hash()); @@ -465,26 +504,104 @@ mod select_candidates { .collect(); let candidates: Vec<_> = committed_receipts.iter().map(|r| r.to_plain()).collect(); + let backed_candidates: Vec<_> = committed_receipts + .iter() + .map(|committed_receipt| BackedCandidate { + candidate: committed_receipt.clone(), + validity_votes: Vec::new(), + validator_indices: default_bitvec(n_cores), + }) + .collect(); + + // First, provisioner will request backable candidates for each scheduled core. + // Then, some of them get filtered due to new validation code rule. 
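// Concretely: of the selected cores [1, 4, 7, 8, 10], cores {1, 4, 8} carry
// new validation code, and only the first of them (core 1) survives the
// new-validation-code rule, leaving [1, 7, 10] as in `expected_cores`.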
+ let expected_backed: Vec<_> = + cores.iter().map(|&idx| backed_candidates[idx].clone()).collect(); + let expected_backed_filtered: Vec<_> = + expected_cores.iter().map(|&idx| candidates[idx].clone()).collect(); + + let prospective_parachains_mode = + ProspectiveParachainsMode::Disabled { backed_candidates: candidates }; + + test_harness( + |r| mock_overseer(r, expected_backed, TestProspectiveParachainsMode::Disabled), + |mut tx: TestSubsystemSender| async move { + let result = select_candidates( + &mock_cores, + &[], + &prospective_parachains_mode, + Default::default(), + &mut tx, + ) + .await + .unwrap(); + + assert_eq!(result.len(), 3); + + result.into_iter().for_each(|c| { + assert!( + expected_backed_filtered.iter().any(|c2| c.candidate.corresponds_to(c2)), + "Failed to find candidate: {:?}", + c, + ) + }); + }, + ) + } + #[test] + fn request_from_prospective_parachains() { + let mock_cores = mock_availability_cores(); + let n_cores = mock_cores.len(); + + let empty_hash = PersistedValidationData::::default().hash(); + + let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); + descriptor_template.persisted_validation_data_hash = empty_hash; + let candidate_template = CandidateReceipt { + descriptor: descriptor_template, + commitments_hash: CandidateCommitments::default().hash(), + }; + + let candidates: Vec<_> = std::iter::repeat(candidate_template) + .take(mock_cores.len()) + .enumerate() + .map(|(idx, mut candidate)| { + candidate.descriptor.para_id = idx.into(); + candidate + }) + .collect(); + + // why those particular indices? see the comments on mock_availability_cores() let expected_candidates: Vec<_> = - cores.iter().map(|&idx| candidates[idx].clone()).collect(); + [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + // Expect prospective parachains subsystem requests. 
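// In `Enabled` mode the mock overseer above answers each
// `GetBackableCandidate` request from its iterator of expected candidate
// hashes and panics on such a request in `Disabled` mode, so this test also
// verifies that the tracked-candidates path is never consulted.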
+ let prospective_parachains_mode = ProspectiveParachainsMode::Enabled; - let expected_backed: Vec<_> = cores + let expected_backed = expected_candidates .iter() - .map(|&idx| BackedCandidate { - candidate: committed_receipts[idx].clone(), + .map(|c| BackedCandidate { + candidate: CommittedCandidateReceipt { + descriptor: c.descriptor.clone(), + commitments: Default::default(), + }, validity_votes: Vec::new(), validator_indices: default_bitvec(n_cores), }) .collect(); test_harness( - |r| mock_overseer(r, expected_backed), + |r| mock_overseer(r, expected_backed, TestProspectiveParachainsMode::Enabled), |mut tx: TestSubsystemSender| async move { - let result = - select_candidates(&mock_cores, &[], &candidates, Default::default(), &mut tx) - .await - .unwrap(); + let result = select_candidates( + &mock_cores, + &[], + &prospective_parachains_mode, + Default::default(), + &mut tx, + ) + .await + .unwrap(); result.into_iter().for_each(|c| { assert!( diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 9e95ca8af03e..d66efcca8fce 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -520,6 +520,7 @@ pub struct Overseer { CandidateBackingMessage, ChainApiMessage, DisputeCoordinatorMessage, + ProspectiveParachainsMessage, ])] provisioner: Provisioner, From c61b949683e600f6187468bfef37cb6ba68b469a Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Wed, 7 Sep 2022 00:54:59 +0300 Subject: [PATCH 07/76] provisioner: async backing changes (#5711) * Provisioner changes for async backing * Select candidates based on prospective paras mode * Revert naming * Update tests * Update TODO comment * review --- node/core/provisioner/src/error.rs | 6 + node/core/provisioner/src/lib.rs | 216 ++++++++++++++++++++++++++--- node/core/provisioner/src/tests.rs | 153 +++++++++++++++++--- node/overseer/src/lib.rs | 1 + 4 files changed, 337 insertions(+), 39 deletions(-) diff --git a/node/core/provisioner/src/error.rs b/node/core/provisioner/src/error.rs index 05e437854eac..03eddeb67dd8 100644 --- a/node/core/provisioner/src/error.rs +++ b/node/core/provisioner/src/error.rs @@ -46,6 +46,12 @@ pub enum Error { #[error("failed to get votes on dispute")] CanceledCandidateVotes(#[source] oneshot::Canceled), + #[error("failed to get backable candidate from prospective parachains")] + CanceledBackableCandidate(#[source] oneshot::Canceled), + + #[error("failed to get Runtime API version")] + CanceledRuntimeApiVersion(#[source] oneshot::Canceled), + #[error(transparent)] ChainApi(#[from] ChainApiError), diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 0f3099c7df33..eacedb8c45fc 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -29,8 +29,9 @@ use polkadot_node_primitives::CandidateVotes; use polkadot_node_subsystem::{ jaeger, messages::{ - CandidateBackingMessage, ChainApiMessage, DisputeCoordinatorMessage, ProvisionableData, - ProvisionerInherentData, ProvisionerMessage, + CandidateBackingMessage, ChainApiMessage, DisputeCoordinatorMessage, + ProspectiveParachainsMessage, ProvisionableData, ProvisionerInherentData, + ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, }, overseer, ActivatedLeaf, ActiveLeavesUpdate, FromOrchestra, LeafStatus, OverseerSignal, PerLeafSpan, SpawnedSubsystem, SubsystemError, @@ -40,8 +41,8 @@ use polkadot_node_subsystem_util::{ }; use polkadot_primitives::v2::{ BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreState, 
DisputeState, - DisputeStatement, DisputeStatementSet, Hash, MultiDisputeStatementSet, OccupiedCoreAssumption, - SessionIndex, SignedAvailabilityBitfield, ValidatorIndex, + DisputeStatement, DisputeStatementSet, Hash, Id as ParaId, MultiDisputeStatementSet, + OccupiedCoreAssumption, SessionIndex, SignedAvailabilityBitfield, ValidatorIndex, }; use std::collections::{BTreeMap, HashMap, HashSet}; @@ -74,10 +75,21 @@ impl ProvisionerSubsystem { } } +#[derive(Debug, Clone)] +enum ProspectiveParachainsMode { + Enabled, + Disabled { + // Without prospective parachains it's necessary + // to track backed candidates to choose from when assembling + // a relay chain block. + backed_candidates: Vec, + }, +} + /// A per-relay-parent state for the provisioning subsystem. pub struct PerRelayParent { leaf: ActivatedLeaf, - backed_candidates: Vec, + prospective_parachains_mode: ProspectiveParachainsMode, signed_bitfields: Vec, is_inherent_ready: bool, awaiting_inherent: Vec>, @@ -85,12 +97,12 @@ pub struct PerRelayParent { } impl PerRelayParent { - fn new(leaf: ActivatedLeaf) -> Self { + fn new(leaf: ActivatedLeaf, prospective_parachains_mode: ProspectiveParachainsMode) -> Self { let span = PerLeafSpan::new(leaf.span.clone(), "provisioner"); Self { leaf, - backed_candidates: Vec::new(), + prospective_parachains_mode, signed_bitfields: Vec::new(), is_inherent_ready: false, awaiting_inherent: Vec::new(), @@ -145,7 +157,7 @@ async fn run_iteration( from_overseer = ctx.recv().fuse() => { match from_overseer? { FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => - handle_active_leaves_update(update, per_relay_parent, inherent_delays), + handle_active_leaves_update(ctx.sender(), update, per_relay_parent, inherent_delays).await?, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), FromOrchestra::Communication { msg } => { @@ -173,20 +185,55 @@ async fn run_iteration( } } -fn handle_active_leaves_update( +async fn prospective_parachains_mode( + sender: &mut impl overseer::ProvisionerSenderTrait, + leaf_hash: Hash, +) -> Result { + // TODO: call a Runtime API once staging version is available + // https://github.com/paritytech/substrate/discussions/11338 + // + // Implementation should probably be shared with backing. + + let (tx, rx) = oneshot::channel(); + sender + .send_message(RuntimeApiMessage::Request(leaf_hash, RuntimeApiRequest::Version(tx))) + .await; + + let version = rx.await.map_err(Error::CanceledRuntimeApiVersion)?.map_err(Error::Runtime)?; + + if version == 3 { + Ok(ProspectiveParachainsMode::Enabled) + } else { + if version != 2 { + gum::warn!( + target: LOG_TARGET, + "Runtime API version is {}, expected 2 or 3. 
Prospective parachains are disabled", + version + ); + } + Ok(ProspectiveParachainsMode::Disabled { backed_candidates: Vec::new() }) + } +} + +async fn handle_active_leaves_update( + sender: &mut impl overseer::ProvisionerSenderTrait, update: ActiveLeavesUpdate, per_relay_parent: &mut HashMap, inherent_delays: &mut InherentDelays, -) { +) -> Result<(), Error> { for deactivated in &update.deactivated { per_relay_parent.remove(deactivated); } for leaf in update.activated { + let prospective_parachains_mode = prospective_parachains_mode(sender, leaf.hash).await?; + let delay_fut = Delay::new(PRE_PROPOSE_TIMEOUT).map(move |_| leaf.hash).boxed(); - per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf)); + per_relay_parent.insert(leaf.hash, PerRelayParent::new(leaf, prospective_parachains_mode)); inherent_delays.push(delay_fut); } + + Ok(()) } #[overseer::contextbounds(Provisioner, prefix = self::overseer)] @@ -239,7 +286,7 @@ async fn send_inherent_data_bg( ) -> Result<(), Error> { let leaf = per_relay_parent.leaf.clone(); let signed_bitfields = per_relay_parent.signed_bitfields.clone(); - let backed_candidates = per_relay_parent.backed_candidates.clone(); + let prospective_parachains_mode = per_relay_parent.prospective_parachains_mode.clone(); let span = per_relay_parent.span.child("req-inherent-data"); let mut sender = ctx.sender().clone(); @@ -257,7 +304,7 @@ async fn send_inherent_data_bg( let send_result = send_inherent_data( &leaf, &signed_bitfields, - &backed_candidates, + &prospective_parachains_mode, return_senders, &mut sender, &metrics, @@ -278,7 +325,6 @@ async fn send_inherent_data_bg( gum::debug!( target: LOG_TARGET, signed_bitfield_count = signed_bitfields.len(), - backed_candidates_count = backed_candidates.len(), leaf_hash = ?leaf.hash, "inherent data sent successfully" ); @@ -313,7 +359,11 @@ fn note_provisionable_data( .child("provisionable-backed") .with_candidate(candidate_hash) .with_para_id(backed_candidate.descriptor().para_id); - per_relay_parent.backed_candidates.push(backed_candidate) + if let ProspectiveParachainsMode::Disabled { backed_candidates } = + &mut per_relay_parent.prospective_parachains_mode + { + backed_candidates.push(backed_candidate) + } }, _ => {}, } @@ -341,7 +391,7 @@ type CoreAvailability = BitVec; async fn send_inherent_data( leaf: &ActivatedLeaf, bitfields: &[SignedAvailabilityBitfield], - candidates: &[CandidateReceipt], + prospective_parachains_mode: &ProspectiveParachainsMode, return_senders: Vec>, from_job: &mut impl overseer::ProvisionerSenderTrait, metrics: &Metrics, @@ -381,8 +431,15 @@ async fn send_inherent_data( relay_parent = ?leaf.hash, "Selected bitfields" ); - let candidates = - select_candidates(&availability_cores, &bitfields, candidates, leaf.hash, from_job).await?; + + let candidates = select_candidates( + &availability_cores, + &bitfields, + prospective_parachains_mode, + leaf.hash, + from_job, + ) + .await?; gum::trace!( target: LOG_TARGET, @@ -489,14 +546,16 @@ fn select_availability_bitfields( selected.into_iter().map(|(_, b)| b).collect() } -/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. -async fn select_candidates( +/// Selects candidates from tracked ones to note in a relay chain block. +/// +/// Should be called when prospective parachains are disabled. 
+async fn select_candidate_hashes_from_tracked( availability_cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], candidates: &[CandidateReceipt], relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, -) -> Result, Error> { +) -> Result, Error> { let block_number = get_block_number_under_construction(relay_parent, sender).await?; let mut selected_candidates = @@ -570,6 +629,100 @@ async fn select_candidates( } } + Ok(selected_candidates) +} + +/// Requests backable candidates from Prospective Parachains subsystem +/// based on core states. +/// +/// Should be called when prospective parachains are enabled. +async fn request_backable_candidates( + availability_cores: &[CoreState], + bitfields: &[SignedAvailabilityBitfield], + relay_parent: Hash, + sender: &mut impl overseer::ProvisionerSenderTrait, +) -> Result, Error> { + let block_number = get_block_number_under_construction(relay_parent, sender).await?; + + let mut selected_candidates = Vec::with_capacity(availability_cores.len()); + + for (core_idx, core) in availability_cores.iter().enumerate() { + let (para_id, required_path) = match core { + CoreState::Scheduled(scheduled_core) => { + // The core is free, pick the first eligible candidate from + // the fragment tree. + (scheduled_core.para_id, Vec::new()) + }, + CoreState::Occupied(occupied_core) => { + if bitfields_indicate_availability(core_idx, bitfields, &occupied_core.availability) + { + if let Some(ref scheduled_core) = occupied_core.next_up_on_available { + // The candidate occupying the core is available, choose its + // child in the fragment tree. + // + // TODO: doesn't work for parathreads. We lean hard on the assumption + // that cores are fixed to specific parachains within a session. + // https://github.com/paritytech/polkadot/issues/5492 + (scheduled_core.para_id, vec![occupied_core.candidate_hash]) + } else { + continue + } + } else { + if occupied_core.time_out_at != block_number { + continue + } + if let Some(ref scheduled_core) = occupied_core.next_up_on_time_out { + // Candidate's availability timed out, practically same as scheduled. + (scheduled_core.para_id, Vec::new()) + } else { + continue + } + } + }, + CoreState::Free => continue, + }; + + let candidate_hash = + get_backable_candidate(relay_parent, para_id, required_path, sender).await?; + + match candidate_hash { + Some(hash) => selected_candidates.push(hash), + None => { + gum::debug!( + target: LOG_TARGET, + leaf_hash = ?relay_parent, + core = core_idx, + "No backable candidate returned by prospective parachains", + ); + }, + } + } + + Ok(selected_candidates) +} + +/// Determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. 
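// The helpers around this point (`get_backable_candidate` below in
// particular) all follow the same oneshot request/response pattern. A minimal
// sketch of that pattern; the generic `_request_sketch` helper is an
// illustration, not subsystem API:
async fn _request_sketch<T>(
    dispatch: impl FnOnce(futures::channel::oneshot::Sender<T>),
) -> Option<T> {
    let (tx, rx) = futures::channel::oneshot::channel();
    // Hand the sender half to the responder, e.g. embedded in a message like
    // `ProspectiveParachainsMessage::GetBackableCandidate(.., tx)`.
    dispatch(tx);
    // The receiver resolves once the responder sends; `Canceled` maps to `None`.
    rx.await.ok()
}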
+async fn select_candidates( + availability_cores: &[CoreState], + bitfields: &[SignedAvailabilityBitfield], + prospective_parachains_mode: &ProspectiveParachainsMode, + relay_parent: Hash, + sender: &mut impl overseer::ProvisionerSenderTrait, +) -> Result, Error> { + let selected_candidates = match prospective_parachains_mode { + ProspectiveParachainsMode::Enabled => + request_backable_candidates(availability_cores, bitfields, relay_parent, sender).await?, + ProspectiveParachainsMode::Disabled { backed_candidates } => + select_candidate_hashes_from_tracked( + availability_cores, + bitfields, + &backed_candidates, + relay_parent, + sender, + ) + .await?, + }; + // now get the backed candidates corresponding to these candidate receipts let (tx, rx) = oneshot::channel(); sender.send_unbounded_message(CandidateBackingMessage::GetBackedCandidates( @@ -638,6 +791,27 @@ async fn get_block_number_under_construction( } } +/// Requests backable candidate from Prospective Parachains based on +/// the given path in the fragment tree. +async fn get_backable_candidate( + relay_parent: Hash, + para_id: ParaId, + required_path: Vec, + sender: &mut impl overseer::ProvisionerSenderTrait, +) -> Result, Error> { + let (tx, rx) = oneshot::channel(); + sender + .send_message(ProspectiveParachainsMessage::GetBackableCandidate( + relay_parent, + para_id, + required_path, + tx, + )) + .await; + + rx.await.map_err(Error::CanceledBackableCandidate) +} + /// The availability bitfield for a given core is the transpose /// of a set of signed availability bitfields. It goes like this: /// diff --git a/node/core/provisioner/src/tests.rs b/node/core/provisioner/src/tests.rs index d0ca425210ed..2d9ac5a14454 100644 --- a/node/core/provisioner/src/tests.rs +++ b/node/core/provisioner/src/tests.rs @@ -316,13 +316,21 @@ mod select_candidates { ] } + enum TestProspectiveParachainsMode { + Enabled, + Disabled, + } + async fn mock_overseer( mut receiver: mpsc::UnboundedReceiver, expected: Vec, + prospective_parachains_mode: TestProspectiveParachainsMode, ) { use ChainApiMessage::BlockNumber; use RuntimeApiMessage::Request; + let mut candidates = expected.iter().map(BackedCandidate::hash); + while let Some(from_job) = receiver.next().await { match from_job { AllMessages::ChainApi(BlockNumber(_relay_parent, tx)) => @@ -335,11 +343,23 @@ mod select_candidates { tx.send(Ok(mock_availability_cores())).unwrap(), AllMessages::CandidateBacking(CandidateBackingMessage::GetBackedCandidates( _, - _, + hashes, sender, )) => { + let expected_hashes: Vec = + expected.iter().map(BackedCandidate::hash).collect(); + assert_eq!(expected_hashes, hashes); let _ = sender.send(expected.clone()); }, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetBackableCandidate(.., tx), + ) => match prospective_parachains_mode { + TestProspectiveParachainsMode::Enabled => { + let _ = tx.send(candidates.next()); + }, + TestProspectiveParachainsMode::Disabled => + panic!("unexpected prospective parachains request"), + }, _ => panic!("Unexpected message: {:?}", from_job), } } @@ -348,9 +368,19 @@ mod select_candidates { #[test] fn can_succeed() { test_harness( - |r| mock_overseer(r, Vec::new()), + |r| mock_overseer(r, Vec::new(), TestProspectiveParachainsMode::Disabled), |mut tx: TestSubsystemSender| async move { - select_candidates(&[], &[], &[], Default::default(), &mut tx).await.unwrap(); + let prospective_parachains_mode = + ProspectiveParachainsMode::Disabled { backed_candidates: Vec::new() }; + select_candidates( + &[], + &[], + 
&prospective_parachains_mode, + Default::default(), + &mut tx, + ) + .await + .unwrap(); }, ) } @@ -401,6 +431,8 @@ mod select_candidates { // why those particular indices? see the comments on mock_availability_cores() let expected_candidates: Vec<_> = [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + let prospective_parachains_mode = + ProspectiveParachainsMode::Disabled { backed_candidates: candidates }; let expected_backed = expected_candidates .iter() @@ -415,12 +447,17 @@ mod select_candidates { .collect(); test_harness( - |r| mock_overseer(r, expected_backed), + |r| mock_overseer(r, expected_backed, TestProspectiveParachainsMode::Disabled), |mut tx: TestSubsystemSender| async move { - let result = - select_candidates(&mock_cores, &[], &candidates, Default::default(), &mut tx) - .await - .unwrap(); + let result = select_candidates( + &mock_cores, + &[], + &prospective_parachains_mode, + Default::default(), + &mut tx, + ) + .await + .unwrap(); result.into_iter().for_each(|c| { assert!( @@ -442,9 +479,11 @@ mod select_candidates { // why those particular indices? see the comments on mock_availability_cores() // the first candidate with code is included out of [1, 4, 7, 8, 10]. - let cores = [1, 7, 10]; + let cores = [1, 4, 7, 8, 10]; let cores_with_code = [1, 4, 8]; + let expected_cores = [1, 7, 10]; + let committed_receipts: Vec<_> = (0..mock_cores.len()) .map(|i| { let mut descriptor = dummy_candidate_descriptor(dummy_hash()); @@ -465,26 +504,104 @@ mod select_candidates { .collect(); let candidates: Vec<_> = committed_receipts.iter().map(|r| r.to_plain()).collect(); + let backed_candidates: Vec<_> = committed_receipts + .iter() + .map(|committed_receipt| BackedCandidate { + candidate: committed_receipt.clone(), + validity_votes: Vec::new(), + validator_indices: default_bitvec(n_cores), + }) + .collect(); + + // First, provisioner will request backable candidates for each scheduled core. + // Then, some of them get filtered due to new validation code rule. 
+ let expected_backed: Vec<_> = + cores.iter().map(|&idx| backed_candidates[idx].clone()).collect(); + let expected_backed_filtered: Vec<_> = + expected_cores.iter().map(|&idx| candidates[idx].clone()).collect(); + + let prospective_parachains_mode = + ProspectiveParachainsMode::Disabled { backed_candidates: candidates }; + + test_harness( + |r| mock_overseer(r, expected_backed, TestProspectiveParachainsMode::Disabled), + |mut tx: TestSubsystemSender| async move { + let result = select_candidates( + &mock_cores, + &[], + &prospective_parachains_mode, + Default::default(), + &mut tx, + ) + .await + .unwrap(); + + assert_eq!(result.len(), 3); + + result.into_iter().for_each(|c| { + assert!( + expected_backed_filtered.iter().any(|c2| c.candidate.corresponds_to(c2)), + "Failed to find candidate: {:?}", + c, + ) + }); + }, + ) + } + #[test] + fn request_from_prospective_parachains() { + let mock_cores = mock_availability_cores(); + let n_cores = mock_cores.len(); + + let empty_hash = PersistedValidationData::::default().hash(); + + let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); + descriptor_template.persisted_validation_data_hash = empty_hash; + let candidate_template = CandidateReceipt { + descriptor: descriptor_template, + commitments_hash: CandidateCommitments::default().hash(), + }; + + let candidates: Vec<_> = std::iter::repeat(candidate_template) + .take(mock_cores.len()) + .enumerate() + .map(|(idx, mut candidate)| { + candidate.descriptor.para_id = idx.into(); + candidate + }) + .collect(); + + // why those particular indices? see the comments on mock_availability_cores() let expected_candidates: Vec<_> = - cores.iter().map(|&idx| candidates[idx].clone()).collect(); + [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + // Expect prospective parachains subsystem requests. 
+ let prospective_parachains_mode = ProspectiveParachainsMode::Enabled; - let expected_backed: Vec<_> = cores + let expected_backed = expected_candidates .iter() - .map(|&idx| BackedCandidate { - candidate: committed_receipts[idx].clone(), + .map(|c| BackedCandidate { + candidate: CommittedCandidateReceipt { + descriptor: c.descriptor.clone(), + commitments: Default::default(), + }, validity_votes: Vec::new(), validator_indices: default_bitvec(n_cores), }) .collect(); test_harness( - |r| mock_overseer(r, expected_backed), + |r| mock_overseer(r, expected_backed, TestProspectiveParachainsMode::Enabled), |mut tx: TestSubsystemSender| async move { - let result = - select_candidates(&mock_cores, &[], &candidates, Default::default(), &mut tx) - .await - .unwrap(); + let result = select_candidates( + &mock_cores, + &[], + &prospective_parachains_mode, + Default::default(), + &mut tx, + ) + .await + .unwrap(); result.into_iter().for_each(|c| { assert!( diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 22c70b2ccfb4..84c634ea77bc 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -512,6 +512,7 @@ pub struct Overseer { CandidateBackingMessage, ChainApiMessage, DisputeCoordinatorMessage, + ProspectiveParachainsMessage, ])] provisioner: Provisioner, From aa5b9bf5a2f2aa832be1cb61a8e2df1ee6ec6e67 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 6 Sep 2022 18:29:19 -0500 Subject: [PATCH 08/76] fmt --- node/core/backing/src/lib.rs | 10 +++++----- node/network/statement-distribution/src/tests.rs | 2 +- node/overseer/src/lib.rs | 4 ++-- node/overseer/src/tests.rs | 4 ++-- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index fff2359760e9..957953cf38e2 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -80,15 +80,15 @@ use futures::{ use error::{Error, FatalResult}; use polkadot_node_primitives::{ - AvailableData, InvalidCandidate, PoV, SignedFullStatementWithPVD, - StatementWithPVD, ValidationResult, BACKING_EXECUTION_TIMEOUT, + AvailableData, InvalidCandidate, PoV, SignedFullStatementWithPVD, StatementWithPVD, + ValidationResult, BACKING_EXECUTION_TIMEOUT, }; use polkadot_node_subsystem::{ messages::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage, - CandidateValidationMessage, CollatorProtocolMessage, - HypotheticalDepthRequest, ProspectiveParachainsMessage, ProvisionableData, - ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, StatementDistributionMessage, + CandidateValidationMessage, CollatorProtocolMessage, HypotheticalDepthRequest, + ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiMessage, + RuntimeApiRequest, StatementDistributionMessage, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; diff --git a/node/network/statement-distribution/src/tests.rs b/node/network/statement-distribution/src/tests.rs index 7aa5ecaa94c3..f1a4c0562c94 100644 --- a/node/network/statement-distribution/src/tests.rs +++ b/node/network/statement-distribution/src/tests.rs @@ -36,7 +36,7 @@ use polkadot_node_subsystem::{ ActivatedLeaf, LeafStatus, }; use polkadot_node_subsystem_test_helpers::mock::make_ferdie_keystore; -use polkadot_primitives::v2::{Hash, Id as ParaId, HeadData, SessionInfo, ValidationCode}; +use polkadot_primitives::v2::{Hash, HeadData, Id as ParaId, SessionInfo, ValidationCode}; use polkadot_primitives_test_helpers::{ 
dummy_committed_candidate_receipt, dummy_hash, AlwaysZeroRng, }; diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index aa373d727cd6..1f5087b31091 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -79,8 +79,8 @@ use polkadot_node_subsystem_types::messages::{ BitfieldSigningMessage, CandidateBackingMessage, CandidateValidationMessage, ChainApiMessage, ChainSelectionMessage, CollationGenerationMessage, CollatorProtocolMessage, DisputeCoordinatorMessage, DisputeDistributionMessage, GossipSupportMessage, - NetworkBridgeRxMessage, NetworkBridgeTxMessage, ProspectiveParachainsMessage, ProvisionerMessage, PvfCheckerMessage, - RuntimeApiMessage, StatementDistributionMessage, + NetworkBridgeRxMessage, NetworkBridgeTxMessage, ProspectiveParachainsMessage, + ProvisionerMessage, PvfCheckerMessage, RuntimeApiMessage, StatementDistributionMessage, }; pub use polkadot_node_subsystem_types::{ errors::{SubsystemError, SubsystemResult}, diff --git a/node/overseer/src/tests.rs b/node/overseer/src/tests.rs index b566dbc2ced8..78477ed0e2d4 100644 --- a/node/overseer/src/tests.rs +++ b/node/overseer/src/tests.rs @@ -30,8 +30,8 @@ use polkadot_node_subsystem_types::{ ActivatedLeaf, LeafStatus, }; use polkadot_primitives::v2::{ - CandidateHash, CandidateReceipt, CollatorPair, Id as ParaId, InvalidDisputeStatementKind, SessionIndex, - ValidDisputeStatementKind, ValidatorIndex, + CandidateHash, CandidateReceipt, CollatorPair, Id as ParaId, InvalidDisputeStatementKind, + SessionIndex, ValidDisputeStatementKind, ValidatorIndex, }; use crate::{ From 25b9f7e7fcc7333d9d4f85b05c35586bba7fadbb Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Mon, 12 Sep 2022 15:30:31 -0500 Subject: [PATCH 09/76] Network bridge changes for asynchronous backing + update subsystems to handle versioned packets (#5991) * BEGIN STATEMENT DISTRIBUTION WORK create a vstaging network protocol which is the same as v1 * mostly make network bridge amenable to vstaging * network-bridge: fully adapt to vstaging * add some TODOs for tests * fix fallout in bitfield-distribution * bitfield distribution tests + TODOs * fix fallout in gossip-support * collator-protocol: fix message fallout * collator-protocol: load PVD from runtime * add TODO for vstaging tests * make things compile * set used network protocol version using a feature * fmt * get approval-distribution building * fix approval-distribution tests * spellcheck * nits * approval distribution net protocol test * bitfield distribution net protocol test * Revert "collator-protocol: fix message fallout" This reverts commit 07cc887303e16c6b3843ecb25cdc7cc2080e2ed1. 
* Network bridge tests Co-authored-by: Chris Sosnin --- Cargo.lock | 1 + node/core/backing/src/tests/mod.rs | 1 + node/network/approval-distribution/src/lib.rs | 286 +++++++++++----- .../approval-distribution/src/tests.rs | 219 ++++++++++--- node/network/bitfield-distribution/Cargo.toml | 1 + node/network/bitfield-distribution/src/lib.rs | 188 ++++++++--- .../bitfield-distribution/src/tests.rs | 221 +++++++++++-- node/network/bridge/src/network.rs | 3 + node/network/bridge/src/rx/mod.rs | 265 +++++++++++---- node/network/bridge/src/rx/tests.rs | 305 ++++++++++++++++-- node/network/bridge/src/tx/mod.rs | 66 +++- node/network/bridge/src/tx/tests.rs | 135 +++++++- .../src/collator_side/mod.rs | 1 + .../src/validator_side/mod.rs | 1 + node/network/gossip-support/src/lib.rs | 8 +- node/network/protocol/Cargo.toml | 3 + node/network/protocol/src/lib.rs | 223 ++++++++++++- node/network/protocol/src/peer_set.rs | 47 +++ .../network/statement-distribution/src/lib.rs | 42 ++- .../statement-distribution/src/metrics.rs | 17 +- 20 files changed, 1694 insertions(+), 339 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d62b618eeb0c..94e49313f391 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6060,6 +6060,7 @@ dependencies = [ name = "polkadot-availability-bitfield-distribution" version = "0.9.28" dependencies = [ + "always-assert", "assert_matches", "bitvec 1.0.0", "env_logger 0.9.0", diff --git a/node/core/backing/src/tests/mod.rs b/node/core/backing/src/tests/mod.rs index f92c925164e2..81ebcf5c6dc4 100644 --- a/node/core/backing/src/tests/mod.rs +++ b/node/core/backing/src/tests/mod.rs @@ -33,6 +33,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_primitives::v2::{ CandidateDescriptor, GroupRotationInfo, HeadData, PersistedValidationData, ScheduledCore, + SessionIndex, }; use sp_application_crypto::AppKey; use sp_keyring::Sr25519Keyring; diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index f0cb4fc24ff8..ca6212701f3e 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -24,7 +24,9 @@ use futures::{channel::oneshot, FutureExt as _}; use polkadot_node_network_protocol::{ self as net_protocol, grid_topology::{RandomRouting, RequiredRouting, SessionGridTopologies, SessionGridTopology}, - v1 as protocol_v1, PeerId, UnifiedReputationChange as Rep, Versioned, View, + peer_set::ValidationVersion, + v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, UnifiedReputationChange as Rep, + Versioned, VersionedValidationProtocol, View, }; use polkadot_node_primitives::approval::{ AssignmentCert, BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote, @@ -150,6 +152,15 @@ enum Resend { No, } +/// Data stored on a per-peer basis. +#[derive(Debug)] +struct PeerData { + /// The peer's view. + view: View, + /// The peer's protocol version. + version: ValidationVersion, +} + /// The [`State`] struct is responsible for tracking the overall state of the subsystem. /// /// It tracks metadata about our view of the unfinalized chain, @@ -169,7 +180,7 @@ struct State { pending_known: HashMap>, /// Peer data is partially stored here, and partially inline within the [`BlockEntry`]s - peer_views: HashMap, + peer_data: HashMap, /// Keeps a topology for various different sessions. 
topologies: SessionGridTopologies, @@ -330,14 +341,30 @@ impl State { rng: &mut (impl CryptoRng + Rng), ) { match event { - NetworkBridgeEvent::PeerConnected(peer_id, role, _, _) => { + NetworkBridgeEvent::PeerConnected(peer_id, role, version, _) => { // insert a blank view if none already present gum::trace!(target: LOG_TARGET, ?peer_id, ?role, "Peer connected"); - self.peer_views.entry(peer_id).or_default(); + let version = match ValidationVersion::try_from(version).ok() { + Some(v) => v, + None => { + // sanity: network bridge is supposed to detect this already. + gum::error!( + target: LOG_TARGET, + ?peer_id, + ?version, + "Unsupported protocol version" + ); + return + }, + }; + + self.peer_data + .entry(peer_id) + .or_insert_with(|| PeerData { version, view: Default::default() }); }, NetworkBridgeEvent::PeerDisconnected(peer_id) => { gum::trace!(target: LOG_TARGET, ?peer_id, "Peer disconnected"); - self.peer_views.remove(&peer_id); + self.peer_data.remove(&peer_id); self.blocks.iter_mut().for_each(|(_hash, entry)| { entry.known_by.remove(&peer_id); }) @@ -370,7 +397,7 @@ impl State { live }); }, - NetworkBridgeEvent::PeerMessage(peer_id, Versioned::V1(msg)) => { + NetworkBridgeEvent::PeerMessage(peer_id, msg) => { self.process_incoming_peer_message(ctx, metrics, peer_id, msg, rng).await; }, } @@ -420,16 +447,18 @@ impl State { { let sender = ctx.sender(); - for (peer_id, view) in self.peer_views.iter() { - let intersection = view.iter().filter(|h| new_hashes.contains(h)); - let view_intersection = View::new(intersection.cloned(), view.finalized_number); + for (peer_id, data) in self.peer_data.iter() { + let intersection = data.view.iter().filter(|h| new_hashes.contains(h)); + let view_intersection = + View::new(intersection.cloned(), data.view.finalized_number); Self::unify_with_peer( sender, metrics, &mut self.blocks, &self.topologies, - self.peer_views.len(), + self.peer_data.len(), peer_id.clone(), + data.version, view_intersection, rng, ) @@ -506,6 +535,7 @@ impl State { adjust_required_routing_and_propagate( ctx, + &self.peer_data, &mut self.blocks, &self.topologies, |block_entry| block_entry.session == session, @@ -523,13 +553,16 @@ impl State { ctx: &mut Context, metrics: &Metrics, peer_id: PeerId, - msg: protocol_v1::ApprovalDistributionMessage, + msg: net_protocol::ApprovalDistributionMessage, rng: &mut R, ) where R: CryptoRng + Rng, { match msg { - protocol_v1::ApprovalDistributionMessage::Assignments(assignments) => { + Versioned::V1(protocol_v1::ApprovalDistributionMessage::Assignments(assignments)) | + Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Assignments( + assignments, + )) => { gum::trace!( target: LOG_TARGET, peer_id = %peer_id, @@ -570,7 +603,10 @@ impl State { .await; } }, - protocol_v1::ApprovalDistributionMessage::Approvals(approvals) => { + Versioned::V1(protocol_v1::ApprovalDistributionMessage::Approvals(approvals)) | + Versioned::VStaging(protocol_vstaging::ApprovalDistributionMessage::Approvals( + approvals, + )) => { gum::trace!( target: LOG_TARGET, peer_id = %peer_id, @@ -623,9 +659,14 @@ impl State { { gum::trace!(target: LOG_TARGET, ?view, "Peer view change"); let finalized_number = view.finalized_number; - let old_view = - self.peer_views.get_mut(&peer_id).map(|d| std::mem::replace(d, view.clone())); - let old_finalized_number = old_view.map(|v| v.finalized_number).unwrap_or(0); + let (peer_protocol_version, old_finalized_number) = match self + .peer_data + .get_mut(&peer_id) + .map(|d| (d.version, std::mem::replace(&mut d.view, 
view.clone()))) + { + Some((v, view)) => (v, view.finalized_number), + None => return, // unknown peer + }; // we want to prune every block known_by peer up to (including) view.finalized_number let blocks = &mut self.blocks; @@ -650,8 +691,9 @@ impl State { metrics, &mut self.blocks, &self.topologies, - self.peer_views.len(), + self.peer_data.len(), peer_id.clone(), + peer_protocol_version, view, rng, ) @@ -894,7 +936,7 @@ impl State { // then messages will be sent when we get it. let assignments = vec![(assignment, claimed_candidate_index)]; - let n_peers_total = self.peer_views.len(); + let n_peers_total = self.peer_data.len(); let source_peer = source.peer_id(); let mut peer_filter = move |peer| { @@ -918,31 +960,53 @@ impl State { route_random }; - let peers = entry.known_by.keys().filter(|p| peer_filter(p)).cloned().collect::>(); + let (v1_peers, vstaging_peers) = { + let peer_data = &self.peer_data; + let peers = entry + .known_by + .keys() + .filter_map(|p| peer_data.get_key_value(p)) + .filter(|(p, _)| peer_filter(p)) + .map(|(p, peer_data)| (*p, peer_data.version)) + .collect::>(); - // Add the metadata of the assignment to the knowledge of each peer. - for peer in peers.iter() { - // we already filtered peers above, so this should always be Some - if let Some(peer_knowledge) = entry.known_by.get_mut(peer) { - peer_knowledge.sent.insert(message_subject.clone(), message_kind); + // Add the metadata of the assignment to the knowledge of each peer. + for (peer, _) in peers.iter() { + // we already filtered peers above, so this should always be Some + if let Some(peer_knowledge) = entry.known_by.get_mut(peer) { + peer_knowledge.sent.insert(message_subject.clone(), message_kind); + } } - } - if !peers.is_empty() { - gum::trace!( - target: LOG_TARGET, - ?block_hash, - ?claimed_candidate_index, - local = source.peer_id().is_none(), - num_peers = peers.len(), - "Sending an assignment to peers", - ); + if !peers.is_empty() { + gum::trace!( + target: LOG_TARGET, + ?block_hash, + ?claimed_candidate_index, + local = source.peer_id().is_none(), + num_peers = peers.len(), + "Sending an assignment to peers", + ); + } + let v1_peers = filter_peers_by_version(&peers, ValidationVersion::V1); + let vstaging_peers = filter_peers_by_version(&peers, ValidationVersion::VStaging); + + (v1_peers, vstaging_peers) + }; + + if !v1_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - peers, - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Assignments(assignments), - )), + v1_peers, + versioned_assignments_packet(ValidationVersion::V1, assignments.clone()), + )) + .await; + } + + if !vstaging_peers.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + vstaging_peers, + versioned_assignments_packet(ValidationVersion::VStaging, assignments.clone()), )) .await; } @@ -1173,38 +1237,55 @@ impl State { in_topology || knowledge.sent.contains(message_subject, MessageKind::Assignment) }; - let peers = entry - .known_by - .iter() - .filter(|(p, k)| peer_filter(p, k)) - .map(|(p, _)| p) - .cloned() - .collect::>(); - - // Add the metadata of the assignment to the knowledge of each peer. 
- for peer in peers.iter() { - // we already filtered peers above, so this should always be Some - if let Some(entry) = entry.known_by.get_mut(peer) { - entry.sent.insert(message_subject.clone(), message_kind); + let (v1_peers, vstaging_peers) = { + let peer_data = &self.peer_data; + let peers = entry + .known_by + .iter() + .filter_map(|(p, k)| peer_data.get(&p).map(|pd| (p, k, pd.version))) + .filter(|(p, k, _)| peer_filter(p, k)) + .map(|(p, _, v)| (p.clone(), v)) + .collect::>(); + + // Add the metadata of the assignment to the knowledge of each peer. + for (peer, _) in peers.iter() { + // we already filtered peers above, so this should always be Some + if let Some(peer_knowledge) = entry.known_by.get_mut(peer) { + peer_knowledge.sent.insert(message_subject.clone(), message_kind); + } } - } - if !peers.is_empty() { - let approvals = vec![vote]; - gum::trace!( - target: LOG_TARGET, - ?block_hash, - ?candidate_index, - local = source.peer_id().is_none(), - num_peers = peers.len(), - "Sending an approval to peers", - ); + if !peers.is_empty() { + gum::trace!( + target: LOG_TARGET, + ?block_hash, + ?candidate_index, + local = source.peer_id().is_none(), + num_peers = peers.len(), + "Sending an approval to peers", + ); + } + + let v1_peers = filter_peers_by_version(&peers, ValidationVersion::V1); + let vstaging_peers = filter_peers_by_version(&peers, ValidationVersion::VStaging); + + (v1_peers, vstaging_peers) + }; + + let approvals = vec![vote]; + if !v1_peers.is_empty() { ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - peers, - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Approvals(approvals), - )), + v1_peers, + versioned_approvals_packet(ValidationVersion::V1, approvals.clone()), + )) + .await; + } + + if !vstaging_peers.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + vstaging_peers, + versioned_approvals_packet(ValidationVersion::VStaging, approvals), )) .await; } @@ -1260,6 +1341,7 @@ impl State { topologies: &SessionGridTopologies, total_peers: usize, peer_id: PeerId, + peer_protocol_version: ValidationVersion, view: View, rng: &mut (impl CryptoRng + Rng), ) { @@ -1373,9 +1455,7 @@ impl State { sender .send_message(NetworkBridgeTxMessage::SendValidationMessage( vec![peer_id.clone()], - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Assignments(assignments_to_send), - )), + versioned_assignments_packet(peer_protocol_version, assignments_to_send), )) .await; } @@ -1391,9 +1471,7 @@ impl State { sender .send_message(NetworkBridgeTxMessage::SendValidationMessage( vec![peer_id], - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Approvals(approvals_to_send), - )), + versioned_approvals_packet(peer_protocol_version, approvals_to_send), )) .await; } @@ -1421,6 +1499,7 @@ impl State { adjust_required_routing_and_propagate( ctx, + &self.peer_data, &mut self.blocks, &self.topologies, |block_entry| { @@ -1448,6 +1527,7 @@ impl State { adjust_required_routing_and_propagate( ctx, + &self.peer_data, &mut self.blocks, &self.topologies, |block_entry| { @@ -1505,6 +1585,7 @@ impl State { #[overseer::contextbounds(ApprovalDistribution, prefix = self::overseer)] async fn adjust_required_routing_and_propagate( ctx: &mut Context, + peer_data: &HashMap, blocks: &mut HashMap, topologies: &SessionGridTopologies, block_filter: BlockFilter, @@ -1592,21 +1673,27 @@ 
async fn adjust_required_routing_and_propagate( for (peer, assignments_packet) in peer_assignments { + let versioned_packet = match peer_data.get(&peer).map(|pd| pd.version) { + None => continue, + Some(v) => versioned_assignments_packet(v, assignments_packet), + }; + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( vec![peer], - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Assignments(assignments_packet), - )), + versioned_packet, )) .await; } for (peer, approvals_packet) in peer_approvals { + let versioned_packet = match peer_data.get(&peer).map(|pd| pd.version) { + None => continue, + Some(v) => versioned_approvals_packet(v, approvals_packet), + }; + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( vec![peer], - Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( - protocol_v1::ApprovalDistributionMessage::Approvals(approvals_packet), - )), + versioned_packet, )) .await; } @@ -1737,6 +1824,49 @@ impl ApprovalDistribution { } } +fn versioned_approvals_packet( + version: ValidationVersion, + approvals: Vec, +) -> VersionedValidationProtocol { + match version { + ValidationVersion::V1 => + Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( + protocol_v1::ApprovalDistributionMessage::Approvals(approvals), + )), + ValidationVersion::VStaging => + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Approvals(approvals), + )), + } +} + +fn versioned_assignments_packet( + version: ValidationVersion, + assignments: Vec<(IndirectAssignmentCert, CandidateIndex)>, +) -> VersionedValidationProtocol { + match version { + ValidationVersion::V1 => + Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( + protocol_v1::ApprovalDistributionMessage::Assignments(assignments), + )), + ValidationVersion::VStaging => + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments), + )), + } +} + +fn filter_peers_by_version( + peers: &[(PeerId, ValidationVersion)], + version: ValidationVersion, +) -> Vec { + peers + .iter() + .filter(|(_, v)| v == &version) + .map(|(peer_id, _)| *peer_id) + .collect() +} + #[overseer::subsystem(ApprovalDistribution, error=SubsystemError, prefix=self::overseer)] impl ApprovalDistribution { fn start(self, ctx: Context) -> SpawnedSubsystem { diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index b3d44bfe8c1e..dc08260b3ef3 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -167,6 +167,7 @@ async fn setup_gossip_topology( async fn setup_peer_with_view( virtual_overseer: &mut VirtualOverseer, peer_id: &PeerId, + validation_version: ValidationVersion, view: View, ) { overseer_send( @@ -174,7 +175,7 @@ async fn setup_peer_with_view( ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( peer_id.clone(), ObservedRole::Full, - ValidationVersion::V1.into(), + validation_version.into(), None, )), ) @@ -192,13 +193,13 @@ async fn setup_peer_with_view( async fn send_message_from_peer( virtual_overseer: &mut VirtualOverseer, peer_id: &PeerId, - msg: protocol_v1::ApprovalDistributionMessage, + msg: net_protocol::ApprovalDistributionMessage, ) { overseer_send( virtual_overseer, ApprovalDistributionMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( peer_id.clone(), - Versioned::V1(msg), + msg, )), ) .await; } @@ -256,9 +257,9 @@ fn
try_import_the_same_assignment() { let _ = test_harness(State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // setup peers - setup_peer_with_view(overseer, &peer_a, view![]).await; - setup_peer_with_view(overseer, &peer_b, view![hash]).await; - setup_peer_with_view(overseer, &peer_c, view![hash]).await; + setup_peer_with_view(overseer, &peer_a, ValidationVersion::V1, view![]).await; + setup_peer_with_view(overseer, &peer_b, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, &peer_c, ValidationVersion::V1, view![hash]).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -278,7 +279,7 @@ fn try_import_the_same_assignment() { let assignments = vec![(cert.clone(), 0u32)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); - send_message_from_peer(overseer, &peer_a, msg).await; + send_message_from_peer(overseer, &peer_a, Versioned::V1(msg)).await; expect_reputation_change(overseer, &peer_a, COST_UNEXPECTED_MESSAGE).await; @@ -311,11 +312,11 @@ fn try_import_the_same_assignment() { ); // setup new peer - setup_peer_with_view(overseer, &peer_d, view![]).await; + setup_peer_with_view(overseer, &peer_d, ValidationVersion::V1, view![]).await; // send the same assignment from peer_d let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments); - send_message_from_peer(overseer, &peer_d, msg).await; + send_message_from_peer(overseer, &peer_d, Versioned::V1(msg)).await; expect_reputation_change(overseer, &peer_d, COST_UNEXPECTED_MESSAGE).await; expect_reputation_change(overseer, &peer_d, BENEFIT_VALID_MESSAGE).await; @@ -340,7 +341,7 @@ fn spam_attack_results_in_negative_reputation_change() { let _ = test_harness(State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; let peer = &peer_a; - setup_peer_with_view(overseer, peer, view![]).await; + setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![]).await; // new block `hash_b` with 20 candidates let candidates_count = 20; @@ -367,7 +368,7 @@ fn spam_attack_results_in_negative_reputation_change() { .collect(); let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); - send_message_from_peer(overseer, peer, msg.clone()).await; + send_message_from_peer(overseer, peer, Versioned::V1(msg.clone())).await; for i in 0..candidates_count { expect_reputation_change(overseer, peer, COST_UNEXPECTED_MESSAGE).await; @@ -399,7 +400,7 @@ fn spam_attack_results_in_negative_reputation_change() { .await; // send the assignments again - send_message_from_peer(overseer, peer, msg.clone()).await; + send_message_from_peer(overseer, peer, Versioned::V1(msg.clone())).await; // each of them will incur `COST_UNEXPECTED_MESSAGE`, not only the first one for _ in 0..candidates_count { @@ -424,7 +425,7 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { let _ = test_harness(State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; let peer = &peer_a; - setup_peer_with_view(overseer, peer, view![]).await; + setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![]).await; // new block `hash` with 1 candidates let meta = BlockApprovalMeta { @@ -476,12 +477,12 @@ fn peer_sending_us_the_same_we_just_sent_them_is_ok() { // the peer could send us it as well let assignments = vec![(cert, candidate_index)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments); - send_message_from_peer(overseer, 
peer, msg.clone()).await; + send_message_from_peer(overseer, peer, Versioned::V1(msg.clone())).await; assert!(overseer.recv().timeout(TIMEOUT).await.is_none(), "we should not punish the peer"); // send the assignments again - send_message_from_peer(overseer, peer, msg).await; + send_message_from_peer(overseer, peer, Versioned::V1(msg)).await; // now we should expect_reputation_change(overseer, peer, COST_DUPLICATE_MESSAGE).await; @@ -500,9 +501,9 @@ fn import_approval_happy_path() { let _ = test_harness(State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // setup peers - setup_peer_with_view(overseer, &peer_a, view![]).await; - setup_peer_with_view(overseer, &peer_b, view![hash]).await; - setup_peer_with_view(overseer, &peer_c, view![hash]).await; + setup_peer_with_view(overseer, &peer_a, ValidationVersion::V1, view![]).await; + setup_peer_with_view(overseer, &peer_b, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, &peer_c, ValidationVersion::V1, view![hash]).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -547,7 +548,7 @@ fn import_approval_happy_path() { signature: dummy_signature(), }; let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_b, msg).await; + send_message_from_peer(overseer, &peer_b, Versioned::V1(msg)).await; assert_matches!( overseer_recv(overseer).await, @@ -588,8 +589,8 @@ fn import_approval_bad() { let _ = test_harness(State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // setup peers - setup_peer_with_view(overseer, &peer_a, view![]).await; - setup_peer_with_view(overseer, &peer_b, view![hash]).await; + setup_peer_with_view(overseer, &peer_a, ValidationVersion::V1, view![]).await; + setup_peer_with_view(overseer, &peer_b, ValidationVersion::V1, view![hash]).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -615,14 +616,14 @@ fn import_approval_bad() { signature: dummy_signature(), }; let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_b, msg).await; + send_message_from_peer(overseer, &peer_b, Versioned::V1(msg)).await; expect_reputation_change(overseer, &peer_b, COST_UNEXPECTED_MESSAGE).await; // now import an assignment from peer_b let assignments = vec![(cert.clone(), candidate_index)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments); - send_message_from_peer(overseer, &peer_b, msg).await; + send_message_from_peer(overseer, &peer_b, Versioned::V1(msg)).await; assert_matches!( overseer_recv(overseer).await, @@ -641,7 +642,7 @@ fn import_approval_bad() { // and try again let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, &peer_b, msg).await; + send_message_from_peer(overseer, &peer_b, Versioned::V1(msg)).await; assert_matches!( overseer_recv(overseer).await, @@ -782,7 +783,7 @@ fn update_peer_view() { overseer_send(overseer, ApprovalDistributionMessage::DistributeAssignment(cert_b, 0)).await; // connect a peer - setup_peer_with_view(overseer, peer, view![hash_a]).await; + setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash_a]).await; // we should send relevant assignments to the peer assert_matches!( @@ -800,7 +801,7 @@ fn update_peer_view() { virtual_overseer }); - assert_eq!(state.peer_views.get(peer).map(|v| v.finalized_number), 
Some(0)); + assert_eq!(state.peer_data.get(peer).map(|data| data.view.finalized_number), Some(0)); assert_eq!( state .blocks @@ -852,7 +853,7 @@ fn update_peer_view() { virtual_overseer }); - assert_eq!(state.peer_views.get(peer).map(|v| v.finalized_number), Some(2)); + assert_eq!(state.peer_data.get(peer).map(|data| data.view.finalized_number), Some(2)); assert_eq!( state .blocks @@ -882,7 +883,10 @@ fn update_peer_view() { virtual_overseer }); - assert_eq!(state.peer_views.get(peer).map(|v| v.finalized_number), Some(finalized_number)); + assert_eq!( + state.peer_data.get(peer).map(|data| data.view.finalized_number), + Some(finalized_number) + ); assert!(state.blocks.get(&hash_c).unwrap().known_by.get(peer).is_none()); } @@ -897,7 +901,7 @@ fn import_remotely_then_locally() { let _ = test_harness(State::default(), |mut virtual_overseer| async move { let overseer = &mut virtual_overseer; // setup the peer - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; // new block `hash_a` with 1 candidates let meta = BlockApprovalMeta { @@ -917,7 +921,7 @@ fn import_remotely_then_locally() { let cert = fake_assignment_cert(hash, validator_index); let assignments = vec![(cert.clone(), candidate_index)]; let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); - send_message_from_peer(overseer, peer, msg).await; + send_message_from_peer(overseer, peer, Versioned::V1(msg)).await; // send an `Accept` message from the Approval Voting subsystem assert_matches!( @@ -952,7 +956,7 @@ fn import_remotely_then_locally() { signature: dummy_signature(), }; let msg = protocol_v1::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); - send_message_from_peer(overseer, peer, msg).await; + send_message_from_peer(overseer, peer, Versioned::V1(msg)).await; assert_matches!( overseer_recv(overseer).await, @@ -1018,7 +1022,7 @@ fn sends_assignments_even_when_state_is_approved() { .await; // connect the peer. - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; let assignments = vec![(cert.clone(), candidate_index)]; let approvals = vec![approval.clone()]; @@ -1082,7 +1086,7 @@ fn race_condition_in_local_vs_remote_view_update() { }; // This will send a peer view that is ahead of our view - setup_peer_with_view(overseer, peer, view![hash_b]).await; + setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash_b]).await; // Send our view update to include a new head overseer_send( @@ -1103,7 +1107,7 @@ fn race_condition_in_local_vs_remote_view_update() { .collect(); let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone()); - send_message_from_peer(overseer, peer, msg.clone()).await; + send_message_from_peer(overseer, peer, Versioned::V1(msg.clone())).await; // This will handle pending messages being processed let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); @@ -1146,7 +1150,7 @@ fn propagates_locally_generated_assignment_to_both_dimensions() { // Connect all peers. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; } // Set up a gossip topology. @@ -1251,7 +1255,7 @@ fn propagates_assignments_along_unshared_dimension() { // Connect all peers. 
for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; } // Set up a gossip topology. @@ -1287,7 +1291,7 @@ fn propagates_assignments_along_unshared_dimension() { // Issuer of the message is important, not the peer we receive from. // 99 deliberately chosen because it's not in X or Y. - send_message_from_peer(overseer, &peers[99].0, msg).await; + send_message_from_peer(overseer, &peers[99].0, Versioned::V1(msg)).await; assert_matches!( overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( @@ -1336,7 +1340,7 @@ fn propagates_assignments_along_unshared_dimension() { // Issuer of the message is important, not the peer we receive from. // 99 deliberately chosen because it's not in X or Y. - send_message_from_peer(overseer, &peers[99].0, msg).await; + send_message_from_peer(overseer, &peers[99].0, Versioned::V1(msg)).await; assert_matches!( overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( @@ -1393,7 +1397,7 @@ fn propagates_to_required_after_connect() { // Connect all peers except omitted. for (i, (peer, _)) in peers.iter().enumerate() { if !omitted.contains(&i) { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; } } @@ -1482,7 +1486,7 @@ fn propagates_to_required_after_connect() { ); for i in omitted.iter().copied() { - setup_peer_with_view(overseer, &peers[i].0, view![hash]).await; + setup_peer_with_view(overseer, &peers[i].0, ValidationVersion::V1, view![hash]).await; assert_matches!( overseer_recv(overseer).await, @@ -1531,7 +1535,7 @@ fn sends_to_more_peers_after_getting_topology() { // Connect all peers except omitted. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; } // new block `hash_a` with 1 candidates @@ -1683,7 +1687,7 @@ fn originator_aggression_l1() { // Connect all peers except omitted. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; } // new block `hash_a` with 1 candidates @@ -1841,7 +1845,7 @@ fn non_originator_aggression_l1() { // Connect all peers except omitted. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; } // new block `hash_a` with 1 candidates @@ -1875,7 +1879,7 @@ fn non_originator_aggression_l1() { // Issuer of the message is important, not the peer we receive from. // 99 deliberately chosen because it's not in X or Y. - send_message_from_peer(overseer, &peers[99].0, msg).await; + send_message_from_peer(overseer, &peers[99].0, Versioned::V1(msg)).await; assert_matches!( overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( @@ -1946,7 +1950,7 @@ fn non_originator_aggression_l2() { // Connect all peers except omitted. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; } // new block `hash_a` with 1 candidates @@ -1980,7 +1984,7 @@ fn non_originator_aggression_l2() { // Issuer of the message is important, not the peer we receive from. 
// 99 deliberately chosen because it's not in X or Y. - send_message_from_peer(overseer, &peers[99].0, msg).await; + send_message_from_peer(overseer, &peers[99].0, Versioned::V1(msg)).await; assert_matches!( overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( @@ -2105,7 +2109,7 @@ fn resends_messages_periodically() { // Connect all peers. for (peer, _) in &peers { - setup_peer_with_view(overseer, peer, view![hash]).await; + setup_peer_with_view(overseer, peer, ValidationVersion::V1, view![hash]).await; } // Set up a gossip topology. @@ -2140,7 +2144,7 @@ fn resends_messages_periodically() { // Issuer of the message is important, not the peer we receive from. // 99 deliberately chosen because it's not in X or Y. - send_message_from_peer(overseer, &peers[99].0, msg).await; + send_message_from_peer(overseer, &peers[99].0, Versioned::V1(msg)).await; assert_matches!( overseer_recv(overseer).await, AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportAssignment( @@ -2228,3 +2232,122 @@ fn resends_messages_periodically() { virtual_overseer }); } + +/// Tests that peers correctly receive versioned messages. +#[test] +fn import_versioned_approval() { + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + let parent_hash = Hash::repeat_byte(0xFF); + let hash = Hash::repeat_byte(0xAA); + + let _ = test_harness(State::default(), |mut virtual_overseer| async move { + let overseer = &mut virtual_overseer; + // All peers are aware of relay parent. + setup_peer_with_view(overseer, &peer_a, ValidationVersion::VStaging, view![hash]).await; + setup_peer_with_view(overseer, &peer_b, ValidationVersion::V1, view![hash]).await; + setup_peer_with_view(overseer, &peer_c, ValidationVersion::VStaging, view![hash]).await; + + // new block `hash_a` with 1 candidates + let meta = BlockApprovalMeta { + hash, + parent_hash, + number: 1, + candidates: vec![Default::default(); 1], + slot: 1.into(), + session: 1, + }; + let msg = ApprovalDistributionMessage::NewBlocks(vec![meta]); + overseer_send(overseer, msg).await; + + // import an assignment related to `hash` locally + let validator_index = ValidatorIndex(0); + let candidate_index = 0u32; + let cert = fake_assignment_cert(hash, validator_index); + overseer_send( + overseer, + ApprovalDistributionMessage::DistributeAssignment(cert, candidate_index), + ) + .await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( + protocol_v1::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert_eq!(peers, vec![peer_b]); + assert_eq!(assignments.len(), 1); + } + ); + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Assignments(assignments) + )) + )) => { + assert_eq!(peers.len(), 2); + assert!(peers.contains(&peer_a)); + assert!(peers.contains(&peer_c)); + + assert_eq!(assignments.len(), 1); + } + ); + + // send an approval from peer_a + let approval = IndirectSignedApprovalVote { + block_hash: hash, + candidate_index, + validator: validator_index, + signature: dummy_signature(), + }; + let msg =
protocol_vstaging::ApprovalDistributionMessage::Approvals(vec![approval.clone()]); + send_message_from_peer(overseer, &peer_a, Versioned::VStaging(msg)).await; + + assert_matches!( + overseer_recv(overseer).await, + AllMessages::ApprovalVoting(ApprovalVotingMessage::CheckAndImportApproval( + vote, + tx, + )) => { + assert_eq!(vote, approval); + tx.send(ApprovalCheckResult::Accepted).unwrap(); + } + ); + + expect_reputation_change(overseer, &peer_a, BENEFIT_VALID_MESSAGE_FIRST).await; + + // Peers b and c receive versioned approval messages. + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::V1(protocol_v1::ValidationProtocol::ApprovalDistribution( + protocol_v1::ApprovalDistributionMessage::Approvals(approvals) + )) + )) => { + assert_eq!(peers, vec![peer_b]); + assert_eq!(approvals.len(), 1); + } + ); + assert_matches!( + overseer_recv(overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::ApprovalDistribution( + protocol_vstaging::ApprovalDistributionMessage::Approvals(approvals) + )) + )) => { + assert_eq!(peers, vec![peer_c]); + assert_eq!(approvals.len(), 1); + } + ); + virtual_overseer + }); +} diff --git a/node/network/bitfield-distribution/Cargo.toml b/node/network/bitfield-distribution/Cargo.toml index 1cae4a6170f5..ca3c4479f82b 100644 --- a/node/network/bitfield-distribution/Cargo.toml +++ b/node/network/bitfield-distribution/Cargo.toml @@ -5,6 +5,7 @@ authors = ["Parity Technologies "] edition = "2021" [dependencies] +always-assert = "0.1" futures = "0.3.21" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } diff --git a/node/network/bitfield-distribution/src/lib.rs b/node/network/bitfield-distribution/src/lib.rs index a0f82dc5ed1d..56fe2727497a 100644 --- a/node/network/bitfield-distribution/src/lib.rs +++ b/node/network/bitfield-distribution/src/lib.rs @@ -22,6 +22,7 @@ #![deny(unused_crate_dependencies)] +use always_assert::never; use futures::{channel::oneshot, FutureExt}; use polkadot_node_network_protocol::{ @@ -29,7 +30,9 @@ use polkadot_node_network_protocol::{ grid_topology::{ RandomRouting, RequiredRouting, SessionBoundGridTopologyStorage, SessionGridTopology, }, - v1 as protocol_v1, OurView, PeerId, UnifiedReputationChange as Rep, Versioned, View, + peer_set::{ProtocolVersion, ValidationVersion}, + v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, + UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_subsystem::{ jaeger, messages::*, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, @@ -69,25 +72,63 @@ struct BitfieldGossipMessage { } impl BitfieldGossipMessage { - fn into_validation_protocol(self) -> net_protocol::VersionedValidationProtocol { - self.into_network_message().into() + fn into_validation_protocol( + self, + recipient_version: ProtocolVersion, + ) -> net_protocol::VersionedValidationProtocol { + self.into_network_message(recipient_version).into() } - fn into_network_message(self) -> net_protocol::BitfieldDistributionMessage { - Versioned::V1(protocol_v1::BitfieldDistributionMessage::Bitfield( - self.relay_parent, - self.signed_availability.into(), - )) + fn into_network_message( + self, + recipient_version: ProtocolVersion, + ) -> net_protocol::BitfieldDistributionMessage { + match ValidationVersion::try_from(recipient_version).ok() 
{ + Some(ValidationVersion::V1) => + Versioned::V1(protocol_v1::BitfieldDistributionMessage::Bitfield( + self.relay_parent, + self.signed_availability.into(), + )), + Some(ValidationVersion::VStaging) => + Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield( + self.relay_parent, + self.signed_availability.into(), + )), + None => { + never!("Peers should only have supported protocol versions."); + + gum::warn!( + target: LOG_TARGET, + version = ?recipient_version, + "Unknown protocol version provided for message recipient" + ); + + // fall back to v1 to avoid dropping the message + Versioned::V1(protocol_v1::BitfieldDistributionMessage::Bitfield( + self.relay_parent, + self.signed_availability.into(), + )) + }, + } } } +/// Data stored on a per-peer basis. +#[derive(Debug)] +pub struct PeerData { + /// The peer's view. + view: View, + /// The peer's protocol version. + version: ProtocolVersion, +} + /// Data used to track information of peers and relay parents the /// overseer ordered us to work on. #[derive(Default, Debug)] struct ProtocolState { /// Track all active peers and their views /// to determine what is relevant to them. - peer_views: HashMap, + peer_data: HashMap, /// The current and previous gossip topologies topologies: SessionBoundGridTopologyStorage, @@ -334,7 +375,7 @@ async fn handle_bitfield_distribution( ctx, job_data, topology, - &mut state.peer_views, + &mut state.peer_data, validator, msg, required_routing, @@ -353,7 +394,7 @@ async fn relay_message( ctx: &mut Context, job_data: &mut PerRelayParentData, topology: &SessionGridTopology, - peer_views: &mut HashMap, + peers: &mut HashMap, validator: ValidatorId, message: BitfieldGossipMessage, required_routing: RequiredRouting, @@ -371,16 +412,16 @@ async fn relay_message( .await; drop(_span); - let total_peers = peer_views.len(); + let total_peers = peers.len(); let mut random_routing: RandomRouting = Default::default(); let _span = span.child("interested-peers"); // pass on the bitfield distribution to all interested peers - let interested_peers = peer_views + let interested_peers = peers .iter() - .filter_map(|(peer, view)| { + .filter_map(|(peer, data)| { // check interest in the peer in this message's relay parent - if view.contains(&message.relay_parent) { + if data.view.contains(&message.relay_parent) { let message_needed = job_data.message_from_validator_needed_by_peer(&peer, &validator); if message_needed { @@ -395,7 +436,7 @@ async fn relay_message( }; if need_routing { - Some(peer.clone()) + Some((peer.clone(), data.version)) } else { None } @@ -406,9 +447,9 @@ async fn relay_message( None } }) - .collect::>(); + .collect::>(); - interested_peers.iter().for_each(|peer| { + interested_peers.iter().for_each(|(peer, _)| { // track the message as sent for this peer job_data .message_sent_to_peer @@ -427,11 +468,35 @@ async fn relay_message( ); } else { let _span = span.child("gossip"); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - interested_peers, - message.into_validation_protocol(), - )) - .await; + + let filter_by_version = |peers: &[(PeerId, ProtocolVersion)], + version: ValidationVersion| { + peers + .iter() + .filter(|(_, v)| v == &version.into()) + .map(|(peer_id, _)| *peer_id) + .collect::>() + }; + + let v1_interested_peers = filter_by_version(&interested_peers, ValidationVersion::V1); + let vstaging_interested_peers = + filter_by_version(&interested_peers, ValidationVersion::VStaging); + + if !v1_interested_peers.is_empty() {
ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + v1_interested_peers, + message.clone().into_validation_protocol(ValidationVersion::V1.into()), + )) + .await; + } + + if !vstaging_interested_peers.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + vstaging_interested_peers, + message.into_validation_protocol(ValidationVersion::VStaging.into()), + )) + .await + } } } @@ -442,10 +507,20 @@ async fn process_incoming_peer_message( state: &mut ProtocolState, metrics: &Metrics, origin: PeerId, - message: protocol_v1::BitfieldDistributionMessage, + message: net_protocol::BitfieldDistributionMessage, rng: &mut (impl CryptoRng + Rng), ) { - let protocol_v1::BitfieldDistributionMessage::Bitfield(relay_parent, bitfield) = message; + let (relay_parent, bitfield) = match message { + Versioned::V1(protocol_v1::BitfieldDistributionMessage::Bitfield( + relay_parent, + bitfield, + )) => (relay_parent, bitfield), + Versioned::VStaging(protocol_vstaging::BitfieldDistributionMessage::Bitfield( + relay_parent, + bitfield, + )) => (relay_parent, bitfield), + }; + gum::trace!( target: LOG_TARGET, peer = %origin, @@ -543,7 +618,7 @@ async fn process_incoming_peer_message( ctx, job_data, topology, - &mut state.peer_views, + &mut state.peer_data, validator, message, required_routing, @@ -567,15 +642,18 @@ async fn handle_network_msg( let _timer = metrics.time_handle_network_msg(); match bridge_message { - NetworkBridgeEvent::PeerConnected(peer, role, _, _) => { + NetworkBridgeEvent::PeerConnected(peer, role, version, _) => { gum::trace!(target: LOG_TARGET, ?peer, ?role, "Peer connected"); // insert if none already present - state.peer_views.entry(peer).or_default(); + state + .peer_data + .entry(peer) + .or_insert_with(|| PeerData { view: View::default(), version }); }, NetworkBridgeEvent::PeerDisconnected(peer) => { gum::trace!(target: LOG_TARGET, ?peer, "Peer disconnected"); // get rid of superfluous data - state.peer_views.remove(&peer); + state.peer_data.remove(&peer); }, NetworkBridgeEvent::NewGossipTopology(gossip_topology) => { let session_index = gossip_topology.session; @@ -590,12 +668,21 @@ async fn handle_network_msg( ); for new_peer in newly_added { - // in case we already knew that peer in the past - // it might have had an existing view, we use to initialize - // and minimize the delta on `PeerViewChange` to be sent - if let Some(old_view) = state.peer_views.remove(&new_peer) { - handle_peer_view_change(ctx, state, new_peer, old_view, rng).await; - } + let old_view = match state.peer_data.get_mut(&new_peer) { + Some(d) => { + // in case we already knew that peer in the past + // it might have had an existing view, which we use to initialize + // and minimize the delta on `PeerViewChange` to be sent + std::mem::replace(&mut d.view, Default::default()) + }, + None => { + // For peers which are currently unknown, we'll send topology-related + // messages to them when they connect and send their first view update.
+ continue + }, + }; + + handle_peer_view_change(ctx, state, new_peer, old_view, rng).await; } }, NetworkBridgeEvent::PeerViewChange(peerid, new_view) => { @@ -606,7 +693,7 @@ async fn handle_network_msg( gum::trace!(target: LOG_TARGET, ?new_view, "Our view change"); handle_our_view_change(state, new_view); }, - NetworkBridgeEvent::PeerMessage(remote, Versioned::V1(message)) => + NetworkBridgeEvent::PeerMessage(remote, message) => process_incoming_peer_message(ctx, state, metrics, remote, message, rng).await, } } @@ -635,6 +722,9 @@ fn handle_our_view_change(state: &mut ProtocolState, view: OurView) { // Send the difference between two views which were not sent // to that particular peer. +// +// This requires that there is an entry in the `peer_data` field for the +// peer. #[overseer::contextbounds(BitfieldDistribution, prefix=self::overseer)] async fn handle_peer_view_change( ctx: &mut Context, @@ -643,13 +733,20 @@ async fn handle_peer_view_change( view: View, rng: &mut (impl CryptoRng + Rng), ) { - let added = state - .peer_views - .entry(origin.clone()) - .or_default() - .replace_difference(view) - .cloned() - .collect::>(); + let peer_data = match state.peer_data.get_mut(&origin) { + None => { + gum::warn!( + target: LOG_TARGET, + peer = ?origin, + "Attempted to update peer view for unknown peer." + ); + + return + }, + Some(pd) => pd, + }; + + let added = peer_data.view.replace_difference(view).cloned().collect::>(); let topology = state.topologies.get_current_topology(); let is_gossip_peer = topology.route_to_peer(RequiredRouting::GridXY, &origin); @@ -716,6 +813,9 @@ async fn send_tracked_gossip_message( "Sending gossip message" ); + let version = + if let Some(peer_data) = state.peer_data.get(&dest) { peer_data.version } else { return }; + job_data .message_sent_to_peer .entry(dest.clone()) @@ -724,7 +824,7 @@ async fn send_tracked_gossip_message( ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( vec![dest], - message.into_validation_protocol(), + message.into_validation_protocol(version), )) .await; } diff --git a/node/network/bitfield-distribution/src/tests.rs b/node/network/bitfield-distribution/src/tests.rs index f3894d61c5f9..bf58d9363a64 100644 --- a/node/network/bitfield-distribution/src/tests.rs +++ b/node/network/bitfield-distribution/src/tests.rs @@ -51,6 +51,10 @@ fn dummy_rng() -> ChaCha12Rng { rand_chacha::ChaCha12Rng::seed_from_u64(12345) } +fn peer_data_v1(view: View) -> PeerData { + PeerData { view, version: ValidationVersion::V1.into() } +} + /// A very limited state, only interested in the relay parent of the /// given message, which must be signed by `validator` and a set of peers /// which are also only interested in that relay parent. 
@@ -79,7 +83,11 @@ fn prewarmed_state( span: PerLeafSpan::new(Arc::new(jaeger::Span::Disabled), "test"), }, }, - peer_views: peers.iter().cloned().map(|peer| (peer, view!(relay_parent))).collect(), + peer_data: peers + .iter() + .cloned() + .map(|peer| (peer, peer_data_v1(view![relay_parent]))) + .collect(), topologies, view: our_view!(relay_parent), } @@ -211,7 +219,10 @@ fn receive_invalid_signature() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), invalid_msg.into_network_message()), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + invalid_msg.into_network_message(ValidationVersion::V1.into()) + ), &mut rng, )); @@ -222,7 +233,10 @@ fn receive_invalid_signature() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), invalid_msg_2.into_network_message()), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + invalid_msg_2.into_network_message(ValidationVersion::V1.into()) + ), &mut rng, )); // reputation change due to invalid signature @@ -256,7 +270,7 @@ fn receive_invalid_validator_index() { let (mut state, signing_context, keystore, validator) = state_with_view(our_view![hash_a, hash_b], hash_a.clone()); - state.peer_views.insert(peer_b.clone(), view![hash_a]); + state.peer_data.insert(peer_b.clone(), peer_data_v1(view![hash_a])); let payload = AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); let signed = executor::block_on(Signed::::sign( @@ -282,7 +296,10 @@ fn receive_invalid_validator_index() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.into_network_message()), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.into_network_message(ValidationVersion::V1.into()) + ), &mut rng, )); @@ -345,7 +362,10 @@ fn receive_duplicate_messages() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -378,7 +398,10 @@ fn receive_duplicate_messages() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_a.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -397,7 +420,10 @@ fn receive_duplicate_messages() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -443,8 +469,8 @@ fn do_not_relay_message_twice() { .flatten() .expect("should be signed"); - state.peer_views.insert(peer_b.clone(), view![hash]); - state.peer_views.insert(peer_a.clone(), view![hash]); + state.peer_data.insert(peer_b.clone(), peer_data_v1(view![hash])); + state.peer_data.insert(peer_a.clone(), peer_data_v1(view![hash])); let msg = BitfieldGossipMessage { relay_parent: hash.clone(), @@ -464,7 +490,7 @@ fn do_not_relay_message_twice() { &mut ctx, state.per_relay_parent.get_mut(&hash).unwrap(), &gossip_peers, - &mut state.peer_views, + &mut state.peer_data, validator.clone(), msg.clone(), RequiredRouting::GridXY, @@ -491,7 +517,7 @@ fn do_not_relay_message_twice() { assert_eq!(2, peers.len()); assert!(peers.contains(&peer_a)); assert!(peers.contains(&peer_b)); - 
assert_eq!(send_msg, msg.clone().into_validation_protocol()); + assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::V1.into())); } ); @@ -500,7 +526,7 @@ fn do_not_relay_message_twice() { &mut ctx, state.per_relay_parent.get_mut(&hash).unwrap(), &gossip_peers, - &mut state.peer_views, + &mut state.peer_data, validator.clone(), msg.clone(), RequiredRouting::GridXY, @@ -587,14 +613,17 @@ fn changing_view() { &mut rng, )); - assert!(state.peer_views.contains_key(&peer_b)); + assert!(state.peer_data.contains_key(&peer_b)); // recv a first message from the network launch!(handle_network_msg( &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -629,8 +658,11 @@ fn changing_view() { &mut rng, )); - assert!(state.peer_views.contains_key(&peer_b)); - assert_eq!(state.peer_views.get(&peer_b).expect("Must contain value for peer B"), &view![]); + assert!(state.peer_data.contains_key(&peer_b)); + assert_eq!( + &state.peer_data.get(&peer_b).expect("Must contain value for peer B").view, + &view![] + ); // on rx of the same message, since we are not interested, // should give penalty @@ -638,7 +670,10 @@ fn changing_view() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -670,7 +705,10 @@ fn changing_view() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_a.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peer_a.clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -716,8 +754,8 @@ fn do_not_send_message_back_to_origin() { .flatten() .expect("should be signed"); - state.peer_views.insert(peer_b.clone(), view![hash]); - state.peer_views.insert(peer_a.clone(), view![hash]); + state.peer_data.insert(peer_b.clone(), peer_data_v1(view![hash])); + state.peer_data.insert(peer_a.clone(), peer_data_v1(view![hash])); let msg = BitfieldGossipMessage { relay_parent: hash.clone(), @@ -734,7 +772,10 @@ fn do_not_send_message_back_to_origin() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peer_b.clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peer_b.clone(), + msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -756,7 +797,7 @@ fn do_not_send_message_back_to_origin() { ) => { assert_eq!(1, peers.len()); assert!(peers.contains(&peer_a)); - assert_eq!(send_msg, msg.clone().into_validation_protocol()); + assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::V1.into())); } ); @@ -822,7 +863,7 @@ fn topology_test() { .expect("should be signed"); peers_x.iter().chain(peers_y.iter()).for_each(|peer| { - state.peer_views.insert(peer.clone(), view![hash]); + state.peer_data.insert(peer.clone(), peer_data_v1(view![hash])); }); let msg = BitfieldGossipMessage { @@ -840,7 +881,10 @@ fn topology_test() { &mut ctx, &mut state, &Default::default(), - NetworkBridgeEvent::PeerMessage(peers_x[0].clone(), msg.clone().into_network_message(),), + NetworkBridgeEvent::PeerMessage( + peers_x[0].clone(), + 
msg.clone().into_network_message(ValidationVersion::V1.into()), + ), &mut rng, )); @@ -867,7 +911,7 @@ fn topology_test() { assert!(topology.peers_x.iter().filter(|peer| peers.contains(&peer)).count() == 4); // Must never include originator assert!(!peers.contains(&peers_x[0])); - assert_eq!(send_msg, msg.clone().into_validation_protocol()); + assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::V1.into())); } ); @@ -942,3 +986,128 @@ fn need_message_works() { // also not ok for Bob assert!(false == pretend_send(&mut state, peer_b, &validator_set[1])); } + +#[test] +fn network_protocol_versioning() { + let hash_a: Hash = [0; 32].into(); + let hash_b: Hash = [1; 32].into(); + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + + let peers = [ + (peer_a, ValidationVersion::VStaging), + (peer_b, ValidationVersion::V1), + (peer_c, ValidationVersion::VStaging), + ]; + + // validator 0 key pair + let (mut state, signing_context, keystore, validator) = + state_with_view(our_view![hash_a, hash_b], hash_a); + + let pool = sp_core::testing::TaskExecutor::new(); + let (mut ctx, mut handle) = make_subsystem_context::(pool); + let mut rng = dummy_rng(); + + executor::block_on(async move { + // create a signed message by validator 0 + let payload = AvailabilityBitfield(bitvec![u8, bitvec::order::Lsb0; 1u8; 32]); + let signed_bitfield = Signed::::sign( + &keystore, + payload, + &signing_context, + ValidatorIndex(0), + &validator, + ) + .await + .ok() + .flatten() + .expect("should be signed"); + let msg = BitfieldGossipMessage { + relay_parent: hash_a, + signed_availability: signed_bitfield.clone(), + }; + + for (peer, protocol_version) in peers { + launch!(handle_network_msg( + &mut ctx, + &mut state, + &Default::default(), + NetworkBridgeEvent::PeerConnected( + peer, + ObservedRole::Full, + protocol_version.into(), + None + ), + &mut rng, + )); + + launch!(handle_network_msg( + &mut ctx, + &mut state, + &Default::default(), + NetworkBridgeEvent::PeerViewChange(peer, view![hash_a, hash_b]), + &mut rng, + )); + + assert!(state.peer_data.contains_key(&peer)); + } + + launch!(handle_network_msg( + &mut ctx, + &mut state, + &Default::default(), + NetworkBridgeEvent::PeerMessage( + peer_a, + msg.clone().into_network_message(ValidationVersion::VStaging.into()), + ), + &mut rng, + )); + + // gossip to the overseer + assert_matches!( + handle.recv().await, + AllMessages::Provisioner(ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::Bitfield(hash, signed) + )) => { + assert_eq!(hash, hash_a); + assert_eq!(signed, signed_bitfield) + } + ); + + // v1 gossip + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::SendValidationMessage(peers, send_msg), + ) => { + assert_eq!(peers, vec![peer_b]); + assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::V1.into())); + } + ); + + // vstaging gossip + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::SendValidationMessage(peers, send_msg), + ) => { + assert_eq!(peers, vec![peer_c]); + assert_eq!(send_msg, msg.clone().into_validation_protocol(ValidationVersion::VStaging.into())); + } + ); + + // reputation change + assert_matches!( + handle.recv().await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::ReportPeer(peer, rep) + ) => { + assert_eq!(peer, peer_a); + assert_eq!(rep, BENEFIT_VALID_MESSAGE_FIRST) + } + ); + }); +} diff --git 
a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index 9b326cbbfb38..15dc3c25dcf2 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -59,6 +59,9 @@ pub(crate) fn send_message( ) where M: Encode + Clone, { + if peers.is_empty() { + return + } let message = { let encoded = message.encode(); metrics.on_notification_sent(peer_set, version, encoded.len(), peers.len()); diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index b93024b43dfb..1a5bf97e73d0 100644 --- a/node/network/bridge/src/rx/mod.rs +++ b/node/network/bridge/src/rx/mod.rs @@ -31,7 +31,8 @@ use polkadot_node_network_protocol::{ CollationVersion, PeerSet, PeerSetProtocolNames, PerPeerSet, ProtocolVersion, ValidationVersion, }, - v1 as protocol_v1, ObservedRole, OurView, PeerId, UnifiedReputationChange as Rep, View, + v1 as protocol_v1, vstaging as protocol_vstaging, ObservedRole, OurView, PeerId, + UnifiedReputationChange as Rep, View, }; use polkadot_node_subsystem::{ @@ -267,15 +268,32 @@ where ) .await; - send_message( - &mut network_service, - vec![peer], - PeerSet::Validation, - version, - &peerset_protocol_names, - WireMessage::::ViewUpdate(local_view), - &metrics, - ); + match ValidationVersion::try_from(version) + .expect("try_get_protocol has already checked version is known; qed") + { + ValidationVersion::V1 => send_message( + &mut network_service, + vec![peer], + PeerSet::Validation, + version, + &peerset_protocol_names, + WireMessage::::ViewUpdate( + local_view, + ), + &metrics, + ), + ValidationVersion::VStaging => send_message( + &mut network_service, + vec![peer], + PeerSet::Validation, + version, + &peerset_protocol_names, + WireMessage::::ViewUpdate( + local_view, + ), + &metrics, + ), + } }, PeerSet::Collation => { dispatch_collation_events_to_all( @@ -292,15 +310,32 @@ where ) .await; - send_message( - &mut network_service, - vec![peer], - PeerSet::Collation, - version, - &peerset_protocol_names, - WireMessage::::ViewUpdate(local_view), - &metrics, - ); + match CollationVersion::try_from(version) + .expect("try_get_protocol has already checked version is known; qed") + { + CollationVersion::V1 => send_message( + &mut network_service, + vec![peer], + PeerSet::Collation, + version, + &peerset_protocol_names, + WireMessage::::ViewUpdate( + local_view, + ), + &metrics, + ), + CollationVersion::VStaging => send_message( + &mut network_service, + vec![peer], + PeerSet::Collation, + version, + &peerset_protocol_names, + WireMessage::::ViewUpdate( + local_view, + ), + &metrics, + ), + } }, } }, @@ -438,30 +473,39 @@ where ); if !v_messages.is_empty() { - let (events, reports) = - if expected_versions[PeerSet::Validation] == - Some(ValidationVersion::V1.into()) - { - handle_v1_peer_messages::( - remote.clone(), - PeerSet::Validation, - &mut shared.0.lock().validation_peers, - v_messages, - &metrics, - ) - } else { - gum::warn!( - target: LOG_TARGET, - version = ?expected_versions[PeerSet::Validation], - "Major logic bug. Peer somehow has unsupported validation protocol version." 
- ); + let (events, reports) = if expected_versions[PeerSet::Validation] == + Some(ValidationVersion::V1.into()) + { + handle_peer_messages::( + remote.clone(), + PeerSet::Validation, + &mut shared.0.lock().validation_peers, + v_messages, + &metrics, + ) + } else if expected_versions[PeerSet::Validation] == + Some(ValidationVersion::VStaging.into()) + { + handle_peer_messages::( + remote.clone(), + PeerSet::Validation, + &mut shared.0.lock().validation_peers, + v_messages, + &metrics, + ) + } else { + gum::warn!( + target: LOG_TARGET, + version = ?expected_versions[PeerSet::Validation], + "Major logic bug. Peer somehow has unsupported validation protocol version." + ); - never!("Only version 1 is supported; peer set connection checked above; qed"); + never!("Only versions 1 and 2 are supported; peer set connection checked above; qed"); - // If a peer somehow triggers this, we'll disconnect them - // eventually. - (Vec::new(), vec![UNCONNECTED_PEERSET_COST]) - }; + // If a peer somehow triggers this, we'll disconnect them + // eventually. + (Vec::new(), vec![UNCONNECTED_PEERSET_COST]) + }; for report in reports { network_service.report_peer(remote.clone(), report); @@ -471,30 +515,39 @@ where } if !c_messages.is_empty() { - let (events, reports) = - if expected_versions[PeerSet::Collation] == - Some(CollationVersion::V1.into()) - { - handle_v1_peer_messages::( - remote.clone(), - PeerSet::Collation, - &mut shared.0.lock().collation_peers, - c_messages, - &metrics, - ) - } else { - gum::warn!( - target: LOG_TARGET, - version = ?expected_versions[PeerSet::Collation], - "Major logic bug. Peer somehow has unsupported collation protocol version." - ); + let (events, reports) = if expected_versions[PeerSet::Collation] == + Some(CollationVersion::V1.into()) + { + handle_peer_messages::( + remote.clone(), + PeerSet::Collation, + &mut shared.0.lock().collation_peers, + c_messages, + &metrics, + ) + } else if expected_versions[PeerSet::Collation] == + Some(CollationVersion::VStaging.into()) + { + handle_peer_messages::( + remote.clone(), + PeerSet::Collation, + &mut shared.0.lock().collation_peers, + c_messages, + &metrics, + ) + } else { + gum::warn!( + target: LOG_TARGET, + version = ?expected_versions[PeerSet::Collation], + "Major logic bug. Peer somehow has unsupported collation protocol version." + ); - never!("Only version 1 is supported; peer set connection checked above; qed"); + never!("Only versions 1 and 2 are supported; peer set connection checked above; qed"); - // If a peer somehow triggers this, we'll disconnect them - // eventually. - (Vec::new(), vec![UNCONNECTED_PEERSET_COST]) - }; + // If a peer somehow triggers this, we'll disconnect them + // eventually. 
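The `handle_peer_messages` calls above are generic over the decoded message type, which is what lets one handler replace the old per-version `handle_v1_peer_messages`: the version check happens once, at the call site, and only selects a type parameter. A minimal self-contained sketch of that shape, with a hand-rolled `Decode` trait and stand-in message types rather than the real `parity-scale-codec` and bridge APIs:

trait Decode: Sized {
    fn decode(bytes: &[u8]) -> Option<Self>;
}

struct V1Msg(u8);
struct VStagingMsg(u8);

impl Decode for V1Msg {
    fn decode(b: &[u8]) -> Option<Self> {
        b.first().copied().map(V1Msg)
    }
}

impl Decode for VStagingMsg {
    fn decode(b: &[u8]) -> Option<Self> {
        b.first().copied().map(VStagingMsg)
    }
}

// One generic handler serves every protocol version; only the call site
// needs to know which concrete wire type the peer negotiated.
fn handle_peer_messages<M: Decode>(raw: &[Vec<u8>]) -> Vec<M> {
    raw.iter().filter_map(|b| M::decode(b)).collect()
}

fn route(version: u32, raw: &[Vec<u8>]) {
    match version {
        1 => drop(handle_peer_messages::<V1Msg>(raw)),
        2 => drop(handle_peer_messages::<VStagingMsg>(raw)),
        _ => { /* unknown version: report and disconnect the peer */ },
    }
}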
+ (Vec::new(), vec![UNCONNECTED_PEERSET_COST]) + }; for report in reports { network_service.report_peer(remote.clone(), report); @@ -719,14 +772,38 @@ fn update_our_view( } ( - shared.validation_peers.keys().cloned().collect::>(), - shared.collation_peers.keys().cloned().collect::>(), + shared + .validation_peers + .iter() + .map(|(peer_id, data)| (peer_id.clone(), data.version)) + .collect::>(), + shared + .collation_peers + .iter() + .map(|(peer_id, data)| (peer_id.clone(), data.version)) + .collect::>(), ) }; + let filter_by_version = |peers: &[(PeerId, ProtocolVersion)], version| { + peers + .iter() + .filter(|(_, v)| v == &version) + .map(|(p, _)| p.clone()) + .collect::>() + }; + + let v1_validation_peers = filter_by_version(&validation_peers, ValidationVersion::V1.into()); + let v1_collation_peers = filter_by_version(&collation_peers, CollationVersion::V1.into()); + + let vstaging_validation_peers = + filter_by_version(&validation_peers, ValidationVersion::VStaging.into()); + let vstaging_collation_peers = + filter_by_version(&collation_peers, ValidationVersion::VStaging.into()); + send_validation_message_v1( net, - validation_peers, + v1_validation_peers, peerset_protocol_names, WireMessage::ViewUpdate(new_view.clone()), metrics, @@ -734,7 +811,23 @@ fn update_our_view( send_collation_message_v1( net, - collation_peers, + v1_collation_peers, + peerset_protocol_names, + WireMessage::ViewUpdate(new_view.clone()), + metrics, + ); + + send_validation_message_vstaging( + net, + vstaging_validation_peers, + peerset_protocol_names, + WireMessage::ViewUpdate(new_view.clone()), + metrics, + ); + + send_collation_message_vstaging( + net, + vstaging_collation_peers, peerset_protocol_names, WireMessage::ViewUpdate(new_view), metrics, @@ -758,7 +851,7 @@ fn update_our_view( // Handle messages on a specific v1 peer-set. The peer is expected to be connected on that // peer-set. 
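Splitting the broadcast set by negotiated version, as `update_our_view` does with `filter_by_version` above, is plain partitioning. A self-contained sketch with integer stand-ins for `PeerId` and `ProtocolVersion`:

fn filter_by_version(peers: &[(u64, u32)], version: u32) -> Vec<u64> {
    peers.iter().filter(|(_, v)| *v == version).map(|(p, _)| *p).collect()
}

fn main() {
    // Peers 10 and 12 negotiated v1; peer 11 speaks the staging version.
    let peers = [(10, 1), (11, 2), (12, 1)];
    assert_eq!(filter_by_version(&peers, 1), vec![10, 12]);
    assert_eq!(filter_by_version(&peers, 2), vec![11]);
    // Each group then gets its `ViewUpdate` in the matching wire encoding,
    // so no peer is ever sent a message framed for a different version.
}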
-fn handle_v1_peer_messages>( +fn handle_peer_messages>( peer: PeerId, peer_set: PeerSet, peers: &mut HashMap, @@ -845,6 +938,42 @@ fn send_collation_message_v1( ); } +fn send_validation_message_vstaging( + net: &mut impl Network, + peers: Vec, + protocol_names: &PeerSetProtocolNames, + message: WireMessage, + metrics: &Metrics, +) { + send_message( + net, + peers, + PeerSet::Validation, + ValidationVersion::VStaging.into(), + protocol_names, + message, + metrics, + ); +} + +fn send_collation_message_vstaging( + net: &mut impl Network, + peers: Vec, + protocol_names: &PeerSetProtocolNames, + message: WireMessage, + metrics: &Metrics, +) { + send_message( + net, + peers, + PeerSet::Collation, + CollationVersion::VStaging.into(), + protocol_names, + message, + metrics, + ); +} + async fn dispatch_validation_event_to_all( event: NetworkBridgeEvent, ctx: &mut impl overseer::NetworkBridgeRxSenderTrait, diff --git a/node/network/bridge/src/rx/tests.rs b/node/network/bridge/src/rx/tests.rs index d4353c0342a5..dc22bf66bf22 100644 --- a/node/network/bridge/src/rx/tests.rs +++ b/node/network/bridge/src/rx/tests.rs @@ -25,6 +25,7 @@ use parking_lot::Mutex; use std::{ collections::HashSet, sync::atomic::{AtomicBool, Ordering}, + task::Poll, }; use sc_network::{Event as NetworkEvent, IfDisconnected, ProtocolName}; @@ -46,7 +47,7 @@ use polkadot_node_subsystem_test_helpers::{ SingleItemSink, SingleItemStream, TestSubsystemContextHandle, }; use polkadot_node_subsystem_util::metered; -use polkadot_primitives::v2::{AuthorityDiscoveryId, Hash}; +use polkadot_primitives::v2::{AuthorityDiscoveryId, CandidateHash, Hash}; use sc_network::Multiaddr; use sp_keyring::Sr25519Keyring; @@ -136,8 +137,7 @@ impl Network for TestNetwork { } fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { - let (peer_set, version) = self.protocol_names.try_get_protocol(&protocol).unwrap(); - assert_eq!(version, peer_set.get_main_version()); + let (peer_set, _) = self.protocol_names.try_get_protocol(&protocol).unwrap(); self.action_tx .lock() @@ -146,8 +146,7 @@ impl Network for TestNetwork { } fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec) { - let (peer_set, version) = self.protocol_names.try_get_protocol(&protocol).unwrap(); - assert_eq!(version, peer_set.get_main_version()); + let (peer_set, _) = self.protocol_names.try_get_protocol(&protocol).unwrap(); self.action_tx .lock() @@ -189,10 +188,17 @@ impl TestNetworkHandle { v } - async fn connect_peer(&mut self, peer: PeerId, peer_set: PeerSet, role: ObservedRole) { + async fn connect_peer( + &mut self, + peer: PeerId, + protocol_version: ValidationVersion, + peer_set: PeerSet, + role: ObservedRole, + ) { + let protocol_version = ProtocolVersion::from(protocol_version); self.send_network_event(NetworkEvent::NotificationStreamOpened { remote: peer, - protocol: self.protocol_names.get_main_name(peer_set), + protocol: self.protocol_names.get_name(peer_set, protocol_version), negotiated_fallback: None, role: role.into(), }) @@ -404,10 +410,20 @@ fn send_our_view_upon_connection() { handle.await_mode_switch().await; network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle - .connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Collation, + ObservedRole::Full, + ) .await; let view = view![head]; @@ -451,10 
+467,20 @@ fn sends_view_updates_to_peers() { handle.await_mode_switch().await; network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer_a.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle - .connect_peer(peer_b.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer( + peer_b.clone(), + ValidationVersion::V1, + PeerSet::Collation, + ObservedRole::Full, + ) .await; let actions = network_handle.next_network_actions(2).await; @@ -512,10 +538,20 @@ fn do_not_send_view_update_until_synced() { assert_ne!(peer_a, peer_b); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer_a.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle - .connect_peer(peer_b.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer( + peer_b.clone(), + ValidationVersion::V1, + PeerSet::Collation, + ObservedRole::Full, + ) .await; { @@ -605,10 +641,20 @@ fn do_not_send_view_update_when_only_finalized_block_changed() { let peer_b = PeerId::random(); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer_a.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle - .connect_peer(peer_b.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer_b.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; let hash_a = Hash::repeat_byte(1); @@ -664,7 +710,12 @@ fn peer_view_updates_sent_via_overseer() { let peer = PeerId::random(); network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; let view = view![Hash::repeat_byte(1)]; @@ -714,7 +765,12 @@ fn peer_messages_sent_via_overseer() { let peer = PeerId::random(); network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; // bridge will inform about all connected peers. @@ -786,10 +842,20 @@ fn peer_disconnect_from_just_one_peerset() { let peer = PeerId::random(); network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle - .connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Collation, + ObservedRole::Full, + ) .await; // bridge will inform about all connected peers. @@ -879,10 +945,20 @@ fn relays_collation_protocol_messages() { let peer_b = PeerId::random(); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer_a.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle - .connect_peer(peer_b.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer( + peer_b.clone(), + ValidationVersion::V1, + PeerSet::Collation, + ObservedRole::Full, + ) .await; // bridge will inform about all connected peers. 
@@ -982,10 +1058,20 @@ fn different_views_on_different_peer_sets() { let peer = PeerId::random(); network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle - .connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Collation, + ObservedRole::Full, + ) .await; // bridge will inform about all connected peers. @@ -1069,7 +1155,12 @@ fn sent_views_include_finalized_number_update() { let peer_a = PeerId::random(); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer_a.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; let hash_a = Hash::repeat_byte(1); @@ -1114,7 +1205,12 @@ fn view_finalized_number_can_not_go_down() { let peer_a = PeerId::random(); network_handle - .connect_peer(peer_a.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer_a.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .await; network_handle @@ -1197,3 +1293,162 @@ fn our_view_updates_decreasing_order_and_limited_to_max() { virtual_overseer }); } + +#[test] +fn network_protocol_versioning_view_update() { + let (oracle, handle) = make_sync_oracle(false); + test_harness(Box::new(oracle), |test_harness| async move { + let TestHarness { mut network_handle, mut virtual_overseer } = test_harness; + + let peer_ids: Vec<_> = (0..4).map(|_| PeerId::random()).collect(); + let peers = [ + (peer_ids[0], PeerSet::Validation, ValidationVersion::VStaging), + (peer_ids[1], PeerSet::Collation, ValidationVersion::V1), + (peer_ids[2], PeerSet::Validation, ValidationVersion::V1), + (peer_ids[3], PeerSet::Collation, ValidationVersion::VStaging), + ]; + + let head = Hash::repeat_byte(1); + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::start_work(ActivatedLeaf { + hash: head, + number: 1, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }), + ))) + .await; + + handle.await_mode_switch().await; + + for &(peer_id, peer_set, version) in &peers { + network_handle + .connect_peer(peer_id, version, peer_set, ObservedRole::Full) + .await; + } + + let view = view![head]; + let actions = network_handle.next_network_actions(4).await; + + for &(peer_id, peer_set, version) in &peers { + let wire_msg = match version { + ValidationVersion::V1 => + WireMessage::::ViewUpdate(view.clone()) + .encode(), + ValidationVersion::VStaging => + WireMessage::::ViewUpdate(view.clone()) + .encode(), + }; + assert_network_actions_contains( + &actions, + &NetworkAction::WriteNotification(peer_id, peer_set, wire_msg), + ); + } + + virtual_overseer + }); +} + +#[test] +fn network_protocol_versioning_subsystem_msg() { + let (oracle, _handle) = make_sync_oracle(false); + test_harness(Box::new(oracle), |test_harness| async move { + let TestHarness { mut network_handle, mut virtual_overseer } = test_harness; + + let peer = PeerId::random(); + + network_handle + .connect_peer( + peer.clone(), + ValidationVersion::VStaging, + PeerSet::Validation, + ObservedRole::Full, + ) + .await; + + // bridge will inform about all connected peers. 
+ { + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerConnected( + peer.clone(), + ObservedRole::Full, + ValidationVersion::VStaging.into(), + None, + ), + &mut virtual_overseer, + ) + .await; + + assert_sends_validation_event_to_all( + NetworkBridgeEvent::PeerViewChange(peer.clone(), View::default()), + &mut virtual_overseer, + ) + .await; + } + + let approval_distribution_message = + protocol_vstaging::ApprovalDistributionMessage::Approvals(Vec::new()); + + let msg = protocol_vstaging::ValidationProtocol::ApprovalDistribution( + approval_distribution_message.clone(), + ); + + network_handle + .peer_message( + peer.clone(), + PeerSet::Validation, + WireMessage::ProtocolMessage(msg.clone()).encode(), + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ApprovalDistribution( + ApprovalDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerMessage(p, Versioned::VStaging(m)) + ) + ) => { + assert_eq!(p, peer); + assert_eq!(m, approval_distribution_message); + } + ); + + let metadata = protocol_vstaging::StatementMetadata { + relay_parent: Hash::zero(), + candidate_hash: CandidateHash::default(), + signed_by: ValidatorIndex(0), + signature: sp_core::crypto::UncheckedFrom::unchecked_from([1u8; 64]), + }; + let statement_distribution_message = + protocol_vstaging::StatementDistributionMessage::LargeStatement(metadata); + let msg = protocol_vstaging::ValidationProtocol::StatementDistribution( + statement_distribution_message.clone(), + ); + + network_handle + .peer_message( + peer.clone(), + PeerSet::Validation, + WireMessage::ProtocolMessage(msg.clone()).encode(), + ) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerMessage(p, Versioned::VStaging(m)) + ) + ) => { + assert_eq!(p, peer); + assert_eq!(m, statement_distribution_message); + } + ); + + // No more messages. 
+ assert_matches!(futures::poll!(virtual_overseer.recv().boxed()), Poll::Pending); + + virtual_overseer + }); +} diff --git a/node/network/bridge/src/tx/mod.rs b/node/network/bridge/src/tx/mod.rs index 47f095fdf273..b3b39f0f7f0e 100644 --- a/node/network/bridge/src/tx/mod.rs +++ b/node/network/bridge/src/tx/mod.rs @@ -20,7 +20,7 @@ use super::*; use polkadot_node_network_protocol::{ peer_set::{CollationVersion, PeerSet, PeerSetProtocolNames, ValidationVersion}, request_response::ReqProtocolNames, - v1 as protocol_v1, PeerId, Versioned, + v1 as protocol_v1, vstaging as protocol_vstaging, PeerId, Versioned, }; use polkadot_node_subsystem::{ @@ -183,6 +183,13 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), + Versioned::VStaging(msg) => send_validation_message_vstaging( + &mut network_service, + peers, + peerset_protocol_names, + WireMessage::ProtocolMessage(msg), + &metrics, + ), } }, NetworkBridgeTxMessage::SendValidationMessages(msgs) => { @@ -201,6 +208,13 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), + Versioned::VStaging(msg) => send_validation_message_vstaging( + &mut network_service, + peers, + peerset_protocol_names, + WireMessage::ProtocolMessage(msg), + &metrics, + ), } } }, @@ -219,6 +233,13 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), + Versioned::VStaging(msg) => send_collation_message_vstaging( + &mut network_service, + peers, + peerset_protocol_names, + WireMessage::ProtocolMessage(msg), + &metrics, + ), } }, NetworkBridgeTxMessage::SendCollationMessages(msgs) => { @@ -237,6 +258,13 @@ where WireMessage::ProtocolMessage(msg), &metrics, ), + Versioned::VStaging(msg) => send_collation_message_vstaging( + &mut network_service, + peers, + peerset_protocol_names, + WireMessage::ProtocolMessage(msg), + &metrics, + ), } } }, @@ -367,3 +395,39 @@ fn send_collation_message_v1( metrics, ); } + +fn send_validation_message_vstaging( + net: &mut impl Network, + peers: Vec, + protocol_names: &PeerSetProtocolNames, + message: WireMessage, + metrics: &Metrics, +) { + send_message( + net, + peers, + PeerSet::Validation, + ValidationVersion::VStaging.into(), + protocol_names, + message, + metrics, + ); +} + +fn send_collation_message_vstaging( + net: &mut impl Network, + peers: Vec, + protocol_names: &PeerSetProtocolNames, + message: WireMessage, + metrics: &Metrics, +) { + send_message( + net, + peers, + PeerSet::Collation, + CollationVersion::VStaging.into(), + protocol_names, + message, + metrics, + ); +} diff --git a/node/network/bridge/src/tx/tests.rs b/node/network/bridge/src/tx/tests.rs index c001457d592b..b424eda79ec1 100644 --- a/node/network/bridge/src/tx/tests.rs +++ b/node/network/bridge/src/tx/tests.rs @@ -124,8 +124,7 @@ impl Network for TestNetwork { } fn disconnect_peer(&self, who: PeerId, protocol: ProtocolName) { - let (peer_set, version) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap(); - assert_eq!(version, peer_set.get_main_version()); + let (peer_set, _) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap(); self.action_tx .lock() @@ -134,8 +133,7 @@ impl Network for TestNetwork { } fn write_notification(&self, who: PeerId, protocol: ProtocolName, message: Vec) { - let (peer_set, version) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap(); - assert_eq!(version, peer_set.get_main_version()); + let (peer_set, _) = self.peerset_protocol_names.try_get_protocol(&protocol).unwrap(); self.action_tx .lock() @@ -167,10 +165,17 @@ impl TestNetworkHandle { self.action_rx.next().await.expect("subsystem 
concluded early") } - async fn connect_peer(&mut self, peer: PeerId, peer_set: PeerSet, role: ObservedRole) { + async fn connect_peer( + &mut self, + peer: PeerId, + protocol_version: ValidationVersion, + peer_set: PeerSet, + role: ObservedRole, + ) { + let protocol_version = ProtocolVersion::from(protocol_version); self.send_network_event(NetworkEvent::NotificationStreamOpened { remote: peer, - protocol: self.peerset_protocol_names.get_main_name(peer_set), + protocol: self.peerset_protocol_names.get_name(peer_set, protocol_version), negotiated_fallback: None, role: role.into(), }) @@ -235,7 +240,12 @@ fn send_messages_to_peers() { let peer = PeerId::random(); network_handle - .connect_peer(peer.clone(), PeerSet::Validation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Validation, + ObservedRole::Full, + ) .timeout(TIMEOUT) .await .expect("Timeout does not occur"); @@ -244,7 +254,12 @@ fn send_messages_to_peers() { // so the single item sink has to be free explicitly network_handle - .connect_peer(peer.clone(), PeerSet::Collation, ObservedRole::Full) + .connect_peer( + peer.clone(), + ValidationVersion::V1, + PeerSet::Collation, + ObservedRole::Full, + ) .timeout(TIMEOUT) .await .expect("Timeout does not occur"); @@ -321,3 +336,107 @@ fn send_messages_to_peers() { virtual_overseer }); } + +#[test] +fn network_protocol_versioning_send() { + test_harness(|test_harness| async move { + let TestHarness { mut network_handle, mut virtual_overseer } = test_harness; + + let peer_ids: Vec<_> = (0..4).map(|_| PeerId::random()).collect(); + let peers = [ + (peer_ids[0], PeerSet::Validation, ValidationVersion::VStaging), + (peer_ids[1], PeerSet::Collation, ValidationVersion::V1), + (peer_ids[2], PeerSet::Validation, ValidationVersion::V1), + (peer_ids[3], PeerSet::Collation, ValidationVersion::VStaging), + ]; + + for &(peer_id, peer_set, version) in &peers { + network_handle + .connect_peer(peer_id, version, peer_set, ObservedRole::Full) + .timeout(TIMEOUT) + .await + .expect("Timeout does not occur"); + } + + // send a validation protocol message. + + { + let approval_distribution_message = + protocol_vstaging::ApprovalDistributionMessage::Approvals(Vec::new()); + + let msg = protocol_vstaging::ValidationProtocol::ApprovalDistribution( + approval_distribution_message.clone(), + ); + + // Note that bridge doesn't ensure neither peer's protocol version + // or peer set match the message. + let receivers = vec![peer_ids[0], peer_ids[3]]; + virtual_overseer + .send(FromOrchestra::Communication { + msg: NetworkBridgeTxMessage::SendValidationMessage( + receivers.clone(), + Versioned::VStaging(msg.clone()), + ), + }) + .timeout(TIMEOUT) + .await + .expect("Timeout does not occur"); + + for peer in &receivers { + assert_eq!( + network_handle + .next_network_action() + .timeout(TIMEOUT) + .await + .expect("Timeout does not occur"), + NetworkAction::WriteNotification( + *peer, + PeerSet::Validation, + WireMessage::ProtocolMessage(msg.clone()).encode(), + ) + ); + } + } + + // send a collation protocol message. 
+ + { + let collator_protocol_message = protocol_vstaging::CollatorProtocolMessage::Declare( + Sr25519Keyring::Alice.public().into(), + 0_u32.into(), + dummy_collator_signature(), + ); + + let msg = protocol_vstaging::CollationProtocol::CollatorProtocol( + collator_protocol_message.clone(), + ); + + let receivers = vec![peer_ids[1], peer_ids[2]]; + + virtual_overseer + .send(FromOrchestra::Communication { + msg: NetworkBridgeTxMessage::SendCollationMessages(vec![( + receivers.clone(), + Versioned::VStaging(msg.clone()), + )]), + }) + .await; + + for peer in &receivers { + assert_eq!( + network_handle + .next_network_action() + .timeout(TIMEOUT) + .await + .expect("Timeout does not occur"), + NetworkAction::WriteNotification( + *peer, + PeerSet::Collation, + WireMessage::ProtocolMessage(msg.clone()).encode(), + ) + ); + } + } + virtual_overseer + }); +} diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index c1a20a2a670b..1c561905ecf9 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -946,6 +946,7 @@ async fn handle_network_msg( PeerMessage(remote, Versioned::V1(msg)) => { handle_incoming_peer_message(ctx, runtime, state, remote, msg).await?; }, + PeerMessage(_, Versioned::VStaging(msg)) => {}, NewGossipTopology { .. } => { // impossible! }, diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index ea100e703a7d..5996ebd1d2b3 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -1084,6 +1084,7 @@ async fn handle_network_msg( PeerMessage(remote, Versioned::V1(msg)) => { process_incoming_peer_message(ctx, state, remote, msg).await; }, + PeerMessage(_, Versioned::VStaging(msg)) => {}, } Ok(()) diff --git a/node/network/gossip-support/src/lib.rs b/node/network/gossip-support/src/lib.rs index df90914b6f58..329a2dac978f 100644 --- a/node/network/gossip-support/src/lib.rs +++ b/node/network/gossip-support/src/lib.rs @@ -402,8 +402,12 @@ where NetworkBridgeEvent::OurViewChange(_) => {}, NetworkBridgeEvent::PeerViewChange(_, _) => {}, NetworkBridgeEvent::NewGossipTopology { .. } => {}, - NetworkBridgeEvent::PeerMessage(_, Versioned::V1(v)) => { - match v {}; + NetworkBridgeEvent::PeerMessage(_, message) => { + // match void -> LLVM unreachable + match message { + Versioned::V1(m) => match m {}, + Versioned::VStaging(m) => match m {}, + } }, } } diff --git a/node/network/protocol/Cargo.toml b/node/network/protocol/Cargo.toml index cda9173f8524..c6ceeff4ec62 100644 --- a/node/network/protocol/Cargo.toml +++ b/node/network/protocol/Cargo.toml @@ -24,3 +24,6 @@ gum = { package = "tracing-gum", path = "../../gum" } [dev-dependencies] rand_chacha = "0.3.1" + +[features] +network-protocol-staging = [] diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 169d916ce6f9..b9f40241a4d7 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -251,22 +251,26 @@ impl View { /// A protocol-versioned type. #[derive(Debug, Clone, PartialEq, Eq)] -pub enum Versioned { +pub enum Versioned { /// V1 type. V1(V1), + /// VStaging type. + VStaging(VStaging), } -impl Versioned<&'_ V1> { +impl Versioned<&'_ V1, &'_ VStaging> { /// Convert to a fully-owned version of the message. 
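The gossip-support change above leans on the uninhabited-enum idiom flagged by the `match void -> LLVM unreachable` comment: `GossipSupportNetworkMessage` has no variants, so an empty `match` is exhaustive and the compiler can prove the branch is never taken. The idiom in isolation:

enum Void {}

// A value of an empty enum cannot exist, so the empty match is
// exhaustive and this code path is provably unreachable.
fn absurd(v: Void) -> ! {
    match v {}
}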
- pub fn clone_inner(&self) -> Versioned { + pub fn clone_inner(&self) -> Versioned { match *self { Versioned::V1(inner) => Versioned::V1(inner.clone()), + Versioned::VStaging(inner) => Versioned::VStaging(inner.clone()), } } } /// All supported versions of the validation protocol message. -pub type VersionedValidationProtocol = Versioned; +pub type VersionedValidationProtocol = + Versioned; impl From for VersionedValidationProtocol { fn from(v1: v1::ValidationProtocol) -> Self { @@ -274,8 +278,14 @@ impl From for VersionedValidationProtocol { } } +impl From for VersionedValidationProtocol { + fn from(vstaging: vstaging::ValidationProtocol) -> Self { + VersionedValidationProtocol::VStaging(vstaging) + } +} + /// All supported versions of the collation protocol message. -pub type VersionedCollationProtocol = Versioned; +pub type VersionedCollationProtocol = Versioned; impl From for VersionedCollationProtocol { fn from(v1: v1::CollationProtocol) -> Self { @@ -283,12 +293,19 @@ impl From for VersionedCollationProtocol { } } +impl From for VersionedCollationProtocol { + fn from(vstaging: vstaging::CollationProtocol) -> Self { + VersionedCollationProtocol::VStaging(vstaging) + } +} + macro_rules! impl_versioned_full_protocol_from { ($from:ty, $out:ty, $variant:ident) => { impl From<$from> for $out { fn from(versioned_from: $from) -> $out { match versioned_from { Versioned::V1(x) => Versioned::V1(x.into()), + Versioned::VStaging(x) => Versioned::VStaging(x.into()), } } } @@ -298,7 +315,12 @@ macro_rules! impl_versioned_full_protocol_from { /// Implement `TryFrom` for one versioned enum variant into the inner type. /// `$m_ty::$variant(inner) -> Ok(inner)` macro_rules! impl_versioned_try_from { - ($from:ty, $out:ty, $v1_pat:pat => $v1_out:expr) => { + ( + $from:ty, + $out:ty, + $v1_pat:pat => $v1_out:expr, + $vstaging_pat:pat => $vstaging_out:expr + ) => { impl TryFrom<$from> for $out { type Error = crate::WrongVariant; @@ -306,6 +328,7 @@ macro_rules! impl_versioned_try_from { #[allow(unreachable_patterns)] // when there is only one variant match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out)), + Versioned::VStaging($vstaging_pat) => Ok(Versioned::VStaging($vstaging_out)), _ => Err(crate::WrongVariant), } } @@ -318,6 +341,8 @@ macro_rules! impl_versioned_try_from { #[allow(unreachable_patterns)] // when there is only one variant match x { Versioned::V1($v1_pat) => Ok(Versioned::V1($v1_out.clone())), + Versioned::VStaging($vstaging_pat) => + Ok(Versioned::VStaging($vstaging_out.clone())), _ => Err(crate::WrongVariant), } } @@ -326,7 +351,8 @@ macro_rules! impl_versioned_try_from { } /// Version-annotated messages used by the bitfield distribution subsystem. -pub type BitfieldDistributionMessage = Versioned; +pub type BitfieldDistributionMessage = + Versioned; impl_versioned_full_protocol_from!( BitfieldDistributionMessage, VersionedValidationProtocol, @@ -335,11 +361,13 @@ impl_versioned_full_protocol_from!( impl_versioned_try_from!( VersionedValidationProtocol, BitfieldDistributionMessage, - v1::ValidationProtocol::BitfieldDistribution(x) => x + v1::ValidationProtocol::BitfieldDistribution(x) => x, + vstaging::ValidationProtocol::BitfieldDistribution(x) => x ); /// Version-annotated messages used by the statement distribution subsystem. 
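What `impl_versioned_full_protocol_from!` and `impl_versioned_try_from!` generate is easiest to see spelled out for a single subsystem. A reduced sketch with stand-in enums (the real macros also emit a by-reference `TryFrom` impl and tolerate the single-variant case):

#[derive(Debug, PartialEq)]
enum Versioned<V1, VStaging> {
    V1(V1),
    VStaging(VStaging),
}

enum V1Protocol {
    Bitfield(u8),
    Other,
}

enum VStagingProtocol {
    Bitfield(u8),
    Other,
}

#[derive(Debug)]
struct WrongVariant;

// The subsystem-facing alias pairs the v1 payload type with the
// vstaging one; here both are just `u8`.
type BitfieldMessage = Versioned<u8, u8>;

impl TryFrom<Versioned<V1Protocol, VStagingProtocol>> for BitfieldMessage {
    type Error = WrongVariant;

    fn try_from(x: Versioned<V1Protocol, VStagingProtocol>) -> Result<Self, WrongVariant> {
        match x {
            Versioned::V1(V1Protocol::Bitfield(m)) => Ok(Versioned::V1(m)),
            Versioned::VStaging(VStagingProtocol::Bitfield(m)) => Ok(Versioned::VStaging(m)),
            _ => Err(WrongVariant),
        }
    }
}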
-pub type StatementDistributionMessage = Versioned; +pub type StatementDistributionMessage = + Versioned; impl_versioned_full_protocol_from!( StatementDistributionMessage, VersionedValidationProtocol, @@ -348,11 +376,13 @@ impl_versioned_full_protocol_from!( impl_versioned_try_from!( VersionedValidationProtocol, StatementDistributionMessage, - v1::ValidationProtocol::StatementDistribution(x) => x + v1::ValidationProtocol::StatementDistribution(x) => x, + vstaging::ValidationProtocol::StatementDistribution(x) => x ); /// Version-annotated messages used by the approval distribution subsystem. -pub type ApprovalDistributionMessage = Versioned; +pub type ApprovalDistributionMessage = + Versioned; impl_versioned_full_protocol_from!( ApprovalDistributionMessage, VersionedValidationProtocol, @@ -361,11 +391,14 @@ impl_versioned_full_protocol_from!( impl_versioned_try_from!( VersionedValidationProtocol, ApprovalDistributionMessage, - v1::ValidationProtocol::ApprovalDistribution(x) => x + v1::ValidationProtocol::ApprovalDistribution(x) => x, + vstaging::ValidationProtocol::ApprovalDistribution(x) => x + ); /// Version-annotated messages used by the gossip-support subsystem (this is void). -pub type GossipSupportNetworkMessage = Versioned; +pub type GossipSupportNetworkMessage = + Versioned; // This is a void enum placeholder, so never gets sent over the wire. impl TryFrom for GossipSupportNetworkMessage { type Error = WrongVariant; @@ -382,7 +415,8 @@ impl<'a> TryFrom<&'a VersionedValidationProtocol> for GossipSupportNetworkMessag } /// Version-annotated messages used by the bitfield distribution subsystem. -pub type CollatorProtocolMessage = Versioned; +pub type CollatorProtocolMessage = + Versioned; impl_versioned_full_protocol_from!( CollatorProtocolMessage, VersionedCollationProtocol, @@ -391,7 +425,8 @@ impl_versioned_full_protocol_from!( impl_versioned_try_from!( VersionedCollationProtocol, CollatorProtocolMessage, - v1::CollationProtocol::CollatorProtocol(x) => x + v1::CollationProtocol::CollatorProtocol(x) => x, + vstaging::CollationProtocol::CollatorProtocol(x) => x ); /// v1 notification protocol types. @@ -551,3 +586,161 @@ pub mod v1 { payload } } + +/// vstaging network protocol types. +pub mod vstaging { + use parity_scale_codec::{Decode, Encode}; + + use polkadot_primitives::vstaging::{ + CandidateHash, CandidateIndex, CollatorId, CollatorSignature, CompactStatement, Hash, + Id as ParaId, UncheckedSignedAvailabilityBitfield, ValidatorIndex, ValidatorSignature, + }; + + use polkadot_node_primitives::{ + approval::{IndirectAssignmentCert, IndirectSignedApprovalVote}, + UncheckedSignedFullStatement, + }; + + /// Network messages used by the bitfield distribution subsystem. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub enum BitfieldDistributionMessage { + /// A signed availability bitfield for a given relay-parent hash. + #[codec(index = 0)] + Bitfield(Hash, UncheckedSignedAvailabilityBitfield), + } + + /// Network messages used by the statement distribution subsystem. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub enum StatementDistributionMessage { + /// A signed full statement under a given relay-parent. + #[codec(index = 0)] + Statement(Hash, UncheckedSignedFullStatement), + /// Seconded statement with large payload (e.g. containing a runtime upgrade). + /// + /// We only gossip the hash in that case, actual payloads can be fetched from sending node + /// via request/response. 
+ #[codec(index = 1)] + LargeStatement(StatementMetadata), + } + + /// Data that makes a statement unique. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq, Hash)] + pub struct StatementMetadata { + /// Relay parent this statement is relevant under. + pub relay_parent: Hash, + /// Hash of the candidate that got validated. + pub candidate_hash: CandidateHash, + /// Validator that attested the validity. + pub signed_by: ValidatorIndex, + /// Signature of seconding validator. + pub signature: ValidatorSignature, + } + + impl StatementDistributionMessage { + /// Get fingerprint describing the contained statement uniquely. + pub fn get_fingerprint(&self) -> (CompactStatement, ValidatorIndex) { + match self { + Self::Statement(_, statement) => ( + statement.unchecked_payload().to_compact(), + statement.unchecked_validator_index(), + ), + Self::LargeStatement(meta) => + (CompactStatement::Seconded(meta.candidate_hash), meta.signed_by), + } + } + + /// Get the signature from the statement. + pub fn get_signature(&self) -> ValidatorSignature { + match self { + Self::Statement(_, statement) => statement.unchecked_signature().clone(), + Self::LargeStatement(metadata) => metadata.signature.clone(), + } + } + + /// Get contained relay parent. + pub fn get_relay_parent(&self) -> Hash { + match self { + Self::Statement(r, _) => *r, + Self::LargeStatement(meta) => meta.relay_parent, + } + } + + /// Whether this message contains a large statement. + pub fn is_large_statement(&self) -> bool { + if let Self::LargeStatement(_) = self { + true + } else { + false + } + } + } + + /// Network messages used by the approval distribution subsystem. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub enum ApprovalDistributionMessage { + /// Assignments for candidates in recent, unfinalized blocks. + /// + /// Actually checking the assignment may yield a different result. + #[codec(index = 0)] + Assignments(Vec<(IndirectAssignmentCert, CandidateIndex)>), + /// Approvals for candidates in some recent, unfinalized block. + #[codec(index = 1)] + Approvals(Vec), + } + + /// Dummy network message type, so we will receive connect/disconnect events. + #[derive(Debug, Clone, PartialEq, Eq)] + pub enum GossipSupportNetworkMessage {} + + /// Network messages used by the collator protocol subsystem + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub enum CollatorProtocolMessage { + /// Declare the intent to advertise collations under a collator ID, attaching a + /// signature of the `PeerId` of the node using the given collator ID key. + #[codec(index = 0)] + Declare(CollatorId, ParaId, CollatorSignature), + /// Advertise a collation to a validator. Can only be sent once the peer has + /// declared that they are a collator with given ID. + #[codec(index = 1)] + AdvertiseCollation(Hash), + /// A collation sent to a validator was seconded. + #[codec(index = 4)] + CollationSeconded(Hash, UncheckedSignedFullStatement), + } + + /// All network messages on the validation peer-set. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq, derive_more::From)] + pub enum ValidationProtocol { + /// Bitfield distribution messages + #[codec(index = 1)] + #[from] + BitfieldDistribution(BitfieldDistributionMessage), + /// Statement distribution messages + #[codec(index = 3)] + #[from] + StatementDistribution(StatementDistributionMessage), + /// Approval distribution messages + #[codec(index = 4)] + #[from] + ApprovalDistribution(ApprovalDistributionMessage), + } + + /// All network messages on the collation peer-set. 
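The value of `get_fingerprint` is that the full `Statement` form and the hash-only `LargeStatement` form of the same statement collapse to one `(CompactStatement, ValidatorIndex)` key, so duplicate detection works across both representations. A stand-in sketch of that dedup:

use std::collections::HashSet;

#[derive(Hash, PartialEq, Eq, Clone, Copy)]
struct CandidateHash([u8; 32]);

#[derive(Hash, PartialEq, Eq, Clone, Copy)]
struct ValidatorIndex(u32);

// Stand-in for the real `(CompactStatement, ValidatorIndex)` key.
type Fingerprint = (CandidateHash, ValidatorIndex);

fn main() {
    let mut seen: HashSet<Fingerprint> = HashSet::new();
    let fp = (CandidateHash([1; 32]), ValidatorIndex(0));

    // First arrival, say as a full `Statement`, is new.
    assert!(seen.insert(fp));
    // The same statement arriving later as a `LargeStatement` maps to the
    // same fingerprint and is caught as a duplicate.
    assert!(!seen.insert(fp));
}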
+ #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq, derive_more::From)] + pub enum CollationProtocol { + /// Collator protocol messages + #[codec(index = 0)] + #[from] + CollatorProtocol(CollatorProtocolMessage), + } + + /// Get the payload that should be signed and included in a `Declare` message. + /// + /// The payload is the local peer id of the node, which serves to prove that it + /// controls the collator key it is declaring an intention to collate under. + pub fn declare_signature_payload(peer_id: &sc_network::PeerId) -> Vec { + let mut payload = peer_id.to_bytes(); + payload.extend_from_slice(b"COLL"); + payload + } +} diff --git a/node/network/protocol/src/peer_set.rs b/node/network/protocol/src/peer_set.rs index 4a3220995f98..58fb058a803d 100644 --- a/node/network/protocol/src/peer_set.rs +++ b/node/network/protocol/src/peer_set.rs @@ -115,10 +115,17 @@ impl PeerSet { /// Networking layer relies on `get_main_version()` being the version /// of the main protocol name reported by [`PeerSetProtocolNames::get_main_name()`]. pub fn get_main_version(self) -> ProtocolVersion { + #[cfg(not(feature = "network-protocol-staging"))] match self { PeerSet::Validation => ValidationVersion::V1.into(), PeerSet::Collation => CollationVersion::V1.into(), } + + #[cfg(feature = "network-protocol-staging")] + match self { + PeerSet::Validation => ValidationVersion::VStaging.into(), + PeerSet::Collation => CollationVersion::VStaging.into(), + } } /// Get the max notification size for this peer set. @@ -142,12 +149,16 @@ impl PeerSet { PeerSet::Validation => if version == ValidationVersion::V1.into() { Some("validation/1") + } else if version == ValidationVersion::VStaging.into() { + Some("validation/2") } else { None }, PeerSet::Collation => if version == CollationVersion::V1.into() { Some("collation/1") + } else if version == CollationVersion::VStaging.into() { + Some("collation/2") } else { None }, @@ -209,6 +220,8 @@ impl From for u32 { pub enum ValidationVersion { /// The first version. V1 = 1, + /// The staging version. + VStaging = 2, } /// Supported collation protocol versions. Only versions defined here must be used in the codebase. @@ -216,6 +229,40 @@ pub enum ValidationVersion { pub enum CollationVersion { /// The first version. V1 = 1, + /// The staging version. + VStaging = 2, +} + +/// Marker indicating the version is unknown. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct UnknownVersion; + +impl TryFrom for ValidationVersion { + type Error = UnknownVersion; + + fn try_from(p: ProtocolVersion) -> Result { + for v in Self::iter() { + if v as u32 == p.0 { + return Ok(v) + } + } + + Err(UnknownVersion) + } +} + +impl TryFrom for CollationVersion { + type Error = UnknownVersion; + + fn try_from(p: ProtocolVersion) -> Result { + for v in Self::iter() { + if v as u32 == p.0 { + return Ok(v) + } + } + + Err(UnknownVersion) + } } impl From for ProtocolVersion { diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 2f4a1ad90a71..40497f77557f 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -31,7 +31,8 @@ use polkadot_node_network_protocol::{ peer_set::{IsAuthority, PeerSet}, request_response::{v1 as request_v1, IncomingRequestReceiver}, v1::{self as protocol_v1, StatementMetadata}, - IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View, + vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep, + Versioned, View, }; use polkadot_node_primitives::{ SignedFullStatement, Statement, StatementWithPVD, UncheckedSignedFullStatement, @@ -607,7 +608,7 @@ struct FetchingInfo { /// /// We use an `IndexMap` here to preserve the ordering of peers sending us messages. This is /// desirable because we reward first sending peers with reputation. - available_peers: IndexMap>, + available_peers: IndexMap>, /// Peers left to try in case the background task needs it. peers_to_try: Vec, /// Sender for sending fresh peers to the fetching task in case of failure. @@ -1249,11 +1250,11 @@ async fn retrieve_statement_from_message<'a, Context>( let is_new_peer = match info.available_peers.entry(peer) { IEntry::Occupied(mut occupied) => { - occupied.get_mut().push(message); + occupied.get_mut().push(Versioned::V1(message)); false }, IEntry::Vacant(vacant) => { - vacant.insert(vec![message]); + vacant.insert(vec![Versioned::V1(message)]); true }, }; @@ -1331,7 +1332,10 @@ async fn launch_request( } let available_peers = { let mut m = IndexMap::new(); - m.insert(peer, vec![protocol_v1::StatementDistributionMessage::LargeStatement(meta)]); + m.insert( + peer, + vec![Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(meta))], + ); m }; Some(LargeStatementStatus::Fetching(FetchingInfo { @@ -1351,7 +1355,7 @@ async fn handle_incoming_message_and_circulate<'a, Context, R>( active_heads: &'a mut HashMap, recent_outdated_heads: &RecentOutdatedHeads, ctx: &mut Context, - message: protocol_v1::StatementDistributionMessage, + message: net_protocol::StatementDistributionMessage, req_sender: &mpsc::Sender, metrics: &Metrics, runtime: &mut RuntimeInfo, @@ -1384,7 +1388,7 @@ async fn handle_incoming_message_and_circulate<'a, Context, R>( // statement before a `Seconded` statement. `Seconded` statements are the only ones // that require dependents. Thus, if this is a `Seconded` statement for a candidate we // were not aware of before, we cannot have any dependent statements from the candidate. 
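The `TryFrom<ProtocolVersion>` impls above reduce to a linear scan over the known discriminants, however `Self::iter()` is derived. A dependency-free sketch of the same check:

#[derive(Debug, Clone, Copy, PartialEq)]
enum CollationVersion {
    V1 = 1,
    VStaging = 2,
}

#[derive(Debug, PartialEq)]
struct UnknownVersion;

impl TryFrom<u32> for CollationVersion {
    type Error = UnknownVersion;

    fn try_from(p: u32) -> Result<Self, UnknownVersion> {
        // Same shape as the `Self::iter()` loop, with the variant list
        // written out by hand.
        [CollationVersion::V1, CollationVersion::VStaging]
            .iter()
            .copied()
            .find(|v| *v as u32 == p)
            .ok_or(UnknownVersion)
    }
}

fn main() {
    assert_eq!(CollationVersion::try_from(2), Ok(CollationVersion::VStaging));
    assert_eq!(CollationVersion::try_from(9), Err(UnknownVersion));
}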
- let _ = metrics.time_network_bridge_update_v1("circulate_statement"); + let _ = metrics.time_network_bridge_update("circulate_statement"); let session_index = runtime.get_session_index_for_child(ctx.sender(), relay_parent).await; let topology = match session_index { @@ -1430,12 +1434,19 @@ async fn handle_incoming_message<'a, Context>( active_heads: &'a mut HashMap, recent_outdated_heads: &RecentOutdatedHeads, ctx: &mut Context, - message: protocol_v1::StatementDistributionMessage, + message: net_protocol::StatementDistributionMessage, req_sender: &mpsc::Sender, metrics: &Metrics, ) -> Option<(Hash, StoredStatement<'a>)> { + let _ = metrics.time_network_bridge_update("handle_incoming_message"); + + // TODO [now] handle vstaging messages + let message = match message { + Versioned::V1(m) => m, + Versioned::VStaging(_) => unimplemented!(), + }; + let relay_parent = message.get_relay_parent(); - let _ = metrics.time_network_bridge_update_v1("handle_incoming_message"); let active_head = match active_heads.get_mut(&relay_parent) { Some(h) => h, @@ -1648,8 +1659,11 @@ async fn handle_incoming_message<'a, Context>( // When we receive a new message from a peer, we forward it to the // candidate backing subsystem. - ctx.send_message(CandidateBackingMessage::Statement(relay_parent, statement_with_pvd)) - .await; + ctx.send_message(CandidateBackingMessage::Statement( + relay_parent, + unimplemented!(), // TODO [now]: fixme + )) + .await; Some((relay_parent, statement)) }, @@ -1742,7 +1756,7 @@ async fn handle_network_update( } }, NetworkBridgeEvent::NewGossipTopology(topology) => { - let _ = metrics.time_network_bridge_update_v1("new_gossip_topology"); + let _ = metrics.time_network_bridge_update("new_gossip_topology"); let new_session_index = topology.session; let new_topology: SessionGridTopology = topology.into(); @@ -1766,7 +1780,7 @@ async fn handle_network_update( } } }, - NetworkBridgeEvent::PeerMessage(peer, Versioned::V1(message)) => { + NetworkBridgeEvent::PeerMessage(peer, message) => { handle_incoming_message_and_circulate( peer, topology_storage, @@ -1783,7 +1797,7 @@ async fn handle_network_update( .await; }, NetworkBridgeEvent::PeerViewChange(peer, view) => { - let _ = metrics.time_network_bridge_update_v1("peer_view_change"); + let _ = metrics.time_network_bridge_update("peer_view_change"); gum::trace!(target: LOG_TARGET, ?peer, ?view, "Peer view change"); match peers.get_mut(&peer) { Some(data) => diff --git a/node/network/statement-distribution/src/metrics.rs b/node/network/statement-distribution/src/metrics.rs index 6bc6f724ae09..f0e9d3be7efb 100644 --- a/node/network/statement-distribution/src/metrics.rs +++ b/node/network/statement-distribution/src/metrics.rs @@ -27,7 +27,7 @@ struct MetricsInner { received_responses: prometheus::CounterVec, active_leaves_update: prometheus::Histogram, share: prometheus::Histogram, - network_bridge_update_v1: prometheus::HistogramVec, + network_bridge_update: prometheus::HistogramVec, statements_unexpected: prometheus::CounterVec, created_message_size: prometheus::Gauge, } @@ -75,16 +75,13 @@ impl Metrics { self.0.as_ref().map(|metrics| metrics.share.start_timer()) } - /// Provide a timer for `network_bridge_update_v1` which observes on drop. - pub fn time_network_bridge_update_v1( + /// Provide a timer for `network_bridge_update` which observes on drop. 
+ pub fn time_network_bridge_update( &self, message_type: &'static str, ) -> Option { self.0.as_ref().map(|metrics| { - metrics - .network_bridge_update_v1 - .with_label_values(&[message_type]) - .start_timer() + metrics.network_bridge_update.with_label_values(&[message_type]).start_timer() }) } @@ -166,11 +163,11 @@ impl metrics::Metrics for Metrics { )?, registry, )?, - network_bridge_update_v1: prometheus::register( + network_bridge_update: prometheus::register( prometheus::HistogramVec::new( prometheus::HistogramOpts::new( - "polkadot_parachain_statement_distribution_network_bridge_update_v1", - "Time spent within `statement_distribution::network_bridge_update_v1`", + "polkadot_parachain_statement_distribution_network_bridge_update", + "Time spent within `statement_distribution::network_bridge_update`", ) .buckets(HISTOGRAM_LATENCY_BUCKETS.into()), &["message_type"], From 2c215b21cbb00e4dac033892dc750958513c1295 Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Thu, 15 Sep 2022 00:03:11 -0500 Subject: [PATCH 10/76] remove max_pov_size requirement from prospective pvd request (#6014) * remove max_pov_size requirement from prospective pvd request * fmt --- node/core/prospective-parachains/src/lib.rs | 22 +++++++++++++++++---- node/subsystem-types/src/messages.rs | 3 --- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index f90d23e92ad7..6d203e902cdd 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -506,13 +506,14 @@ fn answer_prospective_validation_data_request( let mut head_data = storage.head_data_by_hash(&request.parent_head_data_hash).map(|x| x.clone()); let mut relay_parent_info = None; + let mut max_pov_size = None; for fragment_tree in view .active_leaves .values() .filter_map(|x| x.fragment_trees.get(&request.para_id)) { - if head_data.is_some() && relay_parent_info.is_some() { + if head_data.is_some() && relay_parent_info.is_some() && max_pov_size.is_some() { break } if relay_parent_info.is_none() { @@ -525,14 +526,27 @@ fn answer_prospective_validation_data_request( head_data = Some(required_parent.clone()); } } + if max_pov_size.is_none() { + let contains_ancestor = fragment_tree + .scope() + .ancestor_by_hash(&request.candidate_relay_parent) + .is_some(); + if contains_ancestor { + // We are leaning hard on two assumptions here. + // 1. That the fragment tree never contains allowed relay-parents whose session for children + // is different from that of the base block's. + // 2. That the max_pov_size is only configurable per session. + max_pov_size = Some(fragment_tree.scope().base_constraints().max_pov_size); + } + } } - let _ = tx.send(match (head_data, relay_parent_info) { - (Some(h), Some(i)) => Some(PersistedValidationData { + let _ = tx.send(match (head_data, relay_parent_info, max_pov_size) { + (Some(h), Some(i), Some(m)) => Some(PersistedValidationData { parent_head: h, relay_parent_number: i.number, relay_parent_storage_root: i.storage_root, - max_pov_size: request.max_pov_size, + max_pov_size: m as _, }), _ => None, }); diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 5fb1817c700e..7c907506803c 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -987,9 +987,6 @@ pub struct ProspectiveValidationDataRequest { pub candidate_relay_parent: Hash, /// The parent head-data hash. 
pub parent_head_data_hash: Hash, - /// The maximum POV size expected of this candidate. This should be - /// the maximum as configured during the session. - pub max_pov_size: u32, } /// Indicates the relay-parents whose fragment tree a candidate From 4b913e7ba29697ba697ca3c76472f5c58ee22708 Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Fri, 23 Sep 2022 05:17:08 -0500 Subject: [PATCH 11/76] Extract legacy statement distribution to its own module (#6026) * add compatibility type to v2 statement distribution message * warning cleanup * handle compatibility layer for v2 * clean up an unimplemented!() block * circulate statements based on version * extract legacy v1 code into separate module * remove unimplemented * clean up naming of from_requester/responder * remove TODOs * have backing share seconded statements with PVD * fmt * fix warning * Quick fix unused warning for not yet implemented/used staging messages. * Fix network bridge test * Fix wrong merge. We now have 23 subsystems (network bridge split + prospective parachains) Co-authored-by: Robert Klotzner --- node/core/backing/src/lib.rs | 5 +- node/core/backing/src/tests/mod.rs | 8 +- .../src/tests/prospective_parachains.rs | 2 +- node/network/bridge/src/rx/tests.rs | 6 +- .../src/collator_side/mod.rs | 2 +- .../src/validator_side/mod.rs | 2 +- node/network/protocol/src/lib.rs | 73 +- .../src/legacy_v1/mod.rs | 2118 ++++++++++++++++ .../src/{ => legacy_v1}/requester.rs | 5 +- .../src/{ => legacy_v1}/responder.rs | 0 .../src/{ => legacy_v1}/tests.rs | 37 +- .../network/statement-distribution/src/lib.rs | 2150 +---------------- node/overseer/src/tests.rs | 2 +- node/primitives/src/lib.rs | 8 + node/subsystem-types/src/messages.rs | 2 +- .../src/node/backing/candidate-backing.md | 2 +- .../src/types/overseer-protocol.md | 2 +- 17 files changed, 2285 insertions(+), 2139 deletions(-) create mode 100644 node/network/statement-distribution/src/legacy_v1/mod.rs rename node/network/statement-distribution/src/{ => legacy_v1}/requester.rs (98%) rename node/network/statement-distribution/src/{ => legacy_v1}/responder.rs (100%) rename node/network/statement-distribution/src/{ => legacy_v1}/tests.rs (98%) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 957953cf38e2..c79bfdada8cb 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -1606,10 +1606,7 @@ async fn sign_import_and_distribute_statement( if let Some(signed_statement) = sign_statement(&*rp_state, statement, keystore, metrics).await { import_statement(ctx, rp_state, per_candidate, &signed_statement).await?; - let smsg = StatementDistributionMessage::Share( - rp_state.parent, - StatementWithPVD::drop_pvd_from_signed(signed_statement.clone()), - ); + let smsg = StatementDistributionMessage::Share(rp_state.parent, signed_statement.clone()); ctx.send_unbounded_message(smsg); Ok(Some(signed_statement)) diff --git a/node/core/backing/src/tests/mod.rs b/node/core/backing/src/tests/mod.rs index 81ebcf5c6dc4..2e2a5878a888 100644 --- a/node/core/backing/src/tests/mod.rs +++ b/node/core/backing/src/tests/mod.rs @@ -928,7 +928,7 @@ fn backing_misbehavior_works() { signed_statement, ) ) if relay_parent == test_state.relay_parent => { - assert_eq!(*signed_statement.payload(), Statement::Valid(candidate_a_hash)); + assert_eq!(*signed_statement.payload(), StatementWithPVD::Valid(candidate_a_hash)); } ); @@ -1092,14 +1092,14 @@ fn backing_dont_second_invalid() { virtual_overseer.recv().await, AllMessages::CandidateValidation( 
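With `max_pov_size` gone from the request, `answer_prospective_validation_data_request` has to assemble three independent facts from the fragment trees and may only answer once all of them are known. The control flow, reduced to a sketch with numeric stand-ins for the head data, relay-parent info, and max PoV size:

// Each fragment tree may contribute any of the three missing pieces;
// the scan stops as soon as everything is known.
fn gather(trees: &[(Option<u32>, Option<u64>, Option<u32>)]) -> Option<(u32, u64, u32)> {
    let (mut head_data, mut relay_parent_info, mut max_pov_size) = (None, None, None);

    for (h, r, m) in trees {
        if head_data.is_some() && relay_parent_info.is_some() && max_pov_size.is_some() {
            break
        }
        head_data = head_data.or(*h);
        relay_parent_info = relay_parent_info.or(*r);
        max_pov_size = max_pov_size.or(*m);
    }

    // A partial answer is no answer, mirroring the final `match` above.
    match (head_data, relay_parent_info, max_pov_size) {
        (Some(h), Some(r), Some(m)) => Some((h, r, m)),
        _ => None,
    }
}

fn main() {
    let trees = [(None, Some(7u64), None), (Some(1u32), None, Some(5u32))];
    assert_eq!(gather(&trees), Some((1, 7, 5)));
}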
CandidateValidationMessage::ValidateFromExhaustive( - _pvd, + pvd, _validation_code, candidate_receipt, _pov, timeout, tx, ), - ) if _pvd == pvd_b && + ) if pvd == pvd_b && _validation_code == validation_code_b && *_pov == pov_block_b && &candidate_receipt.descriptor == candidate_b.descriptor() && timeout == BACKING_EXECUTION_TIMEOUT && @@ -1135,7 +1135,7 @@ fn backing_dont_second_invalid() { signed_statement, ) ) if parent_hash == test_state.relay_parent => { - assert_eq!(*signed_statement.payload(), Statement::Seconded(candidate_b)); + assert_eq!(*signed_statement.payload(), StatementWithPVD::Seconded(candidate_b, pvd_b.clone())); } ); diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 212c7d7167ae..3f7065d7d5df 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -1276,7 +1276,7 @@ fn concurrent_dependent_candidates() { let payload = statement.payload(); assert_matches!( payload.clone(), - Statement::Valid(hash) + StatementWithPVD::Valid(hash) if hash == candidate_a_hash || hash == candidate_b_hash => { assert!(valid_statements.insert(hash)); diff --git a/node/network/bridge/src/rx/tests.rs b/node/network/bridge/src/rx/tests.rs index dc22bf66bf22..5b43695d6ff6 100644 --- a/node/network/bridge/src/rx/tests.rs +++ b/node/network/bridge/src/rx/tests.rs @@ -1414,14 +1414,16 @@ fn network_protocol_versioning_subsystem_msg() { } ); - let metadata = protocol_vstaging::StatementMetadata { + let metadata = protocol_v1::StatementMetadata { relay_parent: Hash::zero(), candidate_hash: CandidateHash::default(), signed_by: ValidatorIndex(0), signature: sp_core::crypto::UncheckedFrom::unchecked_from([1u8; 64]), }; let statement_distribution_message = - protocol_vstaging::StatementDistributionMessage::LargeStatement(metadata); + protocol_vstaging::StatementDistributionMessage::V1Compatibility( + protocol_v1::StatementDistributionMessage::LargeStatement(metadata), + ); let msg = protocol_vstaging::ValidationProtocol::StatementDistribution( statement_distribution_message.clone(), ); diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index 1c561905ecf9..9b8545a14e2c 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -946,7 +946,7 @@ async fn handle_network_msg( PeerMessage(remote, Versioned::V1(msg)) => { handle_incoming_peer_message(ctx, runtime, state, remote, msg).await?; }, - PeerMessage(_, Versioned::VStaging(msg)) => {}, + PeerMessage(_, Versioned::VStaging(_msg)) => {}, NewGossipTopology { .. } => { // impossible! 
}, diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index 5996ebd1d2b3..806581a2dd89 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -1084,7 +1084,7 @@ async fn handle_network_msg( PeerMessage(remote, Versioned::V1(msg)) => { process_incoming_peer_message(ctx, state, remote, msg).await; }, - PeerMessage(_, Versioned::VStaging(msg)) => {}, + PeerMessage(_, Versioned::VStaging(_msg)) => {}, } Ok(()) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index b9f40241a4d7..a45bca82df49 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -592,8 +592,8 @@ pub mod vstaging { use parity_scale_codec::{Decode, Encode}; use polkadot_primitives::vstaging::{ - CandidateHash, CandidateIndex, CollatorId, CollatorSignature, CompactStatement, Hash, - Id as ParaId, UncheckedSignedAvailabilityBitfield, ValidatorIndex, ValidatorSignature, + CandidateIndex, CollatorId, CollatorSignature, Hash, Id as ParaId, + UncheckedSignedAvailabilityBitfield, }; use polkadot_node_primitives::{ @@ -612,67 +612,16 @@ pub mod vstaging { /// Network messages used by the statement distribution subsystem. #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub enum StatementDistributionMessage { - /// A signed full statement under a given relay-parent. - #[codec(index = 0)] - Statement(Hash, UncheckedSignedFullStatement), - /// Seconded statement with large payload (e.g. containing a runtime upgrade). + // TODO [now]: notifications for v2 + /// All messages for V1 for compatibility with the statement distribution + /// protocol, for relay-parents that don't support asynchronous backing. /// - /// We only gossip the hash in that case, actual payloads can be fetched from sending node - /// via request/response. - #[codec(index = 1)] - LargeStatement(StatementMetadata), - } - - /// Data that makes a statement unique. - #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq, Hash)] - pub struct StatementMetadata { - /// Relay parent this statement is relevant under. - pub relay_parent: Hash, - /// Hash of the candidate that got validated. - pub candidate_hash: CandidateHash, - /// Validator that attested the validity. - pub signed_by: ValidatorIndex, - /// Signature of seconding validator. - pub signature: ValidatorSignature, - } - - impl StatementDistributionMessage { - /// Get fingerprint describing the contained statement uniquely. - pub fn get_fingerprint(&self) -> (CompactStatement, ValidatorIndex) { - match self { - Self::Statement(_, statement) => ( - statement.unchecked_payload().to_compact(), - statement.unchecked_validator_index(), - ), - Self::LargeStatement(meta) => - (CompactStatement::Seconded(meta.candidate_hash), meta.signed_by), - } - } - - /// Get the signature from the statement. - pub fn get_signature(&self) -> ValidatorSignature { - match self { - Self::Statement(_, statement) => statement.unchecked_signature().clone(), - Self::LargeStatement(metadata) => metadata.signature.clone(), - } - } - - /// Get contained relay parent. - pub fn get_relay_parent(&self) -> Hash { - match self { - Self::Statement(r, _) => *r, - Self::LargeStatement(meta) => meta.relay_parent, - } - } - - /// Whether this message contains a large statement. 
-		pub fn is_large_statement(&self) -> bool {
-			if let Self::LargeStatement(_) = self {
-				true
-			} else {
-				false
-			}
-		}
+		/// These are illegal to send to V1 peers, and illegal to send concerning relay-parents
+		/// which support asynchronous backing. This backwards compatibility should be
+		/// considered immediately deprecated and can be removed once the node software
+		/// is no longer required to support relay-parents which predate asynchronous backing.
+		#[codec(index = 255)]
+		V1Compatibility(crate::v1::StatementDistributionMessage),
 	}

 	/// Network messages used by the approval distribution subsystem.
diff --git a/node/network/statement-distribution/src/legacy_v1/mod.rs b/node/network/statement-distribution/src/legacy_v1/mod.rs
new file mode 100644
index 000000000000..e07f4489dfba
--- /dev/null
+++ b/node/network/statement-distribution/src/legacy_v1/mod.rs
@@ -0,0 +1,2118 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+use parity_scale_codec::Encode;
+
+use polkadot_node_network_protocol::{
+	self as net_protocol,
+	grid_topology::{RequiredRouting, SessionBoundGridTopologyStorage, SessionGridTopology},
+	peer_set::{IsAuthority, PeerSet, ValidationVersion},
+	v1::{self as protocol_v1, StatementMetadata},
+	vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep,
+	Versioned, View,
+};
+use polkadot_node_primitives::{
+	SignedFullStatement, Statement, StatementWithPVD, UncheckedSignedFullStatement,
+};
+use polkadot_node_subsystem_util::{self as util, rand, MIN_GOSSIP_PEERS};
+
+use polkadot_node_subsystem::{
+	jaeger,
+	messages::{CandidateBackingMessage, NetworkBridgeEvent, NetworkBridgeTxMessage},
+	overseer, ActivatedLeaf, PerLeafSpan, StatementDistributionSenderTrait,
+};
+use polkadot_primitives::v2::{
+	AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, Hash,
+	Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, SignedStatement, SigningContext,
+	UncheckedSignedStatement, ValidatorId, ValidatorIndex, ValidatorSignature,
+};
+
+use futures::{
+	channel::{mpsc, oneshot},
+	future::RemoteHandle,
+	prelude::*,
+};
+use indexmap::{map::Entry as IEntry, IndexMap};
+use rand::Rng;
+use sp_keystore::SyncCryptoStorePtr;
+use util::runtime::RuntimeInfo;
+
+use std::collections::{hash_map::Entry, HashMap, HashSet, VecDeque};
+
+use crate::error::{Error, JfyiError, JfyiErrorResult, Result};
+
+/// Background task logic for requesting large statements.
+mod requester;
+use requester::fetch;
+
+/// Background task logic for responding to large statements.
+mod responder;
+
+use crate::{metrics::Metrics, LOG_TARGET};
+
+pub use requester::RequesterMessage;
+pub use responder::{respond, ResponderMessage};
+
+#[cfg(test)]
+mod tests;
+
+const COST_UNEXPECTED_STATEMENT: Rep = Rep::CostMinor("Unexpected Statement");
+const COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE: Rep =
+	Rep::CostMinor("Unexpected Statement, missing knowledge for relay parent");
+const COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE: Rep =
+	Rep::CostMinor("Unexpected Statement, unknown candidate");
+const COST_UNEXPECTED_STATEMENT_REMOTE: Rep =
+	Rep::CostMinor("Unexpected Statement, remote not allowed");
+
+const COST_FETCH_FAIL: Rep =
+	Rep::CostMinor("Requesting `CommittedCandidateReceipt` from peer failed");
+const COST_INVALID_SIGNATURE: Rep = Rep::CostMajor("Invalid Statement Signature");
+const COST_WRONG_HASH: Rep = Rep::CostMajor("Received candidate had wrong hash");
+const COST_DUPLICATE_STATEMENT: Rep =
+	Rep::CostMajorRepeated("Statement sent more than once by peer");
+const COST_APPARENT_FLOOD: Rep = Rep::Malicious("Peer appears to be flooding us with statements");
+
+const BENEFIT_VALID_STATEMENT: Rep = Rep::BenefitMajor("Peer provided a valid statement");
+const BENEFIT_VALID_STATEMENT_FIRST: Rep =
+	Rep::BenefitMajorFirst("Peer was the first to provide a valid statement");
+const BENEFIT_VALID_RESPONSE: Rep =
+	Rep::BenefitMajor("Peer provided a valid large statement response");
+
+/// The maximum number of candidates each validator is allowed to second at any relay-parent.
+/// Short for "Validator Candidate Threshold".
+///
+/// This is the number of candidates we keep per validator at any relay-parent.
+/// Typically we will only keep 1, but when a validator equivocates we will need to track 2.
+const VC_THRESHOLD: usize = 2;
+
+/// Large statements should be rare.
+const MAX_LARGE_STATEMENTS_PER_SENDER: usize = 20;
+
+/// Overall state of the legacy-v1 portion of the subsystem.
+pub(crate) struct State {
+	peers: HashMap<PeerId, PeerData>,
+	topology_storage: SessionBoundGridTopologyStorage,
+	authorities: HashMap<AuthorityDiscoveryId, PeerId>,
+	active_heads: HashMap<Hash, ActiveHeadData>,
+	recent_outdated_heads: RecentOutdatedHeads,
+	runtime: RuntimeInfo,
+}
+
+impl State {
+	/// Create a new state.
+	pub(crate) fn new(keystore: SyncCryptoStorePtr) -> Self {
+		State {
+			peers: HashMap::new(),
+			topology_storage: Default::default(),
+			authorities: HashMap::new(),
+			active_heads: HashMap::new(),
+			recent_outdated_heads: RecentOutdatedHeads::default(),
+			runtime: RuntimeInfo::new(Some(keystore)),
+		}
+	}
+
+	/// Query whether the state contains some relay-parent.
+	pub(crate) fn contains_relay_parent(&self, relay_parent: &Hash) -> bool {
+		self.active_heads.contains_key(relay_parent)
+	}
+}
+
+#[derive(Default)]
+struct RecentOutdatedHeads {
+	buf: VecDeque<Hash>,
+}
+
+impl RecentOutdatedHeads {
+	fn note_outdated(&mut self, hash: Hash) {
+		const MAX_BUF_LEN: usize = 10;
+
+		self.buf.push_back(hash);
+
+		while self.buf.len() > MAX_BUF_LEN {
+			let _ = self.buf.pop_front();
+		}
+	}
+
+	fn is_recent_outdated(&self, hash: &Hash) -> bool {
+		self.buf.contains(hash)
+	}
+}
+
+/// Tracks our impression of a single peer's view of the candidates a validator has seconded
+/// for a given relay-parent.
+///
+/// It is expected to receive at most `VC_THRESHOLD` candidates from us and to be aware of at
+/// most `VC_THRESHOLD` via other means.
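+///
+/// A rough usage sketch (an illustrative addition, not part of the original patch; the hashes
+/// `candidate_a`, `candidate_b` and `candidate_c` are three distinct, hypothetical
+/// `CandidateHash` values, and only the `note_local`/`note_remote` methods defined below are
+/// used):
+///
+/// ```ignore
+/// let mut tracker = VcPerPeerTracker::default();
+/// // We told the peer about a candidate seconded by this validator:
+/// tracker.note_local(candidate_a);
+/// // The peer may tell us about up to `VC_THRESHOLD` (2) distinct seconded candidates:
+/// assert!(tracker.note_remote(candidate_a));
+/// assert!(tracker.note_remote(candidate_b));
+/// // A third distinct candidate exceeds the threshold and is rejected:
+/// assert!(!tracker.note_remote(candidate_c));
+/// ```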
+#[derive(Default)]
+struct VcPerPeerTracker {
+	local_observed: arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>,
+	remote_observed: arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>,
+}
+
+impl VcPerPeerTracker {
+	/// Note that the remote should now be aware that a validator has seconded a given candidate (by hash)
+	/// based on a message that we have sent it from our local pool.
+	fn note_local(&mut self, h: CandidateHash) {
+		if !note_hash(&mut self.local_observed, h) {
+			gum::warn!(
+				target: LOG_TARGET,
+				"Statement distribution is erroneously attempting to distribute more \
+				than {} candidate(s) per validator index. Ignoring",
+				VC_THRESHOLD,
+			);
+		}
+	}
+
+	/// Note that the remote should now be aware that a validator has seconded a given candidate (by hash)
+	/// based on a message that it has sent us.
+	///
+	/// Returns `true` if the peer was allowed to send us such a message, `false` otherwise.
+	fn note_remote(&mut self, h: CandidateHash) -> bool {
+		note_hash(&mut self.remote_observed, h)
+	}
+
+	/// Returns `true` if the peer would be allowed to send us a `Seconded` statement for the
+	/// given candidate, `false` otherwise.
+	fn is_wanted_candidate(&self, h: &CandidateHash) -> bool {
+		!self.remote_observed.contains(h) && !self.remote_observed.is_full()
+	}
+}
+
+fn note_hash(
+	observed: &mut arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>,
+	h: CandidateHash,
+) -> bool {
+	if observed.contains(&h) {
+		return true
+	}
+
+	observed.try_push(h).is_ok()
+}
+
+/// Knowledge that a peer has about goings-on in a relay parent.
+#[derive(Default)]
+struct PeerRelayParentKnowledge {
+	/// Candidates that the peer is aware of because we sent statements to it. This indicates that we can
+	/// send other statements pertaining to that candidate.
+	sent_candidates: HashSet<CandidateHash>,
+	/// Candidates that the peer is aware of, because we received statements from it.
+	received_candidates: HashSet<CandidateHash>,
+	/// Fingerprints of all statements a peer should be aware of: those that
+	/// were sent to the peer by us.
+	sent_statements: HashSet<(CompactStatement, ValidatorIndex)>,
+	/// Fingerprints of all statements a peer should be aware of: those that
+	/// were sent to us by the peer.
+	received_statements: HashSet<(CompactStatement, ValidatorIndex)>,
+	/// How many candidates this peer is aware of for each given validator index.
+	seconded_counts: HashMap<ValidatorIndex, VcPerPeerTracker>,
+	/// How many statements we've received for each candidate that we're aware of.
+	received_message_count: HashMap<CandidateHash, usize>,
+
+	/// How many large statements this peer already sent us.
+	///
+	/// Flood protection for large statements is rather hard, and will no longer be necessary
+	/// once `https://github.com/paritytech/polkadot/issues/2979` is implemented.
+	/// Reason: We keep messages around until we have fetched the payload, but if a node makes up
+	/// statements and never provides the data, we will keep them around for the slot duration.
+	/// Not even signature checking would help, as the sender, if a validator, can just sign
+	/// arbitrary invalid statements and will not face any consequences as long as it never
+	/// provides the payload.
+	///
+	/// Quick and temporary fix: only accept `MAX_LARGE_STATEMENTS_PER_SENDER` per connected node.
+	///
+	/// Large statements should be rare; if they were not, we would run into problems anyways, as
+	/// we would not be able to distribute them in a timely manner. Therefore
+	/// `MAX_LARGE_STATEMENTS_PER_SENDER` can be set to a relatively small number.
+	/// It is also not counted per candidate hash, but in total, as candidate hashes can be made
+	/// up, as illustrated above.
+	///
+	/// An attacker could still try to fill up our memory, by repeatedly disconnecting and
+	/// connecting again with new peer ids, but we assume that the resulting effective bandwidth
+	/// for such an attack would be too low.
+	large_statement_count: usize,
+
+	/// We have seen a message that is unexpected from this peer, so note this fact
+	/// and stop subsequent logging and peer reputation flood.
+	unexpected_count: usize,
+}
+
+impl PeerRelayParentKnowledge {
+	/// Updates our view of the peer's knowledge with this statement's fingerprint based
+	/// on something that we would like to send to the peer.
+	///
+	/// NOTE: assumes `self.can_send` returned true before this call.
+	///
+	/// Once the knowledge has incorporated a statement, it cannot be incorporated again.
+	///
+	/// This returns `true` if this is the first time the peer has become aware of a
+	/// candidate with the given hash.
+	fn send(&mut self, fingerprint: &(CompactStatement, ValidatorIndex)) -> bool {
+		debug_assert!(
+			self.can_send(fingerprint),
+			"send is only called after `can_send` returns true; qed",
+		);
+
+		let new_known = match fingerprint.0 {
+			CompactStatement::Seconded(ref h) => {
+				self.seconded_counts.entry(fingerprint.1).or_default().note_local(h.clone());
+
+				let was_known = self.is_known_candidate(h);
+				self.sent_candidates.insert(h.clone());
+				!was_known
+			},
+			CompactStatement::Valid(_) => false,
+		};
+
+		self.sent_statements.insert(fingerprint.clone());
+
+		new_known
+	}
+
+	/// This returns `true` if the peer can accept this statement and `false` otherwise,
+	/// without altering internal state.
+	fn can_send(&self, fingerprint: &(CompactStatement, ValidatorIndex)) -> bool {
+		let already_known = self.sent_statements.contains(fingerprint) ||
+			self.received_statements.contains(fingerprint);
+
+		if already_known {
+			return false
+		}
+
+		match fingerprint.0 {
+			CompactStatement::Valid(ref h) => {
+				// The peer can only accept Valid statements for which it is aware
+				// of the corresponding candidate.
+				self.is_known_candidate(h)
+			},
+			CompactStatement::Seconded(_) => true,
+		}
+	}
+
+	/// Attempt to update our view of the peer's knowledge with this statement's fingerprint based on
+	/// a message we are receiving from the peer.
+	///
+	/// Provide the maximum message count that we can receive per candidate. In practice we should
+	/// not receive more statements for any one candidate than there are members in the group assigned
+	/// to that para, but this maximum needs to be lenient to account for equivocations that may be
+	/// cross-group. As such, a maximum of 2 * `n_validators` is recommended.
+	///
+	/// This returns an error if the peer should not have sent us this message according to protocol
+	/// rules for flood protection.
+	///
+	/// If this returns `Ok`, the internal state has been altered. After `receive`ing a new
+	/// candidate, we are then cleared to send the peer further statements about that candidate.
+	///
+	/// This returns `Ok(true)` if this is the first time the peer has become aware of a
+	/// candidate with given hash.
+	fn receive(
+		&mut self,
+		fingerprint: &(CompactStatement, ValidatorIndex),
+		max_message_count: usize,
+	) -> std::result::Result<bool, Rep> {
+		// We don't check `sent_statements` because a statement could be in-flight from both
+		// sides at the same time.
+		if self.received_statements.contains(fingerprint) {
+			return Err(COST_DUPLICATE_STATEMENT)
+		}
+
+		let (candidate_hash, fresh) = match fingerprint.0 {
+			CompactStatement::Seconded(ref h) => {
+				let allowed_remote = self
+					.seconded_counts
+					.entry(fingerprint.1)
+					.or_default()
+					.note_remote(h.clone());
+
+				if !allowed_remote {
+					return Err(COST_UNEXPECTED_STATEMENT_REMOTE)
+				}
+
+				(h, !self.is_known_candidate(h))
+			},
+			CompactStatement::Valid(ref h) => {
+				if !self.is_known_candidate(h) {
+					return Err(COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE)
+				}
+
+				(h, false)
+			},
+		};
+
+		{
+			let received_per_candidate =
+				self.received_message_count.entry(*candidate_hash).or_insert(0);
+
+			if *received_per_candidate >= max_message_count {
+				return Err(COST_APPARENT_FLOOD)
+			}
+
+			*received_per_candidate += 1;
+		}
+
+		self.received_statements.insert(fingerprint.clone());
+		self.received_candidates.insert(candidate_hash.clone());
+		Ok(fresh)
+	}
+
+	/// Note a received large statement metadata.
+	fn receive_large_statement(&mut self) -> std::result::Result<(), Rep> {
+		if self.large_statement_count >= MAX_LARGE_STATEMENTS_PER_SENDER {
+			return Err(COST_APPARENT_FLOOD)
+		}
+		self.large_statement_count += 1;
+		Ok(())
+	}
+
+	/// This method does the same checks as `receive` without modifying the internal state.
+	/// Returns an error if the peer should not have sent us this message according to protocol
+	/// rules for flood protection.
+	fn check_can_receive(
+		&self,
+		fingerprint: &(CompactStatement, ValidatorIndex),
+		max_message_count: usize,
+	) -> std::result::Result<(), Rep> {
+		// We don't check `sent_statements` because a statement could be in-flight from both
+		// sides at the same time.
+		if self.received_statements.contains(fingerprint) {
+			return Err(COST_DUPLICATE_STATEMENT)
+		}
+
+		let candidate_hash = match fingerprint.0 {
+			CompactStatement::Seconded(ref h) => {
+				let allowed_remote = self
+					.seconded_counts
+					.get(&fingerprint.1)
+					.map_or(true, |r| r.is_wanted_candidate(h));
+
+				if !allowed_remote {
+					return Err(COST_UNEXPECTED_STATEMENT_REMOTE)
+				}
+
+				h
+			},
+			CompactStatement::Valid(ref h) => {
+				if !self.is_known_candidate(h) {
+					return Err(COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE)
+				}
+
+				h
+			},
+		};
+
+		let received_per_candidate = self.received_message_count.get(candidate_hash).unwrap_or(&0);
+
+		if *received_per_candidate >= max_message_count {
+			Err(COST_APPARENT_FLOOD)
+		} else {
+			Ok(())
+		}
+	}
+
+	/// Whether the peer is aware of this candidate, either because we sent it statements about
+	/// the candidate or because it sent statements about it to us. If so, we can send the peer
+	/// other statements pertaining to that candidate.
+	fn is_known_candidate(&self, candidate: &CandidateHash) -> bool {
+		self.sent_candidates.contains(candidate) || self.received_candidates.contains(candidate)
+	}
+}
+
+struct PeerData {
+	view: View,
+	protocol_version: ValidationVersion,
+	view_knowledge: HashMap<Hash, PeerRelayParentKnowledge>,
+	/// Peer might be known as authority with the given ids.
+	maybe_authority: Option<HashSet<AuthorityDiscoveryId>>,
+}
+
+impl PeerData {
+	/// Updates our view of the peer's knowledge with this statement's fingerprint based
+	/// on something that we would like to send to the peer.
+	///
+	/// NOTE: assumes `self.can_send` returned true before this call.
+	///
+	/// Once the knowledge has incorporated a statement, it cannot be incorporated again.
+	///
+	/// This returns `true` if this is the first time the peer has become aware of a
+	/// candidate with the given hash.
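+	///
+	/// Illustrative call pattern (a sketch added for exposition, not part of the original
+	/// patch; `fingerprint` is a `(CompactStatement, ValidatorIndex)` pair as used throughout
+	/// this module):
+	///
+	/// ```ignore
+	/// if peer_data.can_send(&relay_parent, &fingerprint) {
+	///     // `true` means the peer just became aware of a new candidate.
+	///     let newly_known = peer_data.send(&relay_parent, &fingerprint);
+	/// }
+	/// ```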
+	fn send(
+		&mut self,
+		relay_parent: &Hash,
+		fingerprint: &(CompactStatement, ValidatorIndex),
+	) -> bool {
+		debug_assert!(
+			self.can_send(relay_parent, fingerprint),
+			"send is only called after `can_send` returns true; qed",
+		);
+		self.view_knowledge
+			.get_mut(relay_parent)
+			.expect("send is only called after `can_send` returns true; qed")
+			.send(fingerprint)
+	}
+
+	/// This returns `false` if the peer cannot accept this statement, without altering internal
+	/// state.
+	fn can_send(
+		&self,
+		relay_parent: &Hash,
+		fingerprint: &(CompactStatement, ValidatorIndex),
+	) -> bool {
+		self.view_knowledge.get(relay_parent).map_or(false, |k| k.can_send(fingerprint))
+	}
+
+	/// Attempt to update our view of the peer's knowledge with this statement's fingerprint based on
+	/// a message we are receiving from the peer.
+	///
+	/// Provide the maximum message count that we can receive per candidate. In practice we should
+	/// not receive more statements for any one candidate than there are members in the group assigned
+	/// to that para, but this maximum needs to be lenient to account for equivocations that may be
+	/// cross-group. As such, a maximum of 2 * `n_validators` is recommended.
+	///
+	/// This returns an error if the peer should not have sent us this message according to protocol
+	/// rules for flood protection.
+	///
+	/// If this returns `Ok`, the internal state has been altered. After `receive`ing a new
+	/// candidate, we are then cleared to send the peer further statements about that candidate.
+	///
+	/// This returns `Ok(true)` if this is the first time the peer has become aware of a
+	/// candidate with given hash.
+	fn receive(
+		&mut self,
+		relay_parent: &Hash,
+		fingerprint: &(CompactStatement, ValidatorIndex),
+		max_message_count: usize,
+	) -> std::result::Result<bool, Rep> {
+		self.view_knowledge
+			.get_mut(relay_parent)
+			.ok_or(COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE)?
+			.receive(fingerprint, max_message_count)
+	}
+
+	/// This method does the same checks as `receive` without modifying the internal state.
+	/// Returns an error if the peer should not have sent us this message according to protocol
+	/// rules for flood protection.
+	fn check_can_receive(
+		&self,
+		relay_parent: &Hash,
+		fingerprint: &(CompactStatement, ValidatorIndex),
+		max_message_count: usize,
+	) -> std::result::Result<(), Rep> {
+		self.view_knowledge
+			.get(relay_parent)
+			.ok_or(COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE)?
+			.check_can_receive(fingerprint, max_message_count)
+	}
+
+	/// Receive a notice about an out-of-view statement and return the previous
+	/// unexpected-message count for this relay-parent.
+	fn receive_unexpected(&mut self, relay_parent: &Hash) -> usize {
+		self.view_knowledge
+			.get_mut(relay_parent)
+			.map_or(0_usize, |relay_parent_peer_knowledge| {
+				let old = relay_parent_peer_knowledge.unexpected_count;
+				relay_parent_peer_knowledge.unexpected_count += 1_usize;
+				old
+			})
+	}
+
+	/// Basic flood protection for large statements.
+	fn receive_large_statement(&mut self, relay_parent: &Hash) -> std::result::Result<(), Rep> {
+		self.view_knowledge
+			.get_mut(relay_parent)
+			.ok_or(COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE)?
+			.receive_large_statement()
+	}
+}
+
+// A statement stored while a relay chain head is active.
+#[derive(Debug, Copy, Clone)]
+struct StoredStatement<'a> {
+	comparator: &'a StoredStatementComparator,
+	statement: &'a SignedFullStatement,
+}
+
+// A value used for comparison of stored statements to each other.
+//
+// The compact version of the statement, the validator index, and the signature of the validator
+// are enough to differentiate between all types of equivocations, as long as the signature is
+// actually checked to be valid. The same statement with 2 signatures and 2 statements with
+// different (or the same) signatures will all be correctly judged to be unequal with this
+// comparator.
+#[derive(PartialEq, Eq, Hash, Clone, Debug)]
+struct StoredStatementComparator {
+	compact: CompactStatement,
+	validator_index: ValidatorIndex,
+	signature: ValidatorSignature,
+}
+
+impl<'a> From<(&'a StoredStatementComparator, &'a SignedFullStatement)> for StoredStatement<'a> {
+	fn from(
+		(comparator, statement): (&'a StoredStatementComparator, &'a SignedFullStatement),
+	) -> Self {
+		Self { comparator, statement }
+	}
+}
+
+impl<'a> StoredStatement<'a> {
+	fn compact(&self) -> &'a CompactStatement {
+		&self.comparator.compact
+	}
+
+	fn fingerprint(&self) -> (CompactStatement, ValidatorIndex) {
+		(self.comparator.compact.clone(), self.statement.validator_index())
+	}
+}
+
+#[derive(Debug)]
+enum NotedStatement<'a> {
+	NotUseful,
+	Fresh(StoredStatement<'a>),
+	UsefulButKnown,
+}
+
+/// Large statement fetching status.
+enum LargeStatementStatus {
+	/// We are currently fetching the statement data from a remote peer. We keep a list of other nodes
+	/// claiming to have that data and will fall back on them.
+	Fetching(FetchingInfo),
+	/// Statement data is fetched or we got it locally via `StatementDistributionMessage::Share`.
+	FetchedOrShared(CommittedCandidateReceipt),
+}
+
+/// Info about a fetch in progress.
+struct FetchingInfo {
+	/// All peers that sent us a `LargeStatement` or a `Valid` statement for the given
+	/// `CandidateHash`, together with their originally sent messages.
+	///
+	/// We use an `IndexMap` here to preserve the ordering of peers sending us messages. This is
+	/// desirable because we reward first sending peers with reputation.
+	available_peers: IndexMap<PeerId, Vec<net_protocol::StatementDistributionMessage>>,
+	/// Peers left to try in case the background task needs it.
+	peers_to_try: Vec<PeerId>,
+	/// Sender for sending fresh peers to the fetching task in case of failure.
+	peer_sender: Option<oneshot::Sender<Vec<PeerId>>>,
+	/// Task taking care of the request.
+	///
+	/// Will be killed once dropped.
+	#[allow(dead_code)]
+	fetching_task: RemoteHandle<()>,
+}
+
+#[derive(Debug, PartialEq, Eq)]
+enum DeniedStatement {
+	NotUseful,
+	UsefulButKnown,
+}
+
+pub(crate) struct ActiveHeadData {
+	/// All candidates we are aware of for this head, keyed by hash.
+	candidates: HashSet<CandidateHash>,
+	/// Persisted validation data cache.
+	cached_validation_data: HashMap<ParaId, PersistedValidationData>,
+	/// Stored statements for circulation to peers.
+	///
+	/// These are iterable in insertion order, and `Seconded` statements are always
+	/// accepted before dependent statements.
+	statements: IndexMap<StoredStatementComparator, SignedFullStatement>,
+	/// Large statements we are waiting for with associated metadata.
+	waiting_large_statements: HashMap<CandidateHash, LargeStatementStatus>,
+	/// The parachain validators at the head's child session index.
+	validators: Vec<ValidatorId>,
+	/// The current session index of this fork.
+	session_index: sp_staking::SessionIndex,
+	/// How many `Seconded` statements we've seen per validator.
+	seconded_counts: HashMap<ValidatorIndex, usize>,
+	/// A Jaeger span for this head, so we can attach data to it.
+	span: PerLeafSpan,
+}
+
+impl ActiveHeadData {
+	fn new(
+		validators: Vec<ValidatorId>,
+		session_index: sp_staking::SessionIndex,
+		span: PerLeafSpan,
+	) -> Self {
+		ActiveHeadData {
+			candidates: Default::default(),
+			cached_validation_data: Default::default(),
+			statements: Default::default(),
+			waiting_large_statements: Default::default(),
+			validators,
+			session_index,
+			seconded_counts: Default::default(),
+			span,
+		}
+	}
+
+	/// Fetches the `PersistedValidationData` from the runtime, assuming
+	/// that the core is free. The relay parent must match that of the active
+	/// head.
+	async fn fetch_persisted_validation_data<Sender>(
+		&mut self,
+		sender: &mut Sender,
+		relay_parent: Hash,
+		para_id: ParaId,
+	) -> Result<Option<&PersistedValidationData>>
+	where
+		Sender: StatementDistributionSenderTrait,
+	{
+		if let Entry::Vacant(entry) = self.cached_validation_data.entry(para_id) {
+			let persisted_validation_data =
+				polkadot_node_subsystem_util::request_persisted_validation_data(
+					relay_parent,
+					para_id,
+					OccupiedCoreAssumption::Free,
+					sender,
+				)
+				.await
+				.await
+				.map_err(Error::RuntimeApiUnavailable)?
+				.map_err(|err| Error::FetchPersistedValidationData(para_id, err))?;
+
+			match persisted_validation_data {
+				Some(pvd) => entry.insert(pvd),
+				None => return Ok(None),
+			};
+		}
+
+		Ok(self.cached_validation_data.get(&para_id))
+	}
+
+	/// Note the given statement.
+	///
+	/// If it was not already known and can be accepted, returns `NotedStatement::Fresh`,
+	/// with a handle to the statement.
+	///
+	/// If it can be accepted, but we already know it, returns `NotedStatement::UsefulButKnown`.
+	///
+	/// We accept up to `VC_THRESHOLD` (2 at time of writing) `Seconded` statements
+	/// per validator. These will be the first ones we see. The statement is assumed
+	/// to have been checked, including that the validator index is not out-of-bounds and
+	/// the signature is valid.
+	///
+	/// Any other statements or those that reference a candidate we are not aware of cannot be accepted
+	/// and will return `NotedStatement::NotUseful`.
+	fn note_statement(&mut self, statement: SignedFullStatement) -> NotedStatement {
+		let validator_index = statement.validator_index();
+		let comparator = StoredStatementComparator {
+			compact: statement.payload().to_compact(),
+			validator_index,
+			signature: statement.signature().clone(),
+		};
+
+		match comparator.compact {
+			CompactStatement::Seconded(h) => {
+				let seconded_so_far = self.seconded_counts.entry(validator_index).or_insert(0);
+				if *seconded_so_far >= VC_THRESHOLD {
+					gum::trace!(
+						target: LOG_TARGET,
+						?validator_index,
+						?statement,
+						"Extra statement is ignored"
+					);
+					return NotedStatement::NotUseful
+				}
+
+				self.candidates.insert(h);
+				if let Some(old) = self.statements.insert(comparator.clone(), statement) {
+					gum::trace!(
+						target: LOG_TARGET,
+						?validator_index,
+						statement = ?old,
+						"Known statement"
+					);
+					NotedStatement::UsefulButKnown
+				} else {
+					*seconded_so_far += 1;
+
+					gum::trace!(
+						target: LOG_TARGET,
+						?validator_index,
+						statement = ?self.statements.last().expect("Just inserted").1,
+						"Noted new statement"
+					);
+					// This will always return `Some` because it was just inserted.
+					let key_value = self
+						.statements
+						.get_key_value(&comparator)
+						.expect("Statement was just inserted; qed");
+
+					NotedStatement::Fresh(key_value.into())
+				}
+			},
+			CompactStatement::Valid(h) => {
+				if !self.candidates.contains(&h) {
+					gum::trace!(
+						target: LOG_TARGET,
+						?validator_index,
+						?statement,
+						"Statement for unknown candidate"
+					);
+					return NotedStatement::NotUseful
+				}
+
+				if let Some(old) = self.statements.insert(comparator.clone(), statement) {
+					gum::trace!(
+						target: LOG_TARGET,
+						?validator_index,
+						statement = ?old,
+						"Known statement"
+					);
+					NotedStatement::UsefulButKnown
+				} else {
+					gum::trace!(
+						target: LOG_TARGET,
+						?validator_index,
+						statement = ?self.statements.last().expect("Just inserted").1,
+						"Noted new statement"
+					);
+					// This will always return `Some` because it was just inserted.
+					NotedStatement::Fresh(
+						self.statements
+							.get_key_value(&comparator)
+							.expect("Statement was just inserted; qed")
+							.into(),
+					)
+				}
+			},
+		}
+	}
+
+	/// Returns an error if the statement is already known or not useful
+	/// without modifying the internal state.
+	fn check_useful_or_unknown(
+		&self,
+		statement: &UncheckedSignedStatement,
+	) -> std::result::Result<(), DeniedStatement> {
+		let validator_index = statement.unchecked_validator_index();
+		let compact = statement.unchecked_payload();
+		let comparator = StoredStatementComparator {
+			compact: compact.clone(),
+			validator_index,
+			signature: statement.unchecked_signature().clone(),
+		};
+
+		match compact {
+			CompactStatement::Seconded(_) => {
+				let seconded_so_far = self.seconded_counts.get(&validator_index).unwrap_or(&0);
+				if *seconded_so_far >= VC_THRESHOLD {
+					gum::trace!(
+						target: LOG_TARGET,
+						?validator_index,
+						?statement,
+						"Extra statement is ignored",
+					);
+					return Err(DeniedStatement::NotUseful)
+				}
+
+				if self.statements.contains_key(&comparator) {
+					gum::trace!(
+						target: LOG_TARGET,
+						?validator_index,
+						?statement,
+						"Known statement",
+					);
+					return Err(DeniedStatement::UsefulButKnown)
+				}
+			},
+			CompactStatement::Valid(h) => {
+				if !self.candidates.contains(&h) {
+					gum::trace!(
+						target: LOG_TARGET,
+						?validator_index,
+						?statement,
+						"Statement for unknown candidate",
+					);
+					return Err(DeniedStatement::NotUseful)
+				}
+
+				if self.statements.contains_key(&comparator) {
+					gum::trace!(
+						target: LOG_TARGET,
+						?validator_index,
+						?statement,
+						"Known statement",
+					);
+					return Err(DeniedStatement::UsefulButKnown)
+				}
+			},
+		}
+		Ok(())
+	}
+
+	/// Get an iterator over all statements for the active head. Seconded statements come first.
+	fn statements(&self) -> impl Iterator<Item = StoredStatement<'_>> + '_ {
+		self.statements.iter().map(Into::into)
+	}
+
+	/// Get an iterator over all statements for the active head that are for a particular candidate.
+	fn statements_about(
+		&self,
+		candidate_hash: CandidateHash,
+	) -> impl Iterator<Item = StoredStatement<'_>> + '_ {
+		self.statements()
+			.filter(move |s| s.compact().candidate_hash() == &candidate_hash)
+	}
+}
+
+/// Check a statement signature under this parent hash.
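+///
+/// On success this yields the checked statement; on failure the unchecked statement is handed
+/// back so the caller can log and report it. A sketch of the intended use (an illustrative
+/// addition, mirroring `handle_incoming_message` further below):
+///
+/// ```ignore
+/// match check_statement_signature(&active_head, relay_parent, unchecked_compact) {
+///     Ok(checked) => { /* proceed with the verified statement */ },
+///     Err(unchecked) => { /* report `COST_INVALID_SIGNATURE` and discard */ },
+/// }
+/// ```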
+fn check_statement_signature(
+	head: &ActiveHeadData,
+	relay_parent: Hash,
+	statement: UncheckedSignedStatement,
+) -> std::result::Result<SignedStatement, UncheckedSignedStatement> {
+	let signing_context =
+		SigningContext { session_index: head.session_index, parent_hash: relay_parent };
+
+	head.validators
+		.get(statement.unchecked_validator_index().0 as usize)
+		.ok_or_else(|| statement.clone())
+		.and_then(|v| statement.try_into_checked(&signing_context, v))
+}
+
+/// Places the statement in storage if it is new, and then
+/// circulates the statement to all peers who have not seen it yet, and
+/// sends all statements dependent on that statement to peers who could previously not receive
+/// them but now can.
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
+async fn circulate_statement_and_dependents<Context>(
+	topology_store: &SessionBoundGridTopologyStorage,
+	peers: &mut HashMap<PeerId, PeerData>,
+	active_heads: &mut HashMap<Hash, ActiveHeadData>,
+	ctx: &mut Context,
+	relay_parent: Hash,
+	statement: SignedFullStatement,
+	priority_peers: Vec<PeerId>,
+	metrics: &Metrics,
+	rng: &mut impl rand::Rng,
+) {
+	let active_head = match active_heads.get_mut(&relay_parent) {
+		Some(res) => res,
+		None => return,
+	};
+
+	let _span = active_head
+		.span
+		.child("circulate-statement")
+		.with_candidate(statement.payload().candidate_hash())
+		.with_stage(jaeger::Stage::StatementDistribution);
+
+	let topology = topology_store.get_topology_or_fallback(active_head.session_index);
+	// First circulate the statement directly to all peers needing it.
+	// The borrow of `active_head` needs to encompass only this (Rust) statement.
+	let outputs: Option<(CandidateHash, Vec<PeerId>)> = {
+		match active_head.note_statement(statement) {
+			NotedStatement::Fresh(stored) => Some((
+				*stored.compact().candidate_hash(),
+				circulate_statement(
+					RequiredRouting::GridXY,
+					topology,
+					peers,
+					ctx,
+					relay_parent,
+					stored,
+					priority_peers,
+					metrics,
+					rng,
+				)
+				.await,
+			)),
+			_ => None,
+		}
+	};
+
+	let _span = _span.child("send-to-peers");
+	// Now send dependent statements to all peers needing them, if any.
+	if let Some((candidate_hash, peers_needing_dependents)) = outputs {
+		for peer in peers_needing_dependents {
+			if let Some(peer_data) = peers.get_mut(&peer) {
+				let _span_loop = _span.child("to-peer").with_peer_id(&peer);
+				// defensive: the peer data should always be some because the iterator
+				// of peers is derived from the set of peers.
+				send_statements_about(
+					peer,
+					peer_data,
+					ctx,
+					relay_parent,
+					candidate_hash,
+					&*active_head,
+					metrics,
+				)
+				.await;
+			}
+		}
+	}
+}
+
+/// Create a network message from a given statement.
+fn v1_statement_message(
+	relay_parent: Hash,
+	statement: SignedFullStatement,
+	metrics: &Metrics,
+) -> protocol_v1::StatementDistributionMessage {
+	let (is_large, size) = is_statement_large(&statement);
+	if let Some(size) = size {
+		metrics.on_created_message(size);
+	}
+
+	if is_large {
+		protocol_v1::StatementDistributionMessage::LargeStatement(StatementMetadata {
+			relay_parent,
+			candidate_hash: statement.payload().candidate_hash(),
+			signed_by: statement.validator_index(),
+			signature: statement.signature().clone(),
+		})
+	} else {
+		protocol_v1::StatementDistributionMessage::Statement(relay_parent, statement.into())
+	}
+}
+
+/// Check whether a statement should be treated as a large statement.
+///
+/// Also returns the size of the statement if it is a `Seconded` statement, otherwise `None`.
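+///
+/// For example (illustrative numbers only): if the validation peer-set's maximum notification
+/// size were 128 KiB, any `Seconded` statement encoding to 64 KiB or more - and any statement
+/// carrying new validation code, regardless of size - would be distributed via the
+/// request/response path rather than gossiped directly.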
+fn is_statement_large(statement: &SignedFullStatement) -> (bool, Option<usize>) {
+	match &statement.payload() {
+		Statement::Seconded(committed) => {
+			let size = statement.as_unchecked().encoded_size();
+			// Runtime upgrades will always be large and even if not - no harm done.
+			if committed.commitments.new_validation_code.is_some() {
+				return (true, Some(size))
+			}
+
+			// Half max size seems to be a good threshold to start not using notifications:
+			let threshold =
+				PeerSet::Validation.get_max_notification_size(IsAuthority::Yes) as usize / 2;
+
+			(size >= threshold, Some(size))
+		},
+		Statement::Valid(_) => (false, None),
+	}
+}
+
+/// Circulates a statement to all peers who have not seen it yet, and returns
+/// an iterator over peers who need to have dependent statements sent.
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
+async fn circulate_statement<'a, Context>(
+	required_routing: RequiredRouting,
+	topology: &SessionGridTopology,
+	peers: &mut HashMap<PeerId, PeerData>,
+	ctx: &mut Context,
+	relay_parent: Hash,
+	stored: StoredStatement<'a>,
+	mut priority_peers: Vec<PeerId>,
+	metrics: &Metrics,
+	rng: &mut impl rand::Rng,
+) -> Vec<PeerId> {
+	let fingerprint = stored.fingerprint();
+
+	let mut peers_to_send: Vec<PeerId> = peers
+		.iter()
+		.filter_map(|(peer, data)| {
+			if data.can_send(&relay_parent, &fingerprint) {
+				Some(peer.clone())
+			} else {
+				None
+			}
+		})
+		.collect();
+
+	let good_peers: HashSet<&PeerId> = peers_to_send.iter().collect();
+	// Only take priority peers we can send data to:
+	priority_peers.retain(|p| good_peers.contains(p));
+
+	// Avoid duplicates:
+	let priority_set: HashSet<&PeerId> = priority_peers.iter().collect();
+	peers_to_send.retain(|p| !priority_set.contains(p));
+
+	util::choose_random_subset_with_rng(
+		|e| topology.route_to_peer(required_routing, e),
+		&mut peers_to_send,
+		rng,
+		MIN_GOSSIP_PEERS,
+	);
+	// We don't want to use fewer peers than we would without any priority peers:
+	let min_size = std::cmp::max(peers_to_send.len(), MIN_GOSSIP_PEERS);
+	// Make the set full:
+	let needed_peers = min_size as i64 - priority_peers.len() as i64;
+	if needed_peers > 0 {
+		peers_to_send.truncate(needed_peers as usize);
+		// Order is important here - priority peers are placed first, so will be sent first.
+		// This gives backers a chance to be among the first in requesting any large statement
+		// data.
+		priority_peers.append(&mut peers_to_send);
+	}
+	peers_to_send = priority_peers;
+	// We must not have duplicates:
+	debug_assert!(
+		peers_to_send.len() == peers_to_send.clone().into_iter().collect::<HashSet<_>>().len(),
+		"We filter out duplicates above. qed.",
+	);
+
+	let (v1_peers_to_send, vstaging_peers_to_send) = peers_to_send
+		.into_iter()
+		.filter_map(|peer_id| {
+			let peer_data =
+				peers.get_mut(&peer_id).expect("a subset is taken above, so it exists; qed");
+
+			let new = peer_data.send(&relay_parent, &fingerprint);
+
+			Some((peer_id, new, peer_data.protocol_version))
+		})
+		.partition::<Vec<_>, _>(|(_, _, version)| match version {
+			ValidationVersion::V1 => true,
+			ValidationVersion::VStaging => false,
+		}); // partition is handy here but not if we add more protocol versions
+
+	let payload = v1_statement_message(relay_parent, stored.statement.clone(), metrics);
+
+	// Send all these peers the initial statement.
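+	// `compatible_v1_message` (defined later in this module) is expected to keep the payload
+	// as a plain v1 notification for v1 peers and to wrap the same payload in
+	// `V1Compatibility` for vstaging peers, since the vstaging protocol only carries the
+	// compatibility variant at this stage.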
+	if !v1_peers_to_send.is_empty() {
+		gum::trace!(
+			target: LOG_TARGET,
+			?v1_peers_to_send,
+			?relay_parent,
+			statement = ?stored.statement,
+			"Sending statement to v1 peers",
+		);
+		ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
+			v1_peers_to_send.iter().map(|(p, _, _)| p.clone()).collect(),
+			compatible_v1_message(ValidationVersion::V1, payload.clone()).into(),
+		))
+		.await;
+	}
+	if !vstaging_peers_to_send.is_empty() {
+		gum::trace!(
+			target: LOG_TARGET,
+			?vstaging_peers_to_send,
+			?relay_parent,
+			statement = ?stored.statement,
+			"Sending statement to vstaging peers",
+		);
+		ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
+			vstaging_peers_to_send.iter().map(|(p, _, _)| p.clone()).collect(),
+			compatible_v1_message(ValidationVersion::VStaging, payload.clone()).into(),
+		))
+		.await;
+	}
+
+	v1_peers_to_send
+		.into_iter()
+		.chain(vstaging_peers_to_send)
+		.filter_map(|(peer, needs_dependent, _)| if needs_dependent { Some(peer) } else { None })
+		.collect()
+}
+
+/// Send all statements about a given candidate hash to a peer.
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
+async fn send_statements_about<Context>(
+	peer: PeerId,
+	peer_data: &mut PeerData,
+	ctx: &mut Context,
+	relay_parent: Hash,
+	candidate_hash: CandidateHash,
+	active_head: &ActiveHeadData,
+	metrics: &Metrics,
+) {
+	for statement in active_head.statements_about(candidate_hash) {
+		let fingerprint = statement.fingerprint();
+		if !peer_data.can_send(&relay_parent, &fingerprint) {
+			continue
+		}
+		peer_data.send(&relay_parent, &fingerprint);
+		let payload = v1_statement_message(relay_parent, statement.statement.clone(), metrics);
+
+		gum::trace!(
+			target: LOG_TARGET,
+			?peer,
+			?relay_parent,
+			?candidate_hash,
+			statement = ?statement.statement,
+			"Sending statement",
+		);
+		ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
+			vec![peer.clone()],
+			compatible_v1_message(peer_data.protocol_version, payload).into(),
+		))
+		.await;
+
+		metrics.on_statement_distributed();
+	}
+}
+
+/// Send all statements at a given relay-parent to a peer.
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
+async fn send_statements<Context>(
+	peer: PeerId,
+	peer_data: &mut PeerData,
+	ctx: &mut Context,
+	relay_parent: Hash,
+	active_head: &ActiveHeadData,
+	metrics: &Metrics,
+) {
+	for statement in active_head.statements() {
+		let fingerprint = statement.fingerprint();
+		if !peer_data.can_send(&relay_parent, &fingerprint) {
+			continue
+		}
+		peer_data.send(&relay_parent, &fingerprint);
+		let payload = v1_statement_message(relay_parent, statement.statement.clone(), metrics);
+
+		gum::trace!(
+			target: LOG_TARGET,
+			?peer,
+			?relay_parent,
+			statement = ?statement.statement,
+			"Sending statement"
+		);
+		ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage(
+			vec![peer.clone()],
+			compatible_v1_message(peer_data.protocol_version, payload).into(),
+		))
+		.await;
+
+		metrics.on_statement_distributed();
+	}
+}
+
+async fn report_peer(
+	sender: &mut impl overseer::StatementDistributionSenderTrait,
+	peer: PeerId,
+	rep: Rep,
+) {
+	sender.send_message(NetworkBridgeTxMessage::ReportPeer(peer, rep)).await
+}
+
+/// If the message contains a statement, then retrieve it; otherwise fork off a task to fetch it.
+///
+/// This function will also return `None` if the message did not pass some basic checks; in that
+/// case no statement will be requested. On the flip side, you get `ActiveHeadData` in addition
+/// to your statement.
+///
+/// If the message was large but the result has already been fetched, that result is returned.
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
+async fn retrieve_statement_from_message<'a, Context>(
+	peer: PeerId,
+	peer_version: ValidationVersion,
+	message: protocol_v1::StatementDistributionMessage,
+	active_head: &'a mut ActiveHeadData,
+	ctx: &mut Context,
+	req_sender: &mpsc::Sender<RequesterMessage>,
+	metrics: &Metrics,
+) -> Option<UncheckedSignedFullStatement> {
+	let fingerprint = message.get_fingerprint();
+	let candidate_hash = *fingerprint.0.candidate_hash();
+
+	// Immediately return any Seconded statement:
+	let message = if let protocol_v1::StatementDistributionMessage::Statement(h, s) = message {
+		if let Statement::Seconded(_) = s.unchecked_payload() {
+			return Some(s)
+		}
+		protocol_v1::StatementDistributionMessage::Statement(h, s)
+	} else {
+		message
+	};
+
+	match active_head.waiting_large_statements.entry(candidate_hash) {
+		Entry::Occupied(mut occupied) => {
+			match occupied.get_mut() {
+				LargeStatementStatus::Fetching(info) => {
+					let is_large_statement = message.is_large_statement();
+
+					let is_new_peer = match info.available_peers.entry(peer) {
+						IEntry::Occupied(mut occupied) => {
+							occupied.get_mut().push(compatible_v1_message(peer_version, message));
+							false
+						},
+						IEntry::Vacant(vacant) => {
+							vacant.insert(vec![compatible_v1_message(peer_version, message)]);
+							true
+						},
+					};
+
+					if is_new_peer && is_large_statement {
+						info.peers_to_try.push(peer);
+						// Answer any pending request for more peers:
+						if let Some(sender) = info.peer_sender.take() {
+							let to_send = std::mem::take(&mut info.peers_to_try);
+							if let Err(peers) = sender.send(to_send) {
+								// Requester no longer interested for now, might want them
+								// later:
+								info.peers_to_try = peers;
+							}
+						}
+					}
+				},
+				LargeStatementStatus::FetchedOrShared(committed) => {
+					match message {
+						protocol_v1::StatementDistributionMessage::Statement(_, s) => {
+							// We can now immediately return any statements (should only be
+							// `Statement::Valid` ones, but we don't care at this point.)
+							return Some(s)
+						},
+						protocol_v1::StatementDistributionMessage::LargeStatement(metadata) =>
+							return Some(UncheckedSignedFullStatement::new(
+								Statement::Seconded(committed.clone()),
+								metadata.signed_by,
+								metadata.signature.clone(),
+							)),
+					}
+				},
+			}
+		},
+		Entry::Vacant(vacant) => {
+			match message {
+				protocol_v1::StatementDistributionMessage::LargeStatement(metadata) => {
+					if let Some(new_status) = launch_request(
+						metadata,
+						peer,
+						peer_version,
+						req_sender.clone(),
+						ctx,
+						metrics,
+					)
+					.await
+					{
+						vacant.insert(new_status);
+					}
+				},
+				protocol_v1::StatementDistributionMessage::Statement(_, s) => {
+					// No fetch in progress, safe to return any statement immediately (we don't bother
+					// about normal network jitter which might cause `Valid` statements to arrive early
+					// for now.).
+					return Some(s)
+				},
+			}
+		},
+	}
+	None
+}
+
+/// Launch a request for a large statement and get the tracking status.
+///
+/// Returns `None` if spawning the task failed.
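+///
+/// On success the returned status starts out as `LargeStatementStatus::Fetching`, seeded with
+/// the requesting peer and its original message, so that the message can be replayed once the
+/// payload arrives (see the `RequesterMessage::Finished` handling further below).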
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
+async fn launch_request<Context>(
+	meta: StatementMetadata,
+	peer: PeerId,
+	peer_version: ValidationVersion,
+	req_sender: mpsc::Sender<RequesterMessage>,
+	ctx: &mut Context,
+	metrics: &Metrics,
+) -> Option<LargeStatementStatus> {
+	let (task, handle) =
+		fetch(meta.relay_parent, meta.candidate_hash, vec![peer], req_sender, metrics.clone())
+			.remote_handle();
+
+	let result = ctx.spawn("large-statement-fetcher", task.boxed());
+	if let Err(err) = result {
+		gum::error!(target: LOG_TARGET, ?err, "Spawning task failed.");
+		return None
+	}
+	let available_peers = {
+		let mut m = IndexMap::new();
+		m.insert(
+			peer,
+			vec![compatible_v1_message(
+				peer_version,
+				protocol_v1::StatementDistributionMessage::LargeStatement(meta),
+			)],
+		);
+		m
+	};
+	Some(LargeStatementStatus::Fetching(FetchingInfo {
+		available_peers,
+		peers_to_try: Vec::new(),
+		peer_sender: None,
+		fetching_task: handle,
+	}))
+}
+
+/// Handle an incoming message and circulate it to peers, if we did not know it already.
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
+async fn handle_incoming_message_and_circulate<'a, Context, R>(
+	peer: PeerId,
+	topology_storage: &SessionBoundGridTopologyStorage,
+	peers: &mut HashMap<PeerId, PeerData>,
+	active_heads: &'a mut HashMap<Hash, ActiveHeadData>,
+	recent_outdated_heads: &RecentOutdatedHeads,
+	ctx: &mut Context,
+	message: net_protocol::StatementDistributionMessage,
+	req_sender: &mpsc::Sender<RequesterMessage>,
+	metrics: &Metrics,
+	runtime: &mut RuntimeInfo,
+	rng: &mut R,
+) where
+	R: rand::Rng,
+{
+	let handled_incoming = match peers.get_mut(&peer) {
+		Some(data) =>
+			handle_incoming_message(
+				peer,
+				data,
+				active_heads,
+				recent_outdated_heads,
+				ctx,
+				message,
+				req_sender,
+				metrics,
+			)
+			.await,
+		None => None,
+	};
+
+	// If we got a fresh message, we need to circulate it to all peers.
+	if let Some((relay_parent, statement)) = handled_incoming {
+		// We can ignore the set of peers this function returns as now expecting
+		// dependent statements.
+		//
+		// We have the invariant in this subsystem that we never store a `Valid` or `Invalid`
+		// statement before a `Seconded` statement. `Seconded` statements are the only ones
+		// that require dependents. Thus, if this is a `Seconded` statement for a candidate we
+		// were not aware of before, we cannot have any dependent statements from the candidate.
+		let _ = metrics.time_network_bridge_update("circulate_statement");
+
+		let session_index = runtime.get_session_index_for_child(ctx.sender(), relay_parent).await;
+		let topology = match session_index {
+			Ok(session_index) => topology_storage.get_topology_or_fallback(session_index),
+			Err(e) => {
+				gum::debug!(
+					target: LOG_TARGET,
+					%relay_parent,
+					"cannot get session index for the specific relay parent: {:?}",
+					e
+				);
+
+				topology_storage.get_current_topology()
+			},
+		};
+		let required_routing =
+			topology.required_routing_by_index(statement.statement.validator_index(), false);
+
+		let _ = circulate_statement(
+			required_routing,
+			topology,
+			peers,
+			ctx,
+			relay_parent,
+			statement,
+			Vec::new(),
+			metrics,
+			rng,
+		)
+		.await;
+	}
+}
+
+// Handle a statement. Returns a reference to a newly-stored statement
+// if we were not already aware of it, along with the corresponding relay-parent.
+//
+// This function checks the signature and ensures the statement is compatible with our
+// view. It also notifies candidate backing if the statement was previously unknown.
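+//
+// The processing order below is: cheap flood-protection checks first, then signature
+// verification, then (for large statements) retrieval of the full payload, and only then
+// storage and forwarding to the candidate backing subsystem.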
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
+async fn handle_incoming_message<'a, Context>(
+	peer: PeerId,
+	peer_data: &mut PeerData,
+	active_heads: &'a mut HashMap<Hash, ActiveHeadData>,
+	recent_outdated_heads: &RecentOutdatedHeads,
+	ctx: &mut Context,
+	message: net_protocol::StatementDistributionMessage,
+	req_sender: &mpsc::Sender<RequesterMessage>,
+	metrics: &Metrics,
+) -> Option<(Hash, StoredStatement<'a>)> {
+	let _ = metrics.time_network_bridge_update("handle_incoming_message");
+
+	let message = match message {
+		Versioned::V1(m) => m,
+		Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility(
+			m,
+		)) => m,
+	};
+
+	let relay_parent = message.get_relay_parent();
+
+	let active_head = match active_heads.get_mut(&relay_parent) {
+		Some(h) => h,
+		None => {
+			gum::debug!(
+				target: LOG_TARGET,
+				%relay_parent,
+				"our view out-of-sync with active heads; head not found",
+			);
+
+			if !recent_outdated_heads.is_recent_outdated(&relay_parent) {
+				report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await;
+			}
+
+			return None
+		},
+	};
+
+	if let protocol_v1::StatementDistributionMessage::LargeStatement(_) = message {
+		if let Err(rep) = peer_data.receive_large_statement(&relay_parent) {
+			gum::debug!(target: LOG_TARGET, ?peer, ?message, ?rep, "Unexpected large statement.",);
+			report_peer(ctx.sender(), peer, rep).await;
+			return None
+		}
+	}
+
+	let fingerprint = message.get_fingerprint();
+	let candidate_hash = fingerprint.0.candidate_hash().clone();
+	let handle_incoming_span = active_head
+		.span
+		.child("handle-incoming")
+		.with_candidate(candidate_hash)
+		.with_peer_id(&peer);
+
+	let max_message_count = active_head.validators.len() * 2;
+
+	// Perform only basic checks before verifying the signature,
+	// as signature verification is more computationally heavy.
+	if let Err(rep) = peer_data.check_can_receive(&relay_parent, &fingerprint, max_message_count) {
+		// This situation can happen when a peer's Seconded message was lost
+		// but we have received the Valid statement.
+		// So we check it once and then ignore repeated violations to avoid
+		// reputation change flood.
+ let unexpected_count = peer_data.receive_unexpected(&relay_parent); + + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + ?peer, + ?message, + ?rep, + ?unexpected_count, + "Error inserting received statement" + ); + + match rep { + // This happens when a Valid statement has been received but there is no corresponding Seconded + COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE => { + metrics.on_unexpected_statement_valid(); + // Report peer merely if this is not a duplicate out-of-view statement that + // was caused by a missing Seconded statement from this peer + if unexpected_count == 0_usize { + report_peer(ctx.sender(), peer, rep).await; + } + }, + // This happens when we have an unexpected remote peer that announced Seconded + COST_UNEXPECTED_STATEMENT_REMOTE => { + metrics.on_unexpected_statement_seconded(); + report_peer(ctx.sender(), peer, rep).await; + }, + _ => { + report_peer(ctx.sender(), peer, rep).await; + }, + } + + return None + } + + let checked_compact = { + let (compact, validator_index) = message.get_fingerprint(); + let signature = message.get_signature(); + + let unchecked_compact = UncheckedSignedStatement::new(compact, validator_index, signature); + + match active_head.check_useful_or_unknown(&unchecked_compact) { + Ok(()) => {}, + Err(DeniedStatement::NotUseful) => return None, + Err(DeniedStatement::UsefulButKnown) => { + // Note a received statement in the peer data + peer_data + .receive(&relay_parent, &fingerprint, max_message_count) + .expect("checked in `check_can_receive` above; qed"); + report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT).await; + + return None + }, + } + + // check the signature on the statement. + match check_statement_signature(&active_head, relay_parent, unchecked_compact) { + Err(statement) => { + gum::debug!(target: LOG_TARGET, ?peer, ?statement, "Invalid statement signature"); + report_peer(ctx.sender(), peer, COST_INVALID_SIGNATURE).await; + return None + }, + Ok(statement) => statement, + } + }; + + // Fetch from the network only after signature and usefulness checks are completed. + let is_large_statement = message.is_large_statement(); + let statement = retrieve_statement_from_message( + peer, + peer_data.protocol_version, + message, + active_head, + ctx, + req_sender, + metrics, + ) + .await?; + + let payload = statement.unchecked_into_payload(); + + // Upgrade the `Signed` wrapper from the compact payload to the full payload. + // This fails if the payload doesn't encode correctly. + let statement: SignedFullStatement = match checked_compact.convert_to_superpayload(payload) { + Err((compact, _)) => { + gum::debug!( + target: LOG_TARGET, + ?peer, + ?compact, + is_large_statement, + "Full statement had bad payload." + ); + report_peer(ctx.sender(), peer, COST_WRONG_HASH).await; + return None + }, + Ok(statement) => statement, + }; + + // Ensure the statement is stored in the peer data. + // + // Note that if the peer is sending us something that is not within their view, + // it will not be kept within their log. + match peer_data.receive(&relay_parent, &fingerprint, max_message_count) { + Err(_) => { + unreachable!("checked in `check_can_receive` above; qed"); + }, + Ok(true) => { + gum::trace!(target: LOG_TARGET, ?peer, ?statement, "Statement accepted"); + // Send the peer all statements concerning the candidate that we have, + // since it appears to have just learned about the candidate. 
+			send_statements_about(
+				peer.clone(),
+				peer_data,
+				ctx,
+				relay_parent,
+				candidate_hash,
+				&*active_head,
+				metrics,
+			)
+			.await;
+		},
+		Ok(false) => {},
+	}
+
+	// For `Seconded` statements `None` or `Err` means we couldn't fetch the PVD, which
+	// means the statement shouldn't be accepted.
+	//
+	// In case of `Valid` we should have it cached prior, therefore this performs
+	// no Runtime API calls and always returns `Ok(Some(_))`.
+	let pvd = if let Statement::Seconded(receipt) = statement.payload() {
+		let para_id = receipt.descriptor.para_id;
+		// Either call the Runtime API or check that validation data is cached.
+		let result = active_head
+			.fetch_persisted_validation_data(ctx.sender(), relay_parent, para_id)
+			.await;
+
+		match result {
+			Ok(Some(pvd)) => Some(pvd.clone()),
+			Ok(None) | Err(_) => return None,
+		}
+	} else {
+		None
+	};
+
+	// Extend the payload with persisted validation data required by the backing
+	// subsystem.
+	//
+	// Do it in advance before noting the statement because we don't want to borrow active
+	// head mutable and use the cache.
+	let statement_with_pvd = statement
+		.clone()
+		.convert_to_superpayload_with(move |statement| match statement {
+			Statement::Seconded(receipt) => {
+				let persisted_validation_data = pvd
+					.expect("PVD is ensured to be `Some` for all `Seconded` messages above; qed");
+				StatementWithPVD::Seconded(receipt, persisted_validation_data)
+			},
+			Statement::Valid(candidate_hash) => StatementWithPVD::Valid(candidate_hash),
+		})
+		.expect("payload was checked with conversion from compact; qed");
+
+	// Note: `peer_data.receive` already ensures that the statement is not an unbounded equivocation
+	// or unpinned to a seconded candidate. So it is safe to place it into the storage.
+	match active_head.note_statement(statement) {
+		NotedStatement::NotUseful | NotedStatement::UsefulButKnown => {
+			unreachable!("checked in `check_useful_or_unknown` above; qed");
+		},
+		NotedStatement::Fresh(statement) => {
+			report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT_FIRST).await;
+
+			let mut _span = handle_incoming_span.child("notify-backing");
+
+			// When we receive a new message from a peer, we forward it to the
+			// candidate backing subsystem.
+			ctx.send_message(CandidateBackingMessage::Statement(relay_parent, statement_with_pvd))
+				.await;
+
+			Some((relay_parent, statement))
+		},
+	}
+}
+
+/// Update a peer's view. Sends all newly unlocked statements based on the previous view.
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
+async fn update_peer_view_and_maybe_send_unlocked<Context, R>(
+	peer: PeerId,
+	topology: &SessionGridTopology,
+	peer_data: &mut PeerData,
+	ctx: &mut Context,
+	active_heads: &HashMap<Hash, ActiveHeadData>,
+	new_view: View,
+	metrics: &Metrics,
+	rng: &mut R,
+) where
+	R: rand::Rng,
+{
+	let old_view = std::mem::replace(&mut peer_data.view, new_view);
+
+	// Remove entries for all relay-parents in the old view but not the new.
+	for removed in old_view.difference(&peer_data.view) {
+		let _ = peer_data.view_knowledge.remove(removed);
+	}
+
+	// Use both grid directions
+	let is_gossip_peer = topology.route_to_peer(RequiredRouting::GridXY, &peer);
+	let lucky = is_gossip_peer ||
+		util::gen_ratio_rng(
+			util::MIN_GOSSIP_PEERS.saturating_sub(topology.len()),
+			util::MIN_GOSSIP_PEERS,
+			rng,
+		);
+
+	// Add entries for all relay-parents in the new view but not the old.
+	// Furthermore, send all statements we have for those relay parents.
+	let new_view = peer_data.view.difference(&old_view).copied().collect::<Vec<_>>();
+	for new in new_view.iter().copied() {
+		peer_data.view_knowledge.insert(new, Default::default());
+		if !lucky {
+			continue
+		}
+		if let Some(active_head) = active_heads.get(&new) {
+			send_statements(peer.clone(), peer_data, ctx, new, active_head, metrics).await;
+		}
+	}
+}
+
+/// Handle a local network update.
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
+pub(crate) async fn handle_network_update<Context, R>(
+	ctx: &mut Context,
+	state: &mut State,
+	req_sender: &mpsc::Sender<RequesterMessage>,
+	update: NetworkBridgeEvent<net_protocol::StatementDistributionMessage>,
+	rng: &mut R,
+	metrics: &Metrics,
+) where
+	R: rand::Rng,
+{
+	let peers = &mut state.peers;
+	let topology_storage = &mut state.topology_storage;
+	let authorities = &mut state.authorities;
+	let active_heads = &mut state.active_heads;
+	let recent_outdated_heads = &state.recent_outdated_heads;
+	let runtime = &mut state.runtime;
+
+	match update {
+		NetworkBridgeEvent::PeerConnected(peer, role, protocol_version, maybe_authority) => {
+			gum::trace!(target: LOG_TARGET, ?peer, ?role, ?protocol_version, "Peer connected");
+
+			let protocol_version = match ValidationVersion::try_from(protocol_version).ok() {
+				Some(v) => v,
+				None => {
+					gum::trace!(
+						target: LOG_TARGET,
+						?peer,
+						?protocol_version,
+						"unknown protocol version, ignoring"
+					);
+					return
+				},
+			};
+
+			peers.insert(
+				peer,
+				PeerData {
+					view: Default::default(),
+					protocol_version,
+					view_knowledge: Default::default(),
+					maybe_authority: maybe_authority.clone(),
+				},
+			);
+			if let Some(authority_ids) = maybe_authority {
+				authority_ids.into_iter().for_each(|a| {
+					authorities.insert(a, peer);
+				});
+			}
+		},
+		NetworkBridgeEvent::PeerDisconnected(peer) => {
+			gum::trace!(target: LOG_TARGET, ?peer, "Peer disconnected");
+			if let Some(auth_ids) = peers.remove(&peer).and_then(|p| p.maybe_authority) {
+				auth_ids.into_iter().for_each(|a| {
+					authorities.remove(&a);
+				});
+			}
+		},
+		NetworkBridgeEvent::NewGossipTopology(topology) => {
+			let _ = metrics.time_network_bridge_update("new_gossip_topology");
+
+			let new_session_index = topology.session;
+			let new_topology: SessionGridTopology = topology.into();
+			let old_topology = topology_storage.get_current_topology();
+			let newly_added = new_topology.peers_diff(old_topology);
+			topology_storage.update_topology(new_session_index, new_topology);
+			for peer in newly_added {
+				if let Some(data) = peers.get_mut(&peer) {
+					let view = std::mem::take(&mut data.view);
+					update_peer_view_and_maybe_send_unlocked(
+						peer,
+						topology_storage.get_current_topology(),
+						data,
+						ctx,
+						&*active_heads,
+						view,
+						metrics,
+						rng,
+					)
+					.await
+				}
+			}
+		},
+		NetworkBridgeEvent::PeerMessage(peer, message) => {
+			handle_incoming_message_and_circulate(
+				peer,
+				topology_storage,
+				peers,
+				active_heads,
+				&*recent_outdated_heads,
+				ctx,
+				message,
+				req_sender,
+				metrics,
+				runtime,
+				rng,
+			)
+			.await;
+		},
+		NetworkBridgeEvent::PeerViewChange(peer, view) => {
+			let _ = metrics.time_network_bridge_update("peer_view_change");
+			gum::trace!(target: LOG_TARGET, ?peer, ?view, "Peer view change");
+			match peers.get_mut(&peer) {
+				Some(data) =>
+					update_peer_view_and_maybe_send_unlocked(
+						peer,
+						topology_storage.get_current_topology(),
+						data,
+						ctx,
+						&*active_heads,
+						view,
+						metrics,
+						rng,
+					)
+					.await,
+				None => (),
+			}
+		},
+		NetworkBridgeEvent::OurViewChange(_view) => {
+			// handled by `ActiveLeavesUpdate`
+		},
+	}
+}
+
+/// Handle messages from the responder background task.
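+///
+/// Currently the only such message is `GetData`: the responder asks for the full
+/// `CommittedCandidateReceipt` of a fetched large statement, which is served from
+/// `waiting_large_statements` after verifying that the requesting peer was actually told
+/// about the candidate.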
+pub(crate) async fn handle_responder_message( + state: &mut State, + message: ResponderMessage, +) -> JfyiErrorResult<()> { + let peers = &state.peers; + let active_heads = &mut state.active_heads; + + match message { + ResponderMessage::GetData { requesting_peer, relay_parent, candidate_hash, tx } => { + if !requesting_peer_knows_about_candidate( + peers, + &requesting_peer, + &relay_parent, + &candidate_hash, + )? { + return Err(JfyiError::RequestedUnannouncedCandidate( + requesting_peer, + candidate_hash, + )) + } + + let active_head = + active_heads.get(&relay_parent).ok_or(JfyiError::NoSuchHead(relay_parent))?; + + let committed = match active_head.waiting_large_statements.get(&candidate_hash) { + Some(LargeStatementStatus::FetchedOrShared(committed)) => committed.clone(), + _ => + return Err(JfyiError::NoSuchFetchedLargeStatement(relay_parent, candidate_hash)), + }; + + tx.send(committed).map_err(|_| JfyiError::ResponderGetDataCanceled)?; + }, + } + Ok(()) +} + +#[overseer::contextbounds(StatementDistribution, prefix = self::overseer)] +pub(crate) async fn handle_requester_message( + ctx: &mut Context, + state: &mut State, + req_sender: &mpsc::Sender, + rng: &mut R, + message: RequesterMessage, + metrics: &Metrics, +) -> JfyiErrorResult<()> { + let topology_storage = &state.topology_storage; + let peers = &mut state.peers; + let active_heads = &mut state.active_heads; + let recent_outdated_heads = &state.recent_outdated_heads; + let runtime = &mut state.runtime; + + match message { + RequesterMessage::Finished { + relay_parent, + candidate_hash, + from_peer, + response, + bad_peers, + } => { + for bad in bad_peers { + report_peer(ctx.sender(), bad, COST_FETCH_FAIL).await; + } + report_peer(ctx.sender(), from_peer, BENEFIT_VALID_RESPONSE).await; + + let active_head = + active_heads.get_mut(&relay_parent).ok_or(JfyiError::NoSuchHead(relay_parent))?; + + let status = active_head.waiting_large_statements.remove(&candidate_hash); + + let info = match status { + Some(LargeStatementStatus::Fetching(info)) => info, + Some(LargeStatementStatus::FetchedOrShared(_)) => { + // We are no longer interested in the data. + return Ok(()) + }, + None => + return Err(JfyiError::NoSuchLargeStatementStatus(relay_parent, candidate_hash)), + }; + + active_head + .waiting_large_statements + .insert(candidate_hash, LargeStatementStatus::FetchedOrShared(response)); + + // Cache is now populated, send all messages: + for (peer, messages) in info.available_peers { + for message in messages { + handle_incoming_message_and_circulate( + peer, + topology_storage, + peers, + active_heads, + recent_outdated_heads, + ctx, + message, + req_sender, + &metrics, + runtime, + rng, + ) + .await; + } + } + }, + RequesterMessage::SendRequest(req) => { + ctx.send_message(NetworkBridgeTxMessage::SendRequests( + vec![req], + IfDisconnected::ImmediateError, + )) + .await; + }, + RequesterMessage::GetMorePeers { relay_parent, candidate_hash, tx } => { + let active_head = + active_heads.get_mut(&relay_parent).ok_or(JfyiError::NoSuchHead(relay_parent))?; + + let status = active_head.waiting_large_statements.get_mut(&candidate_hash); + + let info = match status { + Some(LargeStatementStatus::Fetching(info)) => info, + Some(LargeStatementStatus::FetchedOrShared(_)) => { + // This task is going to die soon - no need to send it anything. 
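+					// The payload was already obtained (or shared locally), so any
+					// extra peers the fetching task asks for are no longer needed.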
+ gum::debug!(target: LOG_TARGET, "Zombie task wanted more peers."); + return Ok(()) + }, + None => + return Err(JfyiError::NoSuchLargeStatementStatus(relay_parent, candidate_hash)), + }; + + if info.peers_to_try.is_empty() { + info.peer_sender = Some(tx); + } else { + let peers_to_try = std::mem::take(&mut info.peers_to_try); + if let Err(peers) = tx.send(peers_to_try) { + // No longer interested for now - might want them later: + info.peers_to_try = peers; + } + } + }, + RequesterMessage::ReportPeer(peer, rep) => report_peer(ctx.sender(), peer, rep).await, + } + Ok(()) +} + +/// Handle a deactivated leaf. +pub(crate) fn handle_deactivate_leaf(state: &mut State, deactivated: Hash) { + if state.active_heads.remove(&deactivated).is_some() { + gum::trace!( + target: LOG_TARGET, + hash = ?deactivated, + "Deactivating leaf", + ); + + state.recent_outdated_heads.note_outdated(deactivated); + } +} + +/// Handle a new activated leaf. This assumes that the leaf does not +/// support prospective parachains. +#[overseer::contextbounds(StatementDistribution, prefix = self::overseer)] +pub(crate) async fn handle_activated_leaf( + ctx: &mut Context, + state: &mut State, + activated: ActivatedLeaf, +) -> Result<()> { + let relay_parent = activated.hash; + let span = PerLeafSpan::new(activated.span, "statement-distribution-legacy"); + gum::trace!( + target: LOG_TARGET, + hash = ?relay_parent, + "New active leaf", + ); + + // Retrieve the parachain validators at the child of the head we track. + let session_index = + state.runtime.get_session_index_for_child(ctx.sender(), relay_parent).await?; + let info = state + .runtime + .get_session_info_by_index(ctx.sender(), relay_parent, session_index) + .await?; + let session_info = &info.session_info; + + state.active_heads.entry(relay_parent).or_insert(ActiveHeadData::new( + session_info.validators.clone(), + session_index, + span, + )); + + Ok(()) +} + +/// Share a local statement with the rest of the network. +#[overseer::contextbounds(StatementDistribution, prefix = self::overseer)] +pub(crate) async fn share_local_statement( + ctx: &mut Context, + state: &mut State, + relay_parent: Hash, + statement: SignedFullStatement, + rng: &mut R, + metrics: &Metrics, +) -> Result<()> { + // Make sure we have data in cache: + if is_statement_large(&statement).0 { + if let Statement::Seconded(committed) = &statement.payload() { + let active_head = state + .active_heads + .get_mut(&relay_parent) + // This should never be out-of-sync with our view if the view + // updates correspond to actual `StartWork` messages. 
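+			// Seeding the cache as `FetchedOrShared` below also lets the responder
+			// task serve peers that later fetch this large statement from us.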
+ .ok_or(JfyiError::NoSuchHead(relay_parent))?; + active_head.waiting_large_statements.insert( + statement.payload().candidate_hash(), + LargeStatementStatus::FetchedOrShared(committed.clone()), + ); + } + } + + let info = state.runtime.get_session_info(ctx.sender(), relay_parent).await?; + let session_info = &info.session_info; + let validator_info = &info.validator_info; + + // Get peers in our group, so we can make sure they get our statement + // directly: + let group_peers = { + if let Some(our_group) = validator_info.our_group { + let our_group = &session_info.validator_groups[our_group.0 as usize]; + + our_group + .into_iter() + .filter_map(|i| { + if Some(*i) == validator_info.our_index { + return None + } + let authority_id = &session_info.discovery_keys[i.0 as usize]; + state.authorities.get(authority_id).map(|p| *p) + }) + .collect() + } else { + Vec::new() + } + }; + circulate_statement_and_dependents( + &mut state.topology_storage, + &mut state.peers, + &mut state.active_heads, + ctx, + relay_parent, + statement, + group_peers, + metrics, + rng, + ) + .await; + + Ok(()) +} + +/// Check whether a peer knows about a candidate from us. +/// +/// If not, it is deemed illegal for it to request corresponding data from us. +fn requesting_peer_knows_about_candidate( + peers: &HashMap, + requesting_peer: &PeerId, + relay_parent: &Hash, + candidate_hash: &CandidateHash, +) -> JfyiErrorResult { + let peer_data = peers + .get(requesting_peer) + .ok_or_else(|| JfyiError::NoSuchPeer(*requesting_peer))?; + let knowledge = peer_data + .view_knowledge + .get(relay_parent) + .ok_or_else(|| JfyiError::NoSuchHead(*relay_parent))?; + Ok(knowledge.sent_candidates.get(&candidate_hash).is_some()) +} + +fn compatible_v1_message( + version: ValidationVersion, + message: protocol_v1::StatementDistributionMessage, +) -> net_protocol::StatementDistributionMessage { + match version { + ValidationVersion::V1 => Versioned::V1(message), + ValidationVersion::VStaging => Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::V1Compatibility(message), + ), + } +} diff --git a/node/network/statement-distribution/src/requester.rs b/node/network/statement-distribution/src/legacy_v1/requester.rs similarity index 98% rename from node/network/statement-distribution/src/requester.rs rename to node/network/statement-distribution/src/legacy_v1/requester.rs index 24ffa5e41742..f711b09d413a 100644 --- a/node/network/statement-distribution/src/requester.rs +++ b/node/network/statement-distribution/src/legacy_v1/requester.rs @@ -32,7 +32,10 @@ use polkadot_node_subsystem::{Span, Stage}; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::v2::{CandidateHash, CommittedCandidateReceipt, Hash}; -use crate::{metrics::Metrics, COST_WRONG_HASH, LOG_TARGET}; +use crate::{ + legacy_v1::{COST_WRONG_HASH, LOG_TARGET}, + metrics::Metrics, +}; // In case we failed fetching from our known peers, how long we should wait before attempting a // retry, even though we have not yet discovered any new peers. 
Or in other words how long to diff --git a/node/network/statement-distribution/src/responder.rs b/node/network/statement-distribution/src/legacy_v1/responder.rs similarity index 100% rename from node/network/statement-distribution/src/responder.rs rename to node/network/statement-distribution/src/legacy_v1/responder.rs diff --git a/node/network/statement-distribution/src/tests.rs b/node/network/statement-distribution/src/legacy_v1/tests.rs similarity index 98% rename from node/network/statement-distribution/src/tests.rs rename to node/network/statement-distribution/src/legacy_v1/tests.rs index f1a4c0562c94..f3db07757e06 100644 --- a/node/network/statement-distribution/src/tests.rs +++ b/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -14,7 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -use super::{metrics::Metrics, *}; +use super::*; +use crate::{metrics::Metrics, *}; + use assert_matches::assert_matches; use futures::executor::{self, block_on}; use futures_timer::Delay; @@ -25,7 +27,7 @@ use polkadot_node_network_protocol::{ v1::{StatementFetchingRequest, StatementFetchingResponse}, IncomingRequest, Recipient, ReqProtocolNames, Requests, }, - view, ObservedRole, + view, ObservedRole, VersionedValidationProtocol, }; use polkadot_node_primitives::{ SignedFullStatementWithPVD, Statement, UncheckedSignedFullStatement, @@ -512,6 +514,7 @@ fn peer_view_update_sends_messages() { let mut peer_data = PeerData { view: old_view, + protocol_version: ValidationVersion::V1, view_knowledge: { let mut k = HashMap::new(); @@ -570,8 +573,9 @@ fn peer_view_update_sends_messages() { for statement in active_head.statements_about(candidate_hash) { let message = handle.recv().await; let expected_to = vec![peer.clone()]; - let expected_payload = - statement_message(hash_c, statement.statement.clone(), &Metrics::default()); + let expected_payload = VersionedValidationProtocol::from(Versioned::V1( + v1_statement_message(hash_c, statement.statement.clone(), &Metrics::default()), + )); assert_matches!( message, @@ -612,6 +616,7 @@ fn circulated_statement_goes_to_all_peers_with_view() { let peer_data_from_view = |view: View| PeerData { view: view.clone(), + protocol_version: ValidationVersion::V1, view_knowledge: view.iter().map(|v| (v.clone(), Default::default())).collect(), maybe_authority: None, }; @@ -716,7 +721,7 @@ fn circulated_statement_goes_to_all_peers_with_view() { assert_eq!( payload, - statement_message(hash_b, statement.statement.clone(), &Metrics::default()), + VersionedValidationProtocol::from(Versioned::V1(v1_statement_message(hash_b, statement.statement.clone(), &Metrics::default()))), ); } ) @@ -1688,9 +1693,17 @@ fn share_prioritizes_backing_group() { .await .unwrap(); - SignedFullStatement::sign( + // note: this is ignored by legacy-v1 code. 
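+		// The placeholder values below are therefore safe: the legacy-v1 path
+		// strips the PVD again (see `drop_pvd_from_signed` further down) before
+		// deriving the metadata.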
+ let pvd = PersistedValidationData { + parent_head: HeadData::from(vec![1, 2, 3]), + relay_parent_number: 0, + relay_parent_storage_root: Hash::repeat_byte(42), + max_pov_size: 100, + }; + + SignedFullStatementWithPVD::sign( &keystore, - Statement::Seconded(candidate.clone()), + Statement::Seconded(candidate.clone()).supply_pvd(pvd), &signing_context, ValidatorIndex(4), &ferdie_public.into(), @@ -1701,14 +1714,15 @@ fn share_prioritizes_backing_group() { .expect("should be signed") }; - let metadata = derive_metadata_assuming_seconded(hash_a, statement.clone().into()); - handle .send(FromOrchestra::Communication { msg: StatementDistributionMessage::Share(hash_a, statement.clone()), }) .await; + let statement = StatementWithPVD::drop_pvd_from_signed(statement); + let metadata = derive_metadata_assuming_seconded(hash_a, statement.clone().into()); + // Messages should go out: assert_matches!( handle.recv().await, @@ -2390,3 +2404,8 @@ fn derive_metadata_assuming_seconded( signature: statement.unchecked_signature().clone(), } } + +// TODO [now]: adapt most tests to v2 messages. +// TODO [now]: test that v2 peers send v1 messages to v1 peers +// TODO [now]: test that v2 peers handle v1 messages from v1 peers. +// TODO [now]: test that v2 peers send v2 messages to v2 peers. diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 40497f77557f..9731818a459a 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -22,109 +22,45 @@ #![deny(unused_crate_dependencies)] #![warn(missing_docs)] -use error::{log_error, FatalResult, JfyiErrorResult}; -use parity_scale_codec::Encode; +use error::{log_error, FatalResult}; use polkadot_node_network_protocol::{ - self as net_protocol, - grid_topology::{RequiredRouting, SessionBoundGridTopologyStorage, SessionGridTopology}, - peer_set::{IsAuthority, PeerSet}, request_response::{v1 as request_v1, IncomingRequestReceiver}, - v1::{self as protocol_v1, StatementMetadata}, - vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep, - Versioned, View, + vstaging as protocol_vstaging, Versioned, }; -use polkadot_node_primitives::{ - SignedFullStatement, Statement, StatementWithPVD, UncheckedSignedFullStatement, -}; -use polkadot_node_subsystem_util::{self as util, rand, MIN_GOSSIP_PEERS}; - +use polkadot_node_primitives::StatementWithPVD; use polkadot_node_subsystem::{ - jaeger, - messages::{ - CandidateBackingMessage, NetworkBridgeEvent, NetworkBridgeTxMessage, - StatementDistributionMessage, - }, - overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, PerLeafSpan, SpawnedSubsystem, - StatementDistributionSenderTrait, SubsystemError, -}; -use polkadot_primitives::v2::{ - AuthorityDiscoveryId, CandidateHash, CommittedCandidateReceipt, CompactStatement, Hash, - Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, SignedStatement, SigningContext, - UncheckedSignedStatement, ValidatorId, ValidatorIndex, ValidatorSignature, + messages::{NetworkBridgeEvent, StatementDistributionMessage}, + overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; +use polkadot_node_subsystem_util::rand; -use futures::{ - channel::{mpsc, oneshot}, - future::RemoteHandle, - prelude::*, -}; -use indexmap::{map::Entry as IEntry, IndexMap}; +use futures::{channel::mpsc, prelude::*}; use sp_keystore::SyncCryptoStorePtr; -use util::runtime::RuntimeInfo; - -use 
std::collections::{hash_map::Entry, HashMap, HashSet, VecDeque}; use fatality::Nested; mod error; pub use error::{Error, FatalError, JfyiError, Result}; -/// Background task logic for requesting of large statements. -mod requester; -use requester::{fetch, RequesterMessage}; - -/// Background task logic for responding for large statements. -mod responder; -use responder::{respond, ResponderMessage}; - /// Metrics for the statement distribution pub(crate) mod metrics; use metrics::Metrics; -#[cfg(test)] -mod tests; - -const COST_UNEXPECTED_STATEMENT: Rep = Rep::CostMinor("Unexpected Statement"); -const COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE: Rep = - Rep::CostMinor("Unexpected Statement, missing knowlege for relay parent"); -const COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE: Rep = - Rep::CostMinor("Unexpected Statement, unknown candidate"); -const COST_UNEXPECTED_STATEMENT_REMOTE: Rep = - Rep::CostMinor("Unexpected Statement, remote not allowed"); - -const COST_FETCH_FAIL: Rep = - Rep::CostMinor("Requesting `CommittedCandidateReceipt` from peer failed"); -const COST_INVALID_SIGNATURE: Rep = Rep::CostMajor("Invalid Statement Signature"); -const COST_WRONG_HASH: Rep = Rep::CostMajor("Received candidate had wrong hash"); -const COST_DUPLICATE_STATEMENT: Rep = - Rep::CostMajorRepeated("Statement sent more than once by peer"); -const COST_APPARENT_FLOOD: Rep = Rep::Malicious("Peer appears to be flooding us with statements"); - -const BENEFIT_VALID_STATEMENT: Rep = Rep::BenefitMajor("Peer provided a valid statement"); -const BENEFIT_VALID_STATEMENT_FIRST: Rep = - Rep::BenefitMajorFirst("Peer was the first to provide a valid statement"); -const BENEFIT_VALID_RESPONSE: Rep = - Rep::BenefitMajor("Peer provided a valid large statement response"); - -/// The maximum amount of candidates each validator is allowed to second at any relay-parent. -/// Short for "Validator Candidate Threshold". -/// -/// This is the amount of candidates we keep per validator at any relay-parent. -/// Typically we will only keep 1, but when a validator equivocates we will need to track 2. -const VC_THRESHOLD: usize = 2; +mod legacy_v1; +use legacy_v1::{ + respond as v1_respond_task, RequesterMessage as V1RequesterMessage, + ResponderMessage as V1ResponderMessage, +}; const LOG_TARGET: &str = "parachain::statement-distribution"; -/// Large statements should be rare. -const MAX_LARGE_STATEMENTS_PER_SENDER: usize = 20; - /// The statement distribution subsystem. pub struct StatementDistributionSubsystem { /// Pointer to a keystore, which is required for determining this node's validator index. keystore: SyncCryptoStorePtr, /// Receiver for incoming large statement requests. - req_receiver: Option>, + v1_req_receiver: Option>, /// Prometheus metrics metrics: Metrics, /// Pseudo-random generator for peers selection logic @@ -146,1679 +82,35 @@ impl StatementDistributionSubsyst } } -#[derive(Default)] -struct RecentOutdatedHeads { - buf: VecDeque, -} - -impl RecentOutdatedHeads { - fn note_outdated(&mut self, hash: Hash) { - const MAX_BUF_LEN: usize = 10; - - self.buf.push_back(hash); - - while self.buf.len() > MAX_BUF_LEN { - let _ = self.buf.pop_front(); - } - } - - fn is_recent_outdated(&self, hash: &Hash) -> bool { - self.buf.contains(hash) - } -} - -/// Tracks our impression of a single peer's view of the candidates a validator has seconded -/// for a given relay-parent. -/// -/// It is expected to receive at most `VC_THRESHOLD` from us and be aware of at most `VC_THRESHOLD` -/// via other means. 
-#[derive(Default)] -struct VcPerPeerTracker { - local_observed: arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>, - remote_observed: arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>, -} - -impl VcPerPeerTracker { - /// Note that the remote should now be aware that a validator has seconded a given candidate (by hash) - /// based on a message that we have sent it from our local pool. - fn note_local(&mut self, h: CandidateHash) { - if !note_hash(&mut self.local_observed, h) { - gum::warn!( - target: LOG_TARGET, - "Statement distribution is erroneously attempting to distribute more \ - than {} candidate(s) per validator index. Ignoring", - VC_THRESHOLD, - ); - } - } - - /// Note that the remote should now be aware that a validator has seconded a given candidate (by hash) - /// based on a message that it has sent us. - /// - /// Returns `true` if the peer was allowed to send us such a message, `false` otherwise. - fn note_remote(&mut self, h: CandidateHash) -> bool { - note_hash(&mut self.remote_observed, h) - } - - /// Returns `true` if the peer is allowed to send us such a message, `false` otherwise. - fn is_wanted_candidate(&self, h: &CandidateHash) -> bool { - !self.remote_observed.contains(h) && !self.remote_observed.is_full() - } -} - -fn note_hash( - observed: &mut arrayvec::ArrayVec<[CandidateHash; VC_THRESHOLD]>, - h: CandidateHash, -) -> bool { - if observed.contains(&h) { - return true - } - - observed.try_push(h).is_ok() -} - -/// knowledge that a peer has about goings-on in a relay parent. -#[derive(Default)] -struct PeerRelayParentKnowledge { - /// candidates that the peer is aware of because we sent statements to it. This indicates that we can - /// send other statements pertaining to that candidate. - sent_candidates: HashSet, - /// candidates that peer is aware of, because we received statements from it. - received_candidates: HashSet, - /// fingerprints of all statements a peer should be aware of: those that - /// were sent to the peer by us. - sent_statements: HashSet<(CompactStatement, ValidatorIndex)>, - /// fingerprints of all statements a peer should be aware of: those that - /// were sent to us by the peer. - received_statements: HashSet<(CompactStatement, ValidatorIndex)>, - /// How many candidates this peer is aware of for each given validator index. - seconded_counts: HashMap, - /// How many statements we've received for each candidate that we're aware of. - received_message_count: HashMap, - - /// How many large statements this peer already sent us. - /// - /// Flood protection for large statements is rather hard and as soon as we get - /// `https://github.com/paritytech/polkadot/issues/2979` implemented also no longer necessary. - /// Reason: We keep messages around until we fetched the payload, but if a node makes up - /// statements and never provides the data, we will keep it around for the slot duration. Not - /// even signature checking would help, as the sender, if a validator, can just sign arbitrary - /// invalid statements and will not face any consequences as long as it won't provide the - /// payload. - /// - /// Quick and temporary fix, only accept `MAX_LARGE_STATEMENTS_PER_SENDER` per connected node. - /// - /// Large statements should be rare, if they were not, we would run into problems anyways, as - /// we would not be able to distribute them in a timely manner. Therefore - /// `MAX_LARGE_STATEMENTS_PER_SENDER` can be set to a relatively small number. 
It is also not - /// per candidate hash, but in total as candidate hashes can be made up, as illustrated above. - /// - /// An attacker could still try to fill up our memory, by repeatedly disconnecting and - /// connecting again with new peer ids, but we assume that the resulting effective bandwidth - /// for such an attack would be too low. - large_statement_count: usize, - - /// We have seen a message that that is unexpected from this peer, so note this fact - /// and stop subsequent logging and peer reputation flood. - unexpected_count: usize, -} - -impl PeerRelayParentKnowledge { - /// Updates our view of the peer's knowledge with this statement's fingerprint based - /// on something that we would like to send to the peer. - /// - /// NOTE: assumes `self.can_send` returned true before this call. - /// - /// Once the knowledge has incorporated a statement, it cannot be incorporated again. - /// - /// This returns `true` if this is the first time the peer has become aware of a - /// candidate with the given hash. - fn send(&mut self, fingerprint: &(CompactStatement, ValidatorIndex)) -> bool { - debug_assert!( - self.can_send(fingerprint), - "send is only called after `can_send` returns true; qed", - ); - - let new_known = match fingerprint.0 { - CompactStatement::Seconded(ref h) => { - self.seconded_counts.entry(fingerprint.1).or_default().note_local(h.clone()); - - let was_known = self.is_known_candidate(h); - self.sent_candidates.insert(h.clone()); - !was_known - }, - CompactStatement::Valid(_) => false, - }; - - self.sent_statements.insert(fingerprint.clone()); - - new_known - } - - /// This returns `true` if the peer cannot accept this statement, without altering internal - /// state, `false` otherwise. - fn can_send(&self, fingerprint: &(CompactStatement, ValidatorIndex)) -> bool { - let already_known = self.sent_statements.contains(fingerprint) || - self.received_statements.contains(fingerprint); - - if already_known { - return false - } - - match fingerprint.0 { - CompactStatement::Valid(ref h) => { - // The peer can only accept Valid statements for which it is aware - // of the corresponding candidate. - self.is_known_candidate(h) - }, - CompactStatement::Seconded(_) => true, - } - } - - /// Attempt to update our view of the peer's knowledge with this statement's fingerprint based on - /// a message we are receiving from the peer. - /// - /// Provide the maximum message count that we can receive per candidate. In practice we should - /// not receive more statements for any one candidate than there are members in the group assigned - /// to that para, but this maximum needs to be lenient to account for equivocations that may be - /// cross-group. As such, a maximum of 2 * `n_validators` is recommended. - /// - /// This returns an error if the peer should not have sent us this message according to protocol - /// rules for flood protection. - /// - /// If this returns `Ok`, the internal state has been altered. After `receive`ing a new - /// candidate, we are then cleared to send the peer further statements about that candidate. - /// - /// This returns `Ok(true)` if this is the first time the peer has become aware of a - /// candidate with given hash. - fn receive( - &mut self, - fingerprint: &(CompactStatement, ValidatorIndex), - max_message_count: usize, - ) -> std::result::Result { - // We don't check `sent_statements` because a statement could be in-flight from both - // sides at the same time. 
- if self.received_statements.contains(fingerprint) { - return Err(COST_DUPLICATE_STATEMENT) - } - - let (candidate_hash, fresh) = match fingerprint.0 { - CompactStatement::Seconded(ref h) => { - let allowed_remote = self - .seconded_counts - .entry(fingerprint.1) - .or_insert_with(Default::default) - .note_remote(h.clone()); - - if !allowed_remote { - return Err(COST_UNEXPECTED_STATEMENT_REMOTE) - } - - (h, !self.is_known_candidate(h)) - }, - CompactStatement::Valid(ref h) => { - if !self.is_known_candidate(h) { - return Err(COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE) - } - - (h, false) - }, - }; - - { - let received_per_candidate = - self.received_message_count.entry(*candidate_hash).or_insert(0); - - if *received_per_candidate >= max_message_count { - return Err(COST_APPARENT_FLOOD) - } - - *received_per_candidate += 1; - } - - self.received_statements.insert(fingerprint.clone()); - self.received_candidates.insert(candidate_hash.clone()); - Ok(fresh) - } - - /// Note a received large statement metadata. - fn receive_large_statement(&mut self) -> std::result::Result<(), Rep> { - if self.large_statement_count >= MAX_LARGE_STATEMENTS_PER_SENDER { - return Err(COST_APPARENT_FLOOD) - } - self.large_statement_count += 1; - Ok(()) - } - - /// This method does the same checks as `receive` without modifying the internal state. - /// Returns an error if the peer should not have sent us this message according to protocol - /// rules for flood protection. - fn check_can_receive( - &self, - fingerprint: &(CompactStatement, ValidatorIndex), - max_message_count: usize, - ) -> std::result::Result<(), Rep> { - // We don't check `sent_statements` because a statement could be in-flight from both - // sides at the same time. - if self.received_statements.contains(fingerprint) { - return Err(COST_DUPLICATE_STATEMENT) - } - - let candidate_hash = match fingerprint.0 { - CompactStatement::Seconded(ref h) => { - let allowed_remote = self - .seconded_counts - .get(&fingerprint.1) - .map_or(true, |r| r.is_wanted_candidate(h)); - - if !allowed_remote { - return Err(COST_UNEXPECTED_STATEMENT_REMOTE) - } - - h - }, - CompactStatement::Valid(ref h) => { - if !self.is_known_candidate(&h) { - return Err(COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE) - } - - h - }, - }; - - let received_per_candidate = self.received_message_count.get(candidate_hash).unwrap_or(&0); - - if *received_per_candidate >= max_message_count { - Err(COST_APPARENT_FLOOD) - } else { - Ok(()) - } - } - - /// Check for candidates that the peer is aware of. This indicates that we can - /// send other statements pertaining to that candidate. - fn is_known_candidate(&self, candidate: &CandidateHash) -> bool { - self.sent_candidates.contains(candidate) || self.received_candidates.contains(candidate) - } -} - -struct PeerData { - view: View, - view_knowledge: HashMap, - /// Peer might be known as authority with the given ids. - maybe_authority: Option>, -} - -impl PeerData { - /// Updates our view of the peer's knowledge with this statement's fingerprint based - /// on something that we would like to send to the peer. - /// - /// NOTE: assumes `self.can_send` returned true before this call. - /// - /// Once the knowledge has incorporated a statement, it cannot be incorporated again. - /// - /// This returns `true` if this is the first time the peer has become aware of a - /// candidate with the given hash. 
- fn send( - &mut self, - relay_parent: &Hash, - fingerprint: &(CompactStatement, ValidatorIndex), - ) -> bool { - debug_assert!( - self.can_send(relay_parent, fingerprint), - "send is only called after `can_send` returns true; qed", - ); - self.view_knowledge - .get_mut(relay_parent) - .expect("send is only called after `can_send` returns true; qed") - .send(fingerprint) - } - - /// This returns `None` if the peer cannot accept this statement, without altering internal - /// state. - fn can_send( - &self, - relay_parent: &Hash, - fingerprint: &(CompactStatement, ValidatorIndex), - ) -> bool { - self.view_knowledge.get(relay_parent).map_or(false, |k| k.can_send(fingerprint)) - } - - /// Attempt to update our view of the peer's knowledge with this statement's fingerprint based on - /// a message we are receiving from the peer. - /// - /// Provide the maximum message count that we can receive per candidate. In practice we should - /// not receive more statements for any one candidate than there are members in the group assigned - /// to that para, but this maximum needs to be lenient to account for equivocations that may be - /// cross-group. As such, a maximum of 2 * `n_validators` is recommended. - /// - /// This returns an error if the peer should not have sent us this message according to protocol - /// rules for flood protection. - /// - /// If this returns `Ok`, the internal state has been altered. After `receive`ing a new - /// candidate, we are then cleared to send the peer further statements about that candidate. - /// - /// This returns `Ok(true)` if this is the first time the peer has become aware of a - /// candidate with given hash. - fn receive( - &mut self, - relay_parent: &Hash, - fingerprint: &(CompactStatement, ValidatorIndex), - max_message_count: usize, - ) -> std::result::Result { - self.view_knowledge - .get_mut(relay_parent) - .ok_or(COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE)? - .receive(fingerprint, max_message_count) - } - - /// This method does the same checks as `receive` without modifying the internal state. - /// Returns an error if the peer should not have sent us this message according to protocol - /// rules for flood protection. - fn check_can_receive( - &self, - relay_parent: &Hash, - fingerprint: &(CompactStatement, ValidatorIndex), - max_message_count: usize, - ) -> std::result::Result<(), Rep> { - self.view_knowledge - .get(relay_parent) - .ok_or(COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE)? - .check_can_receive(fingerprint, max_message_count) - } - - /// Receive a notice about out of view statement and returns the value of the old flag - fn receive_unexpected(&mut self, relay_parent: &Hash) -> usize { - self.view_knowledge - .get_mut(relay_parent) - .map_or(0_usize, |relay_parent_peer_knowledge| { - let old = relay_parent_peer_knowledge.unexpected_count; - relay_parent_peer_knowledge.unexpected_count += 1_usize; - old - }) - } - - /// Basic flood protection for large statements. - fn receive_large_statement(&mut self, relay_parent: &Hash) -> std::result::Result<(), Rep> { - self.view_knowledge - .get_mut(relay_parent) - .ok_or(COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE)? - .receive_large_statement() - } -} - -// A statement stored while a relay chain head is active. -#[derive(Debug, Copy, Clone)] -struct StoredStatement<'a> { - comparator: &'a StoredStatementComparator, - statement: &'a SignedFullStatement, -} - -// A value used for comparison of stored statements to each other. 
-//
-// The compact version of the statement, the validator index, and the signature of the validator
-// are enough to differentiate between all types of equivocations, as long as the signature is
-// actually checked to be valid. The same statement with 2 signatures, and 2 statements with
-// different (or the same) signatures, will all be correctly judged to be unequal with this comparator.
-#[derive(PartialEq, Eq, Hash, Clone, Debug)]
-struct StoredStatementComparator {
-	compact: CompactStatement,
-	validator_index: ValidatorIndex,
-	signature: ValidatorSignature,
-}
-
-impl<'a> From<(&'a StoredStatementComparator, &'a SignedFullStatement)> for StoredStatement<'a> {
-	fn from(
-		(comparator, statement): (&'a StoredStatementComparator, &'a SignedFullStatement),
-	) -> Self {
-		Self { comparator, statement }
-	}
-}
-
-impl<'a> StoredStatement<'a> {
-	fn compact(&self) -> &'a CompactStatement {
-		&self.comparator.compact
-	}
-
-	fn fingerprint(&self) -> (CompactStatement, ValidatorIndex) {
-		(self.comparator.compact.clone(), self.statement.validator_index())
-	}
-}
-
-#[derive(Debug)]
-enum NotedStatement<'a> {
-	NotUseful,
-	Fresh(StoredStatement<'a>),
-	UsefulButKnown,
-}
-
-/// Large statement fetching status.
-enum LargeStatementStatus {
-	/// We are currently fetching the statement data from a remote peer. We keep a list of other nodes
-	/// claiming to have that data and will fall back on them.
-	Fetching(FetchingInfo),
-	/// Statement data is fetched or we got it locally via `StatementDistributionMessage::Share`.
-	FetchedOrShared(CommittedCandidateReceipt),
-}
-
-/// Info about a fetch in progress.
-struct FetchingInfo {
-	/// All peers that send us a `LargeStatement` or a `Valid` statement for the given
-	/// `CandidateHash`, together with their originally sent messages.
-	///
-	/// We use an `IndexMap` here to preserve the ordering of peers sending us messages. This is
-	/// desirable because we reward first sending peers with reputation.
-	available_peers: IndexMap<PeerId, Vec<net_protocol::StatementDistributionMessage>>,
-	/// Peers left to try in case the background task needs it.
-	peers_to_try: Vec<PeerId>,
-	/// Sender for sending fresh peers to the fetching task in case of failure.
-	peer_sender: Option<oneshot::Sender<Vec<PeerId>>>,
-	/// Task taking care of the request.
-	///
-	/// Will be killed once dropped.
-	#[allow(dead_code)]
-	fetching_task: RemoteHandle<()>,
-}
-
 /// Messages to be handled in this subsystem.
 enum MuxedMessage {
 	/// Messages from other subsystems.
 	Subsystem(FatalResult<FromOrchestra<StatementDistributionMessage>>),
-	/// Messages from spawned requester background tasks.
-	Requester(Option<RequesterMessage>),
-	/// Messages from spawned responder background task.
-	Responder(Option<ResponderMessage>),
+	/// Messages from spawned v1 (legacy) requester background tasks.
+	V1Requester(Option<V1RequesterMessage>),
+	/// Messages from spawned v1 (legacy) responder background task.
+ V1Responder(Option), } #[overseer::contextbounds(StatementDistribution, prefix = self::overseer)] impl MuxedMessage { async fn receive( ctx: &mut Context, - from_requester: &mut mpsc::Receiver, - from_responder: &mut mpsc::Receiver, + from_v1_requester: &mut mpsc::Receiver, + from_v1_responder: &mut mpsc::Receiver, ) -> MuxedMessage { // We are only fusing here to make `select` happy, in reality we will quit if one of those // streams end: - let from_overseer = ctx.recv().fuse(); - let from_requester = from_requester.next(); - let from_responder = from_responder.next(); - futures::pin_mut!(from_overseer, from_requester, from_responder); + let from_orchestra = ctx.recv().fuse(); + let from_v1_requester = from_v1_requester.next(); + let from_v1_responder = from_v1_responder.next(); + futures::pin_mut!(from_orchestra, from_v1_requester, from_v1_responder); futures::select! { - msg = from_overseer => MuxedMessage::Subsystem(msg.map_err(FatalError::SubsystemReceive)), - msg = from_requester => MuxedMessage::Requester(msg), - msg = from_responder => MuxedMessage::Responder(msg), - } - } -} - -#[derive(Debug, PartialEq, Eq)] -enum DeniedStatement { - NotUseful, - UsefulButKnown, -} - -struct ActiveHeadData { - /// All candidates we are aware of for this head, keyed by hash. - candidates: HashSet, - /// Persisted validation data cache. - cached_validation_data: HashMap, - /// Stored statements for circulation to peers. - /// - /// These are iterable in insertion order, and `Seconded` statements are always - /// accepted before dependent statements. - statements: IndexMap, - /// Large statements we are waiting for with associated meta data. - waiting_large_statements: HashMap, - /// The parachain validators at the head's child session index. - validators: Vec, - /// The current session index of this fork. - session_index: sp_staking::SessionIndex, - /// How many `Seconded` statements we've seen per validator. - seconded_counts: HashMap, - /// A Jaeger span for this head, so we can attach data to it. - span: PerLeafSpan, -} - -impl ActiveHeadData { - fn new( - validators: Vec, - session_index: sp_staking::SessionIndex, - span: PerLeafSpan, - ) -> Self { - ActiveHeadData { - candidates: Default::default(), - cached_validation_data: Default::default(), - statements: Default::default(), - waiting_large_statements: Default::default(), - validators, - session_index, - seconded_counts: Default::default(), - span, - } - } - - async fn fetch_persisted_validation_data( - &mut self, - sender: &mut Sender, - relay_parent: Hash, - para_id: ParaId, - ) -> Result> - where - Sender: StatementDistributionSenderTrait, - { - if let Entry::Vacant(entry) = self.cached_validation_data.entry(para_id) { - let persisted_validation_data = - polkadot_node_subsystem_util::request_persisted_validation_data( - relay_parent, - para_id, - OccupiedCoreAssumption::Free, - sender, - ) - .await - .await - .map_err(Error::RuntimeApiUnavailable)? - .map_err(|err| Error::FetchPersistedValidationData(para_id, err))?; - - match persisted_validation_data { - Some(pvd) => entry.insert(pvd), - None => return Ok(None), - }; - } - - Ok(self.cached_validation_data.get(¶_id)) - } - - /// Note the given statement. - /// - /// If it was not already known and can be accepted, returns `NotedStatement::Fresh`, - /// with a handle to the statement. - /// - /// If it can be accepted, but we already know it, returns `NotedStatement::UsefulButKnown`. 
- /// - /// We accept up to `VC_THRESHOLD` (2 at time of writing) `Seconded` statements - /// per validator. These will be the first ones we see. The statement is assumed - /// to have been checked, including that the validator index is not out-of-bounds and - /// the signature is valid. - /// - /// Any other statements or those that reference a candidate we are not aware of cannot be accepted - /// and will return `NotedStatement::NotUseful`. - fn note_statement(&mut self, statement: SignedFullStatement) -> NotedStatement { - let validator_index = statement.validator_index(); - let comparator = StoredStatementComparator { - compact: statement.payload().to_compact(), - validator_index, - signature: statement.signature().clone(), - }; - - match comparator.compact { - CompactStatement::Seconded(h) => { - let seconded_so_far = self.seconded_counts.entry(validator_index).or_insert(0); - if *seconded_so_far >= VC_THRESHOLD { - gum::trace!( - target: LOG_TARGET, - ?validator_index, - ?statement, - "Extra statement is ignored" - ); - return NotedStatement::NotUseful - } - - self.candidates.insert(h); - if let Some(old) = self.statements.insert(comparator.clone(), statement) { - gum::trace!( - target: LOG_TARGET, - ?validator_index, - statement = ?old, - "Known statement" - ); - NotedStatement::UsefulButKnown - } else { - *seconded_so_far += 1; - - gum::trace!( - target: LOG_TARGET, - ?validator_index, - statement = ?self.statements.last().expect("Just inserted").1, - "Noted new statement" - ); - // This will always return `Some` because it was just inserted. - let key_value = self - .statements - .get_key_value(&comparator) - .expect("Statement was just inserted; qed"); - - NotedStatement::Fresh(key_value.into()) - } - }, - CompactStatement::Valid(h) => { - if !self.candidates.contains(&h) { - gum::trace!( - target: LOG_TARGET, - ?validator_index, - ?statement, - "Statement for unknown candidate" - ); - return NotedStatement::NotUseful - } - - if let Some(old) = self.statements.insert(comparator.clone(), statement) { - gum::trace!( - target: LOG_TARGET, - ?validator_index, - statement = ?old, - "Known statement" - ); - NotedStatement::UsefulButKnown - } else { - gum::trace!( - target: LOG_TARGET, - ?validator_index, - statement = ?self.statements.last().expect("Just inserted").1, - "Noted new statement" - ); - // This will always return `Some` because it was just inserted. - NotedStatement::Fresh( - self.statements - .get_key_value(&comparator) - .expect("Statement was just inserted; qed") - .into(), - ) - } - }, - } - } - - /// Returns an error if the statement is already known or not useful - /// without modifying the internal state. 
- fn check_useful_or_unknown( - &self, - statement: &UncheckedSignedStatement, - ) -> std::result::Result<(), DeniedStatement> { - let validator_index = statement.unchecked_validator_index(); - let compact = statement.unchecked_payload(); - let comparator = StoredStatementComparator { - compact: compact.clone(), - validator_index, - signature: statement.unchecked_signature().clone(), - }; - - match compact { - CompactStatement::Seconded(_) => { - let seconded_so_far = self.seconded_counts.get(&validator_index).unwrap_or(&0); - if *seconded_so_far >= VC_THRESHOLD { - gum::trace!( - target: LOG_TARGET, - ?validator_index, - ?statement, - "Extra statement is ignored", - ); - return Err(DeniedStatement::NotUseful) - } - - if self.statements.contains_key(&comparator) { - gum::trace!( - target: LOG_TARGET, - ?validator_index, - ?statement, - "Known statement", - ); - return Err(DeniedStatement::UsefulButKnown) - } - }, - CompactStatement::Valid(h) => { - if !self.candidates.contains(&h) { - gum::trace!( - target: LOG_TARGET, - ?validator_index, - ?statement, - "Statement for unknown candidate", - ); - return Err(DeniedStatement::NotUseful) - } - - if self.statements.contains_key(&comparator) { - gum::trace!( - target: LOG_TARGET, - ?validator_index, - ?statement, - "Known statement", - ); - return Err(DeniedStatement::UsefulButKnown) - } - }, - } - Ok(()) - } - - /// Get an iterator over all statements for the active head. Seconded statements come first. - fn statements(&self) -> impl Iterator> + '_ { - self.statements.iter().map(Into::into) - } - - /// Get an iterator over all statements for the active head that are for a particular candidate. - fn statements_about( - &self, - candidate_hash: CandidateHash, - ) -> impl Iterator> + '_ { - self.statements() - .filter(move |s| s.compact().candidate_hash() == &candidate_hash) - } -} - -/// Check a statement signature under this parent hash. -fn check_statement_signature( - head: &ActiveHeadData, - relay_parent: Hash, - statement: UncheckedSignedStatement, -) -> std::result::Result { - let signing_context = - SigningContext { session_index: head.session_index, parent_hash: relay_parent }; - - head.validators - .get(statement.unchecked_validator_index().0 as usize) - .ok_or_else(|| statement.clone()) - .and_then(|v| statement.try_into_checked(&signing_context, v)) -} - -/// Places the statement in storage if it is new, and then -/// circulates the statement to all peers who have not seen it yet, and -/// sends all statements dependent on that statement to peers who could previously not receive -/// them but now can. -#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn circulate_statement_and_dependents( - topology_store: &SessionBoundGridTopologyStorage, - peers: &mut HashMap, - active_heads: &mut HashMap, - ctx: &mut Context, - relay_parent: Hash, - statement: SignedFullStatement, - priority_peers: Vec, - metrics: &Metrics, - rng: &mut impl rand::Rng, -) { - let active_head = match active_heads.get_mut(&relay_parent) { - Some(res) => res, - None => return, - }; - - let _span = active_head - .span - .child("circulate-statement") - .with_candidate(statement.payload().candidate_hash()) - .with_stage(jaeger::Stage::StatementDistribution); - - let topology = topology_store.get_topology_or_fallback(active_head.session_index); - // First circulate the statement directly to all peers needing it. - // The borrow of `active_head` needs to encompass only this (Rust) statement. 
- let outputs: Option<(CandidateHash, Vec)> = { - match active_head.note_statement(statement) { - NotedStatement::Fresh(stored) => Some(( - *stored.compact().candidate_hash(), - circulate_statement( - RequiredRouting::GridXY, - topology, - peers, - ctx, - relay_parent, - stored, - priority_peers, - metrics, - rng, - ) - .await, - )), - _ => None, - } - }; - - let _span = _span.child("send-to-peers"); - // Now send dependent statements to all peers needing them, if any. - if let Some((candidate_hash, peers_needing_dependents)) = outputs { - for peer in peers_needing_dependents { - if let Some(peer_data) = peers.get_mut(&peer) { - let _span_loop = _span.child("to-peer").with_peer_id(&peer); - // defensive: the peer data should always be some because the iterator - // of peers is derived from the set of peers. - send_statements_about( - peer, - peer_data, - ctx, - relay_parent, - candidate_hash, - &*active_head, - metrics, - ) - .await; - } - } - } -} - -/// Create a network message from a given statement. -fn statement_message( - relay_parent: Hash, - statement: SignedFullStatement, - metrics: &Metrics, -) -> net_protocol::VersionedValidationProtocol { - let (is_large, size) = is_statement_large(&statement); - if let Some(size) = size { - metrics.on_created_message(size); - } - - let msg = if is_large { - protocol_v1::StatementDistributionMessage::LargeStatement(StatementMetadata { - relay_parent, - candidate_hash: statement.payload().candidate_hash(), - signed_by: statement.validator_index(), - signature: statement.signature().clone(), - }) - } else { - protocol_v1::StatementDistributionMessage::Statement(relay_parent, statement.into()) - }; - - protocol_v1::ValidationProtocol::StatementDistribution(msg).into() -} - -/// Check whether a statement should be treated as large statement. -/// -/// Also report size of statement - if it is a `Seconded` statement, otherwise `None`. -fn is_statement_large(statement: &SignedFullStatement) -> (bool, Option) { - match &statement.payload() { - Statement::Seconded(committed) => { - let size = statement.as_unchecked().encoded_size(); - // Runtime upgrades will always be large and even if not - no harm done. - if committed.commitments.new_validation_code.is_some() { - return (true, Some(size)) - } - - // Half max size seems to be a good threshold to start not using notifications: - let threshold = - PeerSet::Validation.get_max_notification_size(IsAuthority::Yes) as usize / 2; - - (size >= threshold, Some(size)) - }, - Statement::Valid(_) => (false, None), - } -} - -/// Circulates a statement to all peers who have not seen it yet, and returns -/// an iterator over peers who need to have dependent statements sent. 
-#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn circulate_statement<'a, Context>( - required_routing: RequiredRouting, - topology: &SessionGridTopology, - peers: &mut HashMap, - ctx: &mut Context, - relay_parent: Hash, - stored: StoredStatement<'a>, - mut priority_peers: Vec, - metrics: &Metrics, - rng: &mut impl rand::Rng, -) -> Vec { - let fingerprint = stored.fingerprint(); - - let mut peers_to_send: Vec = peers - .iter() - .filter_map(|(peer, data)| { - if data.can_send(&relay_parent, &fingerprint) { - Some(peer.clone()) - } else { - None - } - }) - .collect(); - - let good_peers: HashSet<&PeerId> = peers_to_send.iter().collect(); - // Only take priority peers we can send data to: - priority_peers.retain(|p| good_peers.contains(p)); - - // Avoid duplicates: - let priority_set: HashSet<&PeerId> = priority_peers.iter().collect(); - peers_to_send.retain(|p| !priority_set.contains(p)); - - util::choose_random_subset_with_rng( - |e| topology.route_to_peer(required_routing, e), - &mut peers_to_send, - rng, - MIN_GOSSIP_PEERS, - ); - // We don't want to use less peers, than we would without any priority peers: - let min_size = std::cmp::max(peers_to_send.len(), MIN_GOSSIP_PEERS); - // Make set full: - let needed_peers = min_size as i64 - priority_peers.len() as i64; - if needed_peers > 0 { - peers_to_send.truncate(needed_peers as usize); - // Order important here - priority peers are placed first, so will be sent first. - // This gives backers a chance to be among the first in requesting any large statement - // data. - priority_peers.append(&mut peers_to_send); - } - peers_to_send = priority_peers; - // We must not have duplicates: - debug_assert!( - peers_to_send.len() == peers_to_send.clone().into_iter().collect::>().len(), - "We filter out duplicates above. qed.", - ); - let peers_to_send: Vec<(PeerId, bool)> = peers_to_send - .into_iter() - .map(|peer_id| { - let new = peers - .get_mut(&peer_id) - .expect("a subset is taken above, so it exists; qed") - .send(&relay_parent, &fingerprint); - (peer_id, new) - }) - .collect(); - - // Send all these peers the initial statement. - if !peers_to_send.is_empty() { - let payload = statement_message(relay_parent, stored.statement.clone(), metrics); - gum::trace!( - target: LOG_TARGET, - ?peers_to_send, - ?relay_parent, - statement = ?stored.statement, - "Sending statement", - ); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - peers_to_send.iter().map(|(p, _)| p.clone()).collect(), - payload, - )) - .await; - } - - peers_to_send - .into_iter() - .filter_map(|(peer, needs_dependent)| if needs_dependent { Some(peer) } else { None }) - .collect() -} - -/// Send all statements about a given candidate hash to a peer. 
-#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn send_statements_about( - peer: PeerId, - peer_data: &mut PeerData, - ctx: &mut Context, - relay_parent: Hash, - candidate_hash: CandidateHash, - active_head: &ActiveHeadData, - metrics: &Metrics, -) { - for statement in active_head.statements_about(candidate_hash) { - let fingerprint = statement.fingerprint(); - if !peer_data.can_send(&relay_parent, &fingerprint) { - continue - } - peer_data.send(&relay_parent, &fingerprint); - let payload = statement_message(relay_parent, statement.statement.clone(), metrics); - - gum::trace!( - target: LOG_TARGET, - ?peer, - ?relay_parent, - ?candidate_hash, - statement = ?statement.statement, - "Sending statement", - ); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vec![peer.clone()], - payload, - )) - .await; - - metrics.on_statement_distributed(); - } -} - -/// Send all statements at a given relay-parent to a peer. -#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn send_statements( - peer: PeerId, - peer_data: &mut PeerData, - ctx: &mut Context, - relay_parent: Hash, - active_head: &ActiveHeadData, - metrics: &Metrics, -) { - for statement in active_head.statements() { - let fingerprint = statement.fingerprint(); - if !peer_data.can_send(&relay_parent, &fingerprint) { - continue - } - peer_data.send(&relay_parent, &fingerprint); - let payload = statement_message(relay_parent, statement.statement.clone(), metrics); - - gum::trace!( - target: LOG_TARGET, - ?peer, - ?relay_parent, - statement = ?statement.statement, - "Sending statement" - ); - ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vec![peer.clone()], - payload, - )) - .await; - - metrics.on_statement_distributed(); - } -} - -async fn report_peer( - sender: &mut impl overseer::StatementDistributionSenderTrait, - peer: PeerId, - rep: Rep, -) { - sender.send_message(NetworkBridgeTxMessage::ReportPeer(peer, rep)).await -} - -/// If message contains a statement, then retrieve it, otherwise fork task to fetch it. -/// -/// This function will also return `None` if the message did not pass some basic checks, in that -/// case no statement will be requested, on the flipside you get `ActiveHeadData` in addition to -/// your statement. -/// -/// If the message was large, but the result has been fetched already that one is returned. 
-#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn retrieve_statement_from_message<'a, Context>( - peer: PeerId, - message: protocol_v1::StatementDistributionMessage, - active_head: &'a mut ActiveHeadData, - ctx: &mut Context, - req_sender: &mpsc::Sender, - metrics: &Metrics, -) -> Option { - let fingerprint = message.get_fingerprint(); - let candidate_hash = *fingerprint.0.candidate_hash(); - - // Immediately return any Seconded statement: - let message = if let protocol_v1::StatementDistributionMessage::Statement(h, s) = message { - if let Statement::Seconded(_) = s.unchecked_payload() { - return Some(s) - } - protocol_v1::StatementDistributionMessage::Statement(h, s) - } else { - message - }; - - match active_head.waiting_large_statements.entry(candidate_hash) { - Entry::Occupied(mut occupied) => { - match occupied.get_mut() { - LargeStatementStatus::Fetching(info) => { - let is_large_statement = message.is_large_statement(); - - let is_new_peer = match info.available_peers.entry(peer) { - IEntry::Occupied(mut occupied) => { - occupied.get_mut().push(Versioned::V1(message)); - false - }, - IEntry::Vacant(vacant) => { - vacant.insert(vec![Versioned::V1(message)]); - true - }, - }; - - if is_new_peer & is_large_statement { - info.peers_to_try.push(peer); - // Answer any pending request for more peers: - if let Some(sender) = info.peer_sender.take() { - let to_send = std::mem::take(&mut info.peers_to_try); - if let Err(peers) = sender.send(to_send) { - // Requester no longer interested for now, might want them - // later: - info.peers_to_try = peers; - } - } - } - }, - LargeStatementStatus::FetchedOrShared(committed) => { - match message { - protocol_v1::StatementDistributionMessage::Statement(_, s) => { - // We can now immediately return any statements (should only be - // `Statement::Valid` ones, but we don't care at this point.) - return Some(s) - }, - protocol_v1::StatementDistributionMessage::LargeStatement(metadata) => - return Some(UncheckedSignedFullStatement::new( - Statement::Seconded(committed.clone()), - metadata.signed_by, - metadata.signature.clone(), - )), - } - }, - } - }, - Entry::Vacant(vacant) => { - match message { - protocol_v1::StatementDistributionMessage::LargeStatement(metadata) => { - if let Some(new_status) = - launch_request(metadata, peer, req_sender.clone(), ctx, metrics).await - { - vacant.insert(new_status); - } - }, - protocol_v1::StatementDistributionMessage::Statement(_, s) => { - // No fetch in progress, safe to return any statement immediately (we don't bother - // about normal network jitter which might cause `Valid` statements to arrive early - // for now.). - return Some(s) - }, - } - }, - } - None -} - -/// Launch request for a large statement and get tracking status. -/// -/// Returns `None` if spawning task failed. 
-#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn launch_request( - meta: StatementMetadata, - peer: PeerId, - req_sender: mpsc::Sender, - ctx: &mut Context, - metrics: &Metrics, -) -> Option { - let (task, handle) = - fetch(meta.relay_parent, meta.candidate_hash, vec![peer], req_sender, metrics.clone()) - .remote_handle(); - - let result = ctx.spawn("large-statement-fetcher", task.boxed()); - if let Err(err) = result { - gum::error!(target: LOG_TARGET, ?err, "Spawning task failed."); - return None - } - let available_peers = { - let mut m = IndexMap::new(); - m.insert( - peer, - vec![Versioned::V1(protocol_v1::StatementDistributionMessage::LargeStatement(meta))], - ); - m - }; - Some(LargeStatementStatus::Fetching(FetchingInfo { - available_peers, - peers_to_try: Vec::new(), - peer_sender: None, - fetching_task: handle, - })) -} - -/// Handle incoming message and circulate it to peers, if we did not know it already. -#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn handle_incoming_message_and_circulate<'a, Context, R>( - peer: PeerId, - topology_storage: &SessionBoundGridTopologyStorage, - peers: &mut HashMap, - active_heads: &'a mut HashMap, - recent_outdated_heads: &RecentOutdatedHeads, - ctx: &mut Context, - message: net_protocol::StatementDistributionMessage, - req_sender: &mpsc::Sender, - metrics: &Metrics, - runtime: &mut RuntimeInfo, - rng: &mut R, -) where - R: rand::Rng, -{ - let handled_incoming = match peers.get_mut(&peer) { - Some(data) => - handle_incoming_message( - peer, - data, - active_heads, - recent_outdated_heads, - ctx, - message, - req_sender, - metrics, - ) - .await, - None => None, - }; - - // if we got a fresh message, we need to circulate it to all peers. - if let Some((relay_parent, statement)) = handled_incoming { - // we can ignore the set of peers who this function returns as now expecting - // dependent statements. - // - // we have the invariant in this subsystem that we never store a `Valid` or `Invalid` - // statement before a `Seconded` statement. `Seconded` statements are the only ones - // that require dependents. Thus, if this is a `Seconded` statement for a candidate we - // were not aware of before, we cannot have any dependent statements from the candidate. - let _ = metrics.time_network_bridge_update("circulate_statement"); - - let session_index = runtime.get_session_index_for_child(ctx.sender(), relay_parent).await; - let topology = match session_index { - Ok(session_index) => topology_storage.get_topology_or_fallback(session_index), - Err(e) => { - gum::debug!( - target: LOG_TARGET, - %relay_parent, - "cannot get session index for the specific relay parent: {:?}", - e - ); - - topology_storage.get_current_topology() - }, - }; - let required_routing = - topology.required_routing_by_index(statement.statement.validator_index(), false); - - let _ = circulate_statement( - required_routing, - topology, - peers, - ctx, - relay_parent, - statement, - Vec::new(), - metrics, - rng, - ) - .await; - } -} - -// Handle a statement. Returns a reference to a newly-stored statement -// if we were not already aware of it, along with the corresponding relay-parent. -// -// This function checks the signature and ensures the statement is compatible with our -// view. It also notifies candidate backing if the statement was previously unknown. 
-#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn handle_incoming_message<'a, Context>( - peer: PeerId, - peer_data: &mut PeerData, - active_heads: &'a mut HashMap, - recent_outdated_heads: &RecentOutdatedHeads, - ctx: &mut Context, - message: net_protocol::StatementDistributionMessage, - req_sender: &mpsc::Sender, - metrics: &Metrics, -) -> Option<(Hash, StoredStatement<'a>)> { - let _ = metrics.time_network_bridge_update("handle_incoming_message"); - - // TODO [now] handle vstaging messages - let message = match message { - Versioned::V1(m) => m, - Versioned::VStaging(_) => unimplemented!(), - }; - - let relay_parent = message.get_relay_parent(); - - let active_head = match active_heads.get_mut(&relay_parent) { - Some(h) => h, - None => { - gum::debug!( - target: LOG_TARGET, - %relay_parent, - "our view out-of-sync with active heads; head not found", - ); - - if !recent_outdated_heads.is_recent_outdated(&relay_parent) { - report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; - } - - return None - }, - }; - - if let protocol_v1::StatementDistributionMessage::LargeStatement(_) = message { - if let Err(rep) = peer_data.receive_large_statement(&relay_parent) { - gum::debug!(target: LOG_TARGET, ?peer, ?message, ?rep, "Unexpected large statement.",); - report_peer(ctx.sender(), peer, rep).await; - return None - } - } - - let fingerprint = message.get_fingerprint(); - let candidate_hash = fingerprint.0.candidate_hash().clone(); - let handle_incoming_span = active_head - .span - .child("handle-incoming") - .with_candidate(candidate_hash) - .with_peer_id(&peer); - - let max_message_count = active_head.validators.len() * 2; - - // perform only basic checks before verifying the signature - // as it's more computationally heavy - if let Err(rep) = peer_data.check_can_receive(&relay_parent, &fingerprint, max_message_count) { - // This situation can happen when a peer's Seconded message was lost - // but we have received the Valid statement. - // So we check it once and then ignore repeated violation to avoid - // reputation change flood. 
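A minimal illustration of the report-once rule described in the comment above, assuming toy integer ids in place of the real `PeerId` and `Hash` types: only the first out-of-view statement triggers a reputation report, later ones are merely counted.

use std::collections::HashMap;

#[derive(Default)]
struct UnexpectedTracker {
    // (peer, relay_parent) -> number of unexpected statements seen so far.
    counts: HashMap<(u64, u64), usize>,
}

impl UnexpectedTracker {
    // Returns true when a reputation change should actually be sent.
    fn note_unexpected(&mut self, peer: u64, relay_parent: u64) -> bool {
        let count = self.counts.entry((peer, relay_parent)).or_insert(0);
        let first = *count == 0;
        *count += 1;
        first
    }
}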
- let unexpected_count = peer_data.receive_unexpected(&relay_parent); - - gum::debug!( - target: LOG_TARGET, - ?relay_parent, - ?peer, - ?message, - ?rep, - ?unexpected_count, - "Error inserting received statement" - ); - - match rep { - // This happens when a Valid statement has been received but there is no corresponding Seconded - COST_UNEXPECTED_STATEMENT_UNKNOWN_CANDIDATE => { - metrics.on_unexpected_statement_valid(); - // Report peer merely if this is not a duplicate out-of-view statement that - // was caused by a missing Seconded statement from this peer - if unexpected_count == 0_usize { - report_peer(ctx.sender(), peer, rep).await; - } - }, - // This happens when we have an unexpected remote peer that announced Seconded - COST_UNEXPECTED_STATEMENT_REMOTE => { - metrics.on_unexpected_statement_seconded(); - report_peer(ctx.sender(), peer, rep).await; - }, - _ => { - report_peer(ctx.sender(), peer, rep).await; - }, - } - - return None - } - - let checked_compact = { - let (compact, validator_index) = message.get_fingerprint(); - let signature = message.get_signature(); - - let unchecked_compact = UncheckedSignedStatement::new(compact, validator_index, signature); - - match active_head.check_useful_or_unknown(&unchecked_compact) { - Ok(()) => {}, - Err(DeniedStatement::NotUseful) => return None, - Err(DeniedStatement::UsefulButKnown) => { - // Note a received statement in the peer data - peer_data - .receive(&relay_parent, &fingerprint, max_message_count) - .expect("checked in `check_can_receive` above; qed"); - report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT).await; - - return None - }, - } - - // check the signature on the statement. - match check_statement_signature(&active_head, relay_parent, unchecked_compact) { - Err(statement) => { - gum::debug!(target: LOG_TARGET, ?peer, ?statement, "Invalid statement signature"); - report_peer(ctx.sender(), peer, COST_INVALID_SIGNATURE).await; - return None - }, - Ok(statement) => statement, - } - }; - - // Fetch from the network only after signature and usefulness checks are completed. - let is_large_statement = message.is_large_statement(); - let statement = - retrieve_statement_from_message(peer, message, active_head, ctx, req_sender, metrics) - .await?; - - let payload = statement.unchecked_into_payload(); - - // Upgrade the `Signed` wrapper from the compact payload to the full payload. - // This fails if the payload doesn't encode correctly. - let statement: SignedFullStatement = match checked_compact.convert_to_superpayload(payload) { - Err((compact, _)) => { - gum::debug!( - target: LOG_TARGET, - ?peer, - ?compact, - is_large_statement, - "Full statement had bad payload." - ); - report_peer(ctx.sender(), peer, COST_WRONG_HASH).await; - return None - }, - Ok(statement) => statement, - }; - - // Ensure the statement is stored in the peer data. - // - // Note that if the peer is sending us something that is not within their view, - // it will not be kept within their log. - match peer_data.receive(&relay_parent, &fingerprint, max_message_count) { - Err(_) => { - unreachable!("checked in `check_can_receive` above; qed"); - }, - Ok(true) => { - gum::trace!(target: LOG_TARGET, ?peer, ?statement, "Statement accepted"); - // Send the peer all statements concerning the candidate that we have, - // since it appears to have just learned about the candidate. 
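The `convert_to_superpayload` step above upgrades a signed compact statement (essentially a signed hash) to the full payload that was fetched separately; the upgrade is only sound if the payload hashes to the signed value. A schematic stand-alone version of that check, with hypothetical types:

fn convert_to_superpayload(
    signed_hash: u64,                  // what the validator actually signed
    full_payload: Vec<u8>,             // payload fetched afterwards
    hash_of: impl Fn(&[u8]) -> u64,
) -> Result<Vec<u8>, Vec<u8>> {
    if hash_of(&full_payload) == signed_hash {
        Ok(full_payload)
    } else {
        // Caller reports COST_WRONG_HASH to the sending peer.
        Err(full_payload)
    }
}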
- send_statements_about( - peer.clone(), - peer_data, - ctx, - relay_parent, - candidate_hash, - &*active_head, - metrics, - ) - .await; - }, - Ok(false) => {}, - } - - // TODO [https://github.com/paritytech/polkadot/issues/5055] - // - // For `Seconded` statements `None` or `Err` means we couldn't fetch the PVD, which - // means the statement shouldn't be accepted. - // - // In case of `Valid` we should have it cached prior, therefore this performs - // no Runtime API calls and always returns `Ok(Some(_))`. - if let Statement::Seconded(receipt) = statement.payload() { - let para_id = receipt.descriptor.para_id; - // Either call the Runtime API or check that validation data is cached. - let result = active_head - .fetch_persisted_validation_data(ctx.sender(), relay_parent, para_id) - .await; - if !matches!(result, Ok(Some(_))) { - return None + msg = from_orchestra => MuxedMessage::Subsystem(msg.map_err(FatalError::SubsystemReceive)), + msg = from_v1_requester => MuxedMessage::V1Requester(msg), + msg = from_v1_responder => MuxedMessage::V1Responder(msg), } } - - // Extend the payload with persisted validation data required by the backing - // subsystem. - // - // Do it in advance before noting the statement because we don't want to borrow active - // head mutable and use the cache. - let statement_with_pvd = statement - .clone() - .convert_to_superpayload_with(|statement| match statement { - Statement::Seconded(receipt) => { - let para_id = &receipt.descriptor.para_id; - let persisted_validation_data = active_head - .cached_validation_data - .get(para_id) - .cloned() - .expect("pvd is ensured to be cached above; qed"); - StatementWithPVD::Seconded(receipt, persisted_validation_data) - }, - Statement::Valid(candidate_hash) => StatementWithPVD::Valid(candidate_hash), - }) - .expect("payload was checked with conversion from compact; qed"); - - // Note: `peer_data.receive` already ensures that the statement is not an unbounded equivocation - // or unpinned to a seconded candidate. So it is safe to place it into the storage. - match active_head.note_statement(statement) { - NotedStatement::NotUseful | NotedStatement::UsefulButKnown => { - unreachable!("checked in `is_useful_or_unknown` above; qed"); - }, - NotedStatement::Fresh(statement) => { - report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT_FIRST).await; - - let mut _span = handle_incoming_span.child("notify-backing"); - - // When we receive a new message from a peer, we forward it to the - // candidate backing subsystem. - ctx.send_message(CandidateBackingMessage::Statement( - relay_parent, - unimplemented!(), // TODO [now]: fixme - )) - .await; - - Some((relay_parent, statement)) - }, - } -} - -/// Update a peer's view. Sends all newly unlocked statements based on the previous -#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn update_peer_view_and_maybe_send_unlocked( - peer: PeerId, - topology: &SessionGridTopology, - peer_data: &mut PeerData, - ctx: &mut Context, - active_heads: &HashMap, - new_view: View, - metrics: &Metrics, - rng: &mut R, -) where - R: rand::Rng, -{ - let old_view = std::mem::replace(&mut peer_data.view, new_view); - - // Remove entries for all relay-parents in the old view but not the new. 
- for removed in old_view.difference(&peer_data.view) { - let _ = peer_data.view_knowledge.remove(removed); - } - - // Use both grid directions - let is_gossip_peer = topology.route_to_peer(RequiredRouting::GridXY, &peer); - let lucky = is_gossip_peer || - util::gen_ratio_rng( - util::MIN_GOSSIP_PEERS.saturating_sub(topology.len()), - util::MIN_GOSSIP_PEERS, - rng, - ); - - // Add entries for all relay-parents in the new view but not the old. - // Furthermore, send all statements we have for those relay parents. - let new_view = peer_data.view.difference(&old_view).copied().collect::>(); - for new in new_view.iter().copied() { - peer_data.view_knowledge.insert(new, Default::default()); - if !lucky { - continue - } - if let Some(active_head) = active_heads.get(&new) { - send_statements(peer.clone(), peer_data, ctx, new, active_head, metrics).await; - } - } -} - -#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] -async fn handle_network_update( - peers: &mut HashMap, - topology_storage: &mut SessionBoundGridTopologyStorage, - authorities: &mut HashMap, - active_heads: &mut HashMap, - recent_outdated_heads: &RecentOutdatedHeads, - ctx: &mut Context, - req_sender: &mpsc::Sender, - update: NetworkBridgeEvent, - metrics: &Metrics, - runtime: &mut RuntimeInfo, - rng: &mut R, -) where - R: rand::Rng, -{ - match update { - NetworkBridgeEvent::PeerConnected(peer, role, _, maybe_authority) => { - gum::trace!(target: LOG_TARGET, ?peer, ?role, "Peer connected"); - peers.insert( - peer, - PeerData { - view: Default::default(), - view_knowledge: Default::default(), - maybe_authority: maybe_authority.clone(), - }, - ); - if let Some(authority_ids) = maybe_authority { - authority_ids.into_iter().for_each(|a| { - authorities.insert(a, peer); - }); - } - }, - NetworkBridgeEvent::PeerDisconnected(peer) => { - gum::trace!(target: LOG_TARGET, ?peer, "Peer disconnected"); - if let Some(auth_ids) = peers.remove(&peer).and_then(|p| p.maybe_authority) { - auth_ids.into_iter().for_each(|a| { - authorities.remove(&a); - }); - } - }, - NetworkBridgeEvent::NewGossipTopology(topology) => { - let _ = metrics.time_network_bridge_update("new_gossip_topology"); - - let new_session_index = topology.session; - let new_topology: SessionGridTopology = topology.into(); - let old_topology = topology_storage.get_current_topology(); - let newly_added = new_topology.peers_diff(old_topology); - topology_storage.update_topology(new_session_index, new_topology); - for peer in newly_added { - if let Some(data) = peers.get_mut(&peer) { - let view = std::mem::take(&mut data.view); - update_peer_view_and_maybe_send_unlocked( - peer, - topology_storage.get_current_topology(), - data, - ctx, - &*active_heads, - view, - metrics, - rng, - ) - .await - } - } - }, - NetworkBridgeEvent::PeerMessage(peer, message) => { - handle_incoming_message_and_circulate( - peer, - topology_storage, - peers, - active_heads, - &*recent_outdated_heads, - ctx, - message, - req_sender, - metrics, - runtime, - rng, - ) - .await; - }, - NetworkBridgeEvent::PeerViewChange(peer, view) => { - let _ = metrics.time_network_bridge_update("peer_view_change"); - gum::trace!(target: LOG_TARGET, ?peer, ?view, "Peer view change"); - match peers.get_mut(&peer) { - Some(data) => - update_peer_view_and_maybe_send_unlocked( - peer, - topology_storage.get_current_topology(), - data, - ctx, - &*active_heads, - view, - metrics, - rng, - ) - .await, - None => (), - } - }, - NetworkBridgeEvent::OurViewChange(_view) => { - // handled by `ActiveLeavesUpdate` - }, - 
} } #[overseer::contextbounds(StatementDistribution, prefix = self::overseer)] @@ -1826,32 +118,26 @@ impl StatementDistributionSubsystem { /// Create a new Statement Distribution Subsystem pub fn new( keystore: SyncCryptoStorePtr, - req_receiver: IncomingRequestReceiver, + v1_req_receiver: IncomingRequestReceiver, metrics: Metrics, rng: R, ) -> Self { - Self { keystore, req_receiver: Some(req_receiver), metrics, rng } + Self { keystore, v1_req_receiver: Some(v1_req_receiver), metrics, rng } } async fn run(mut self, mut ctx: Context) -> std::result::Result<(), FatalError> { - let mut peers: HashMap = HashMap::new(); - let mut topology_storage: SessionBoundGridTopologyStorage = Default::default(); - let mut authorities: HashMap = HashMap::new(); - let mut active_heads: HashMap = HashMap::new(); - let mut recent_outdated_heads = RecentOutdatedHeads::default(); - - let mut runtime = RuntimeInfo::new(Some(self.keystore.clone())); + let mut legacy_v1_state = crate::legacy_v1::State::new(self.keystore.clone()); // Sender/Receiver for getting news from our statement fetching tasks. - let (req_sender, mut req_receiver) = mpsc::channel(1); + let (v1_req_sender, mut v1_req_receiver) = mpsc::channel(1); // Sender/Receiver for getting news from our responder task. - let (res_sender, mut res_receiver) = mpsc::channel(1); + let (v1_res_sender, mut v1_res_receiver) = mpsc::channel(1); ctx.spawn( "large-statement-responder", - respond( - self.req_receiver.take().expect("Mandatory argument to new. qed"), - res_sender.clone(), + v1_respond_task( + self.v1_req_receiver.take().expect("Mandatory argument to new. qed"), + v1_res_sender.clone(), ) .boxed(), ) @@ -1859,19 +145,14 @@ impl StatementDistributionSubsystem { loop { let message = - MuxedMessage::receive(&mut ctx, &mut req_receiver, &mut res_receiver).await; + MuxedMessage::receive(&mut ctx, &mut v1_req_receiver, &mut v1_res_receiver).await; match message { MuxedMessage::Subsystem(result) => { let result = self .handle_subsystem_message( &mut ctx, - &mut runtime, - &mut peers, - &mut topology_storage, - &mut authorities, - &mut active_heads, - &mut recent_outdated_heads, - &req_sender, + &mut legacy_v1_state, + &v1_req_sender, result?, ) .await; @@ -1881,29 +162,24 @@ impl StatementDistributionSubsystem { Err(jfyi) => gum::debug!(target: LOG_TARGET, error = ?jfyi), } }, - MuxedMessage::Requester(result) => { - let result = self - .handle_requester_message( - &mut ctx, - &topology_storage, - &mut peers, - &mut active_heads, - &recent_outdated_heads, - &req_sender, - &mut runtime, - result.ok_or(FatalError::RequesterReceiverFinished)?, - ) - .await; + MuxedMessage::V1Requester(result) => { + let result = crate::legacy_v1::handle_requester_message( + &mut ctx, + &mut legacy_v1_state, + &v1_req_sender, + &mut self.rng, + result.ok_or(FatalError::RequesterReceiverFinished)?, + &self.metrics, + ) + .await; log_error(result.map_err(From::from), "handle_requester_message")?; }, - MuxedMessage::Responder(result) => { - let result = self - .handle_responder_message( - &peers, - &mut active_heads, - result.ok_or(FatalError::ResponderReceiverFinished)?, - ) - .await; + MuxedMessage::V1Responder(result) => { + let result = crate::legacy_v1::handle_responder_message( + &mut legacy_v1_state, + result.ok_or(FatalError::ResponderReceiverFinished)?, + ) + .await; log_error(result.map_err(From::from), "handle_responder_message")?; }, }; @@ -1911,165 +187,11 @@ impl StatementDistributionSubsystem { Ok(()) } - /// Handle messages from responder background task. 
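The rewritten run loop above multiplexes the overseer channel with the requester and responder task channels into a single `MuxedMessage`. A self-contained sketch of that pattern, assuming the `futures` crate and toy message payloads:

use futures::{channel::mpsc, select, StreamExt};

enum MuxedMessage {
    Subsystem(String),
    V1Requester(Option<u32>),
    V1Responder(Option<u32>),
}

// Merge three sources into one message so the main loop stays
// single-threaded and borrow-friendly.
async fn receive_one(
    from_overseer: &mut mpsc::Receiver<String>,
    from_requester: &mut mpsc::Receiver<u32>,
    from_responder: &mut mpsc::Receiver<u32>,
) -> MuxedMessage {
    select! {
        msg = from_overseer.select_next_some() => MuxedMessage::Subsystem(msg),
        msg = from_requester.next() => MuxedMessage::V1Requester(msg),
        msg = from_responder.next() => MuxedMessage::V1Responder(msg),
    }
}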
- async fn handle_responder_message( - &self, - peers: &HashMap, - active_heads: &mut HashMap, - message: ResponderMessage, - ) -> JfyiErrorResult<()> { - match message { - ResponderMessage::GetData { requesting_peer, relay_parent, candidate_hash, tx } => { - if !requesting_peer_knows_about_candidate( - peers, - &requesting_peer, - &relay_parent, - &candidate_hash, - )? { - return Err(JfyiError::RequestedUnannouncedCandidate( - requesting_peer, - candidate_hash, - )) - } - - let active_head = - active_heads.get(&relay_parent).ok_or(JfyiError::NoSuchHead(relay_parent))?; - - let committed = match active_head.waiting_large_statements.get(&candidate_hash) { - Some(LargeStatementStatus::FetchedOrShared(committed)) => committed.clone(), - _ => - return Err(JfyiError::NoSuchFetchedLargeStatement( - relay_parent, - candidate_hash, - )), - }; - - tx.send(committed).map_err(|_| JfyiError::ResponderGetDataCanceled)?; - }, - } - Ok(()) - } - - async fn handle_requester_message( - &mut self, - ctx: &mut Context, - topology_storage: &SessionBoundGridTopologyStorage, - peers: &mut HashMap, - active_heads: &mut HashMap, - recent_outdated_heads: &RecentOutdatedHeads, - req_sender: &mpsc::Sender, - runtime: &mut RuntimeInfo, - message: RequesterMessage, - ) -> JfyiErrorResult<()> { - match message { - RequesterMessage::Finished { - relay_parent, - candidate_hash, - from_peer, - response, - bad_peers, - } => { - for bad in bad_peers { - report_peer(ctx.sender(), bad, COST_FETCH_FAIL).await; - } - report_peer(ctx.sender(), from_peer, BENEFIT_VALID_RESPONSE).await; - - let active_head = active_heads - .get_mut(&relay_parent) - .ok_or(JfyiError::NoSuchHead(relay_parent))?; - - let status = active_head.waiting_large_statements.remove(&candidate_hash); - - let info = match status { - Some(LargeStatementStatus::Fetching(info)) => info, - Some(LargeStatementStatus::FetchedOrShared(_)) => { - // We are no longer interested in the data. - return Ok(()) - }, - None => - return Err(JfyiError::NoSuchLargeStatementStatus( - relay_parent, - candidate_hash, - )), - }; - - active_head - .waiting_large_statements - .insert(candidate_hash, LargeStatementStatus::FetchedOrShared(response)); - - // Cache is now populated, send all messages: - for (peer, messages) in info.available_peers { - for message in messages { - handle_incoming_message_and_circulate( - peer, - topology_storage, - peers, - active_heads, - recent_outdated_heads, - ctx, - message, - req_sender, - &self.metrics, - runtime, - &mut self.rng, - ) - .await; - } - } - }, - RequesterMessage::SendRequest(req) => { - ctx.send_message(NetworkBridgeTxMessage::SendRequests( - vec![req], - IfDisconnected::ImmediateError, - )) - .await; - }, - RequesterMessage::GetMorePeers { relay_parent, candidate_hash, tx } => { - let active_head = active_heads - .get_mut(&relay_parent) - .ok_or(JfyiError::NoSuchHead(relay_parent))?; - - let status = active_head.waiting_large_statements.get_mut(&candidate_hash); - - let info = match status { - Some(LargeStatementStatus::Fetching(info)) => info, - Some(LargeStatementStatus::FetchedOrShared(_)) => { - // This task is going to die soon - no need to send it anything. 
- gum::debug!(target: LOG_TARGET, "Zombie task wanted more peers."); - return Ok(()) - }, - None => - return Err(JfyiError::NoSuchLargeStatementStatus( - relay_parent, - candidate_hash, - )), - }; - - if info.peers_to_try.is_empty() { - info.peer_sender = Some(tx); - } else { - let peers_to_try = std::mem::take(&mut info.peers_to_try); - if let Err(peers) = tx.send(peers_to_try) { - // No longer interested for now - might want them later: - info.peers_to_try = peers; - } - } - }, - RequesterMessage::ReportPeer(peer, rep) => report_peer(ctx.sender(), peer, rep).await, - } - Ok(()) - } - async fn handle_subsystem_message( &mut self, ctx: &mut Context, - runtime: &mut RuntimeInfo, - peers: &mut HashMap, - topology_storage: &mut SessionBoundGridTopologyStorage, - authorities: &mut HashMap, - active_heads: &mut HashMap, - recent_outdated_heads: &mut RecentOutdatedHeads, - req_sender: &mpsc::Sender, + legacy_v1_state: &mut legacy_v1::State, + v1_req_sender: &mpsc::Sender, message: FromOrchestra, ) -> Result { let metrics = &self.metrics; @@ -2082,138 +204,66 @@ impl StatementDistributionSubsystem { let _timer = metrics.time_active_leaves_update(); for deactivated in deactivated { - if active_heads.remove(&deactivated).is_some() { - gum::trace!( - target: LOG_TARGET, - hash = ?deactivated, - "Deactivating leaf", - ); - - recent_outdated_heads.note_outdated(deactivated); - } + crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, deactivated); } for activated in activated { - let relay_parent = activated.hash; - let span = PerLeafSpan::new(activated.span, "statement-distribution"); - gum::trace!( - target: LOG_TARGET, - hash = ?relay_parent, - "New active leaf", - ); - - // Retrieve the parachain validators at the child of the head we track. - let session_index = - runtime.get_session_index_for_child(ctx.sender(), relay_parent).await?; - let info = runtime - .get_session_info_by_index(ctx.sender(), relay_parent, session_index) + // TODO [now]: legacy, activate only if no prospective parachains support. + crate::legacy_v1::handle_activated_leaf(ctx, legacy_v1_state, activated) .await?; - let session_info = &info.session_info; - - active_heads.entry(relay_parent).or_insert(ActiveHeadData::new( - session_info.validators.clone(), - session_index, - span, - )); } }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => { // do nothing }, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(true), - FromOrchestra::Communication { msg } => match msg { - StatementDistributionMessage::Share(relay_parent, statement) => { - let _timer = metrics.time_share(); - - // Make sure we have data in cache: - if is_statement_large(&statement).0 { - if let Statement::Seconded(committed) = &statement.payload() { - let active_head = active_heads - .get_mut(&relay_parent) - // This should never be out-of-sync with our view if the view - // updates correspond to actual `StartWork` messages. - .ok_or(JfyiError::NoSuchHead(relay_parent))?; - active_head.waiting_large_statements.insert( - statement.payload().candidate_hash(), - LargeStatementStatus::FetchedOrShared(committed.clone()), - ); + FromOrchestra::Communication { msg } => + match msg { + StatementDistributionMessage::Share(relay_parent, statement) => { + let _timer = metrics.time_share(); + + // pass to legacy if legacy state contains head. 
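A sketch of the routing rule being introduced here, with toy types in place of the real subsystem state: a locally produced statement goes to the legacy v1 path only while its relay parent is tracked by the legacy state, and network events are forwarded to legacy unless they are vstaging-only messages (the `V1Compatibility` wrapper still counts as legacy traffic).

use std::collections::HashSet;

struct LegacyState {
    relay_parents: HashSet<u64>,
}

enum NetMessage {
    V1(Vec<u8>),
    V1Compatibility(Vec<u8>),
    VStaging(Vec<u8>),
}

fn share_goes_to_legacy(legacy: &LegacyState, relay_parent: u64) -> bool {
    // Legacy handles the statement only if it tracks this relay parent.
    legacy.relay_parents.contains(&relay_parent)
}

fn event_goes_to_legacy(msg: &NetMessage) -> bool {
    // Plain v1 traffic and the vstaging compatibility wrapper both go to legacy.
    matches!(msg, NetMessage::V1(_) | NetMessage::V1Compatibility(_))
}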
+ if legacy_v1_state.contains_relay_parent(&relay_parent) { + crate::legacy_v1::share_local_statement( + ctx, + legacy_v1_state, + relay_parent, + StatementWithPVD::drop_pvd_from_signed(statement), + &mut self.rng, + metrics, + ) + .await?; } - } - - let info = runtime.get_session_info(ctx.sender(), relay_parent).await?; - let session_info = &info.session_info; - let validator_info = &info.validator_info; - - // Get peers in our group, so we can make sure they get our statement - // directly: - let group_peers = { - if let Some(our_group) = validator_info.our_group { - let our_group = &session_info.validator_groups[our_group.0 as usize]; + }, + StatementDistributionMessage::NetworkBridgeUpdate(event) => { + // pass to legacy, but not if the message isn't + // v1. + let legacy = match &event { + &NetworkBridgeEvent::PeerMessage(_, ref message) => match message { + Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility(_)) => true, + Versioned::V1(_) => true, + // TODO [now]: _ => false, + }, + _ => true, + }; - our_group - .into_iter() - .filter_map(|i| { - if Some(*i) == validator_info.our_index { - return None - } - let authority_id = &session_info.discovery_keys[i.0 as usize]; - authorities.get(authority_id).map(|p| *p) - }) - .collect() - } else { - Vec::new() + if legacy { + crate::legacy_v1::handle_network_update( + ctx, + legacy_v1_state, + v1_req_sender, + event, + &mut self.rng, + metrics, + ) + .await; } - }; - circulate_statement_and_dependents( - topology_storage, - peers, - active_heads, - ctx, - relay_parent, - statement, - group_peers, - metrics, - &mut self.rng, - ) - .await; - }, - StatementDistributionMessage::NetworkBridgeUpdate(event) => { - handle_network_update( - peers, - topology_storage, - authorities, - active_heads, - &*recent_outdated_heads, - ctx, - req_sender, - event, - metrics, - runtime, - &mut self.rng, - ) - .await; + + // TODO [now]: pass to vstaging, but not if the message is + // v1 or the connecting peer is v1. + }, }, - }, } Ok(false) } } - -/// Check whether a peer knows about a candidate from us. -/// -/// If not, it is deemed illegal for it to request corresponding data from us. -fn requesting_peer_knows_about_candidate( - peers: &HashMap, - requesting_peer: &PeerId, - relay_parent: &Hash, - candidate_hash: &CandidateHash, -) -> JfyiErrorResult { - let peer_data = peers - .get(requesting_peer) - .ok_or_else(|| JfyiError::NoSuchPeer(*requesting_peer))?; - let knowledge = peer_data - .view_knowledge - .get(relay_parent) - .ok_or_else(|| JfyiError::NoSuchHead(*relay_parent))?; - Ok(knowledge.sent_candidates.get(&candidate_hash).is_some()) -} diff --git a/node/overseer/src/tests.rs b/node/overseer/src/tests.rs index 78477ed0e2d4..59a7c3a57e98 100644 --- a/node/overseer/src/tests.rs +++ b/node/overseer/src/tests.rs @@ -930,7 +930,7 @@ fn test_prospective_parachains_msg() -> ProspectiveParachainsMessage { // Checks that `stop`, `broadcast_signal` and `broadcast_message` are implemented correctly. #[test] fn overseer_all_subsystems_receive_signals_and_messages() { - const NUM_SUBSYSTEMS: usize = 22; + const NUM_SUBSYSTEMS: usize = 23; // -4 for BitfieldSigning, GossipSupport, AvailabilityDistribution and PvfCheckerSubsystem. 
const NUM_SUBSYSTEMS_MESSAGED: usize = NUM_SUBSYSTEMS - 4; diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index cd3653ce4ae9..ae961701a8d6 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -180,6 +180,14 @@ impl Statement { Statement::Valid(hash) => CompactStatement::Valid(hash), } } + + /// Add the [`PersistedValidationData`] to the statement, if seconded. + pub fn supply_pvd(self, pvd: PersistedValidationData) -> StatementWithPVD { + match self { + Statement::Seconded(c) => StatementWithPVD::Seconded(c, pvd), + Statement::Valid(hash) => StatementWithPVD::Valid(hash), + } + } } impl From<&'_ Statement> for CompactStatement { diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 7c907506803c..2ff8454acc3a 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -732,7 +732,7 @@ impl RuntimeApiMessage { pub enum StatementDistributionMessage { /// We have originated a signed statement in the context of /// given relay-parent hash and it should be distributed to other validators. - Share(Hash, SignedFullStatement), + Share(Hash, SignedFullStatementWithPVD), /// Event from the network bridge. #[from] NetworkBridgeUpdate(NetworkBridgeEvent), diff --git a/roadmap/implementers-guide/src/node/backing/candidate-backing.md b/roadmap/implementers-guide/src/node/backing/candidate-backing.md index 6c3eace313c3..0eee0cc532ef 100644 --- a/roadmap/implementers-guide/src/node/backing/candidate-backing.md +++ b/roadmap/implementers-guide/src/node/backing/candidate-backing.md @@ -130,7 +130,7 @@ Dispatch a `CandidateValidationMessage::Validate(validation function, candidate, ### Distribute Signed Statement -Dispatch a [`StatementDistributionMessage`][SDM]`::Share(relay_parent, SignedFullStatement)`. +Dispatch a [`StatementDistributionMessage`][SDM]`::Share(relay_parent, SignedFullStatementWithPVD)`. [OverseerSignal]: ../../types/overseer-protocol.md#overseer-signal [Statement]: ../../types/backing.md#statement-type diff --git a/roadmap/implementers-guide/src/types/overseer-protocol.md b/roadmap/implementers-guide/src/types/overseer-protocol.md index b2559c4cfda7..f47fefe23097 100644 --- a/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -753,7 +753,7 @@ enum StatementDistributionMessage { /// /// The statement distribution subsystem assumes that the statement should be correctly /// signed. - Share(Hash, SignedFullStatement), + Share(Hash, SignedFullStatementWithPVD), } ``` From 82ff9916a599c0e6ed871cdabb2bca3e41b6f606 Mon Sep 17 00:00:00 2001 From: Robert Klotzner Date: Mon, 26 Sep 2022 13:49:09 +0200 Subject: [PATCH 12/76] Version 3 is already live. --- node/core/backing/src/lib.rs | 2 +- node/core/provisioner/src/lib.rs | 2 +- node/subsystem-types/src/messages.rs | 5 +++++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index c79bfdada8cb..ffc80994ca3c 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -777,7 +777,7 @@ async fn prospective_parachains_mode( .map_err(Error::RuntimeApiUnavailable)? 
 		.map_err(Error::FetchRuntimeApiVersion)?;

-	if version == 3 {
+	if version >= RuntimeApiRequest::VALIDITY_CONSTRAINTS {
 		Ok(ProspectiveParachainsMode::Enabled)
 	} else {
 		if version != 2 {
diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs
index 6c3d59ce2ac7..6e86b160a3ee 100644
--- a/node/core/provisioner/src/lib.rs
+++ b/node/core/provisioner/src/lib.rs
@@ -201,7 +201,7 @@ async fn prospective_parachains_mode(
 	let version = rx.await.map_err(Error::CanceledRuntimeApiVersion)?.map_err(Error::Runtime)?;

-	if version == 3 {
+	if version >= RuntimeApiRequest::VALIDITY_CONSTRAINTS {
 		Ok(ProspectiveParachainsMode::Enabled)
 	} else {
 		if version != 2 {
diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs
index faf6daa09357..854f73e43062 100644
--- a/node/subsystem-types/src/messages.rs
+++ b/node/subsystem-types/src/messages.rs
@@ -714,6 +714,11 @@ impl RuntimeApiRequest {

 	/// `Disputes`
 	pub const DISPUTES_RUNTIME_REQUIREMENT: u32 = 3;
+
+	/// Minimum version for validity constraints, required for async backing.
+	///
+	/// 99 for now, should be adjusted to VSTAGING/actual runtime version once released.
+	pub const VALIDITY_CONSTRAINTS: u32 = 99;
 }

 /// A message to the Runtime API subsystem.

From 614ee8c14626cd9bb7772b4b37b41bdf265faf2f Mon Sep 17 00:00:00 2001
From: Robert Klotzner
Date: Wed, 28 Sep 2022 12:23:47 +0200
Subject: [PATCH 13/76] Fix tests (#6055)

* Fix backing tests

* Fix warnings.
---
 node/core/backing/src/lib.rs                          | 10 ++++++++--
 node/core/backing/src/tests/prospective_parachains.rs |  2 +-
 node/core/provisioner/src/lib.rs                      |  4 ++--
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs
index ffc80994ca3c..e20960f15357 100644
--- a/node/core/backing/src/lib.rs
+++ b/node/core/backing/src/lib.rs
@@ -754,6 +754,12 @@ async fn handle_communication(
 		CandidateBackingMessage::GetBackedCandidates(relay_parent, requested_candidates, tx) =>
 			if let Some(rp_state) = state.per_relay_parent.get(&relay_parent) {
 				handle_get_backed_candidates_message(rp_state, requested_candidates, tx, metrics)?;
+			} else {
+				gum::debug!(
+					target: LOG_TARGET,
+					?relay_parent,
+					"Received `GetBackedCandidates` for an unknown relay parent."
+				);
 			},
 	}

@@ -780,10 +786,10 @@ async fn prospective_parachains_mode(
 	if version >= RuntimeApiRequest::VALIDITY_CONSTRAINTS {
 		Ok(ProspectiveParachainsMode::Enabled)
 	} else {
-		if version != 2 {
+		if version < 2 {
 			gum::warn!(
 				target: LOG_TARGET,
-				"Runtime API version is {}, expected 2 or 3. Prospective parachains are disabled",
+				"Runtime API version is {}, it is expected to be at least 2.
Prospective parachains are disabled", version ); } diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 3f7065d7d5df..59db7f62b722 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -21,7 +21,7 @@ use polkadot_primitives::v2::{BlockNumber, Header}; use super::*; -const API_VERSION_PROSPECTIVE_ENABLED: u32 = 3; +const API_VERSION_PROSPECTIVE_ENABLED: u32 = RuntimeApiRequest::VALIDITY_CONSTRAINTS; struct TestLeaf { activated: ActivatedLeaf, diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 6e86b160a3ee..d4461c2f9916 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -204,10 +204,10 @@ async fn prospective_parachains_mode( if version >= RuntimeApiRequest::VALIDITY_CONSTRAINTS { Ok(ProspectiveParachainsMode::Enabled) } else { - if version != 2 { + if version < 2 { gum::warn!( target: LOG_TARGET, - "Runtime API version is {}, expected 2 or 3. Prospective parachains are disabled", + "Runtime API version is {}, it is expected to be at least 2. Prospective parachains are disabled", version ); } From 04de8e503642b42036c842c2e0ab4a01515a5e51 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Fri, 7 Oct 2022 14:50:05 +0300 Subject: [PATCH 14/76] fmt --- node/core/backing/src/lib.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 6aca9d6efdcc..ef63178c8f26 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -651,7 +651,7 @@ async fn validate_and_make_available( let pov = match pov { PoVData::Ready(pov) => pov, - PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => { + PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => match request_pov( &mut sender, relay_parent, @@ -674,8 +674,7 @@ async fn validate_and_make_available( }, Err(err) => return Err(err), Ok(pov) => pov, - } - }, + }, }; let v = { From c23032acfd7b0bcb054f75918a5be1028adc73f0 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Thu, 13 Oct 2022 01:59:31 +0400 Subject: [PATCH 15/76] collator-protocol: asynchronous backing changes (#5740) * Draft collator side changes * Start working on collations management * Handle peer's view change * Versioning on advertising * Versioned collation fetching request * Handle versioned messages * Improve docs for collation requests * Add spans * Add request receiver to overseer * Fix collator side tests * Extract relay parent mode to lib * Validator side draft * Add more checks for advertisement * Request pvd based on async backing mode * review * Validator side improvements * Make old tests green * More fixes * Collator side tests draft * Send collation test * fmt * Collator side network protocol versioning * cleanup * merge artifacts * Validator side net protocol versioning * Remove fragment tree membership request * Resolve todo * Collator side core state test * Improve net protocol compatibility * Validator side tests * more improvements * style fixes * downgrade log * Track implicit assignments * Limit the number of seconded candidates per para * Add a sanity check * Handle fetched candidate * fix tests * Retry fetch * Guard against dequeueing while already fetching * Reintegrate connection management * Timeout on advertisements * fmt * spellcheck * update tests after merge 
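The two `prospective_parachains_mode` hunks above implement a single gate, reproduced here as a compact, self-contained sketch (the version value is stubbed; the real code fetches it via the Runtime API subsystem):

// Placeholder threshold, per the comment in the patch: 99 until the real
// runtime version is released.
const VALIDITY_CONSTRAINTS: u32 = 99;

#[derive(Debug, PartialEq)]
enum ProspectiveParachainsMode {
    Enabled,
    Disabled,
}

fn prospective_parachains_mode(version: u32) -> ProspectiveParachainsMode {
    if version >= VALIDITY_CONSTRAINTS {
        ProspectiveParachainsMode::Enabled
    } else {
        if version < 2 {
            eprintln!("Runtime API version is {version}, expected at least 2");
        }
        ProspectiveParachainsMode::Disabled
    }
}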
--- node/collation-generation/src/lib.rs | 10 +- .../src/collator_side/collation.rs | 162 ++ .../src/collator_side/metrics.rs | 4 +- .../src/collator_side/mod.rs | 1055 +++++++---- .../collator_side/{tests.rs => tests/mod.rs} | 443 ++++- .../tests/prospective_parachains.rs | 562 ++++++ .../src/collator_side/validators_buffer.rs | 82 +- node/network/collator-protocol/src/error.rs | 80 +- node/network/collator-protocol/src/lib.rs | 99 +- .../src/validator_side/collation.rs | 254 +++ .../src/validator_side/metrics.rs | 123 ++ .../src/validator_side/mod.rs | 1665 ++++++++++------- .../validator_side/{tests.rs => tests/mod.rs} | 426 ++++- .../tests/prospective_parachains.rs | 616 ++++++ node/network/protocol/src/lib.rs | 11 +- .../protocol/src/request_response/mod.rs | 28 +- .../protocol/src/request_response/outgoing.rs | 8 +- .../protocol/src/request_response/vstaging.rs | 40 + node/overseer/src/lib.rs | 2 + node/service/src/lib.rs | 9 +- node/service/src/overseer.rs | 30 +- node/subsystem-types/src/messages.rs | 10 +- .../src/backing_implicit_view.rs | 43 +- scripts/ci/gitlab/lingua.dic | 4 +- 24 files changed, 4434 insertions(+), 1332 deletions(-) create mode 100644 node/network/collator-protocol/src/collator_side/collation.rs rename node/network/collator-protocol/src/collator_side/{tests.rs => tests/mod.rs} (74%) create mode 100644 node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs create mode 100644 node/network/collator-protocol/src/validator_side/collation.rs create mode 100644 node/network/collator-protocol/src/validator_side/metrics.rs rename node/network/collator-protocol/src/validator_side/{tests.rs => tests/mod.rs} (73%) create mode 100644 node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs create mode 100644 node/network/protocol/src/request_response/vstaging.rs diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 500b500636ba..c68ba26acc0b 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -286,6 +286,7 @@ async fn handle_new_activations( "collation-builder", Box::pin(async move { let persisted_validation_data_hash = validation_data.hash(); + let parent_head_data_hash = validation_data.parent_head.hash(); let (collation, result_sender) = match (task_config.collator)(relay_parent, &validation_data).await { @@ -385,8 +386,13 @@ async fn handle_new_activations( if let Err(err) = task_sender .send( - CollatorProtocolMessage::DistributeCollation(ccr, pov, result_sender) - .into(), + CollatorProtocolMessage::DistributeCollation( + ccr, + parent_head_data_hash, + pov, + result_sender, + ) + .into(), ) .await { diff --git a/node/network/collator-protocol/src/collator_side/collation.rs b/node/network/collator-protocol/src/collator_side/collation.rs new file mode 100644 index 000000000000..36cdc7794b68 --- /dev/null +++ b/node/network/collator-protocol/src/collator_side/collation.rs @@ -0,0 +1,162 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Primitives for tracking collations-related data. + +use std::collections::{HashSet, VecDeque}; + +use futures::{future::BoxFuture, stream::FuturesUnordered}; + +use polkadot_node_network_protocol::{ + request_response::{ + incoming::OutgoingResponse, v1 as protocol_v1, vstaging as protocol_vstaging, + IncomingRequest, + }, + PeerId, +}; +use polkadot_node_primitives::PoV; +use polkadot_primitives::v2::{CandidateHash, CandidateReceipt, Hash, Id as ParaId}; + +/// The status of a collation as seen from the collator. +pub enum CollationStatus { + /// The collation was created, but we did not advertise it to any validator. + Created, + /// The collation was advertised to at least one validator. + Advertised, + /// The collation was requested by at least one validator. + Requested, +} + +impl CollationStatus { + /// Advance to the [`Self::Advertised`] status. + /// + /// This ensures that `self` isn't already [`Self::Requested`]. + pub fn advance_to_advertised(&mut self) { + if !matches!(self, Self::Requested) { + *self = Self::Advertised; + } + } + + /// Advance to the [`Self::Requested`] status. + pub fn advance_to_requested(&mut self) { + *self = Self::Requested; + } +} + +/// A collation built by the collator. +pub struct Collation { + /// Candidate receipt. + pub receipt: CandidateReceipt, + /// Parent head-data hash. + pub parent_head_data_hash: Hash, + /// Proof to verify the state transition of the parachain. + pub pov: PoV, + /// Collation status. + pub status: CollationStatus, +} + +/// Stores the state for waiting collation fetches per relay parent. +#[derive(Default)] +pub struct WaitingCollationFetches { + /// A flag indicating that we have an ongoing request. + /// This limits the number of collations being sent at any moment + /// of time to 1 for each relay parent. + /// + /// If set to `true`, any new request will be queued. + pub collation_fetch_active: bool, + /// The collation fetches waiting to be fulfilled. + pub req_queue: VecDeque, + /// All peers that are waiting or actively uploading. + /// + /// We will not accept multiple requests from the same peer, otherwise our DoS protection of + /// moving on to the next peer after `MAX_UNSHARED_UPLOAD_TIME` would be pointless. + pub waiting_peers: HashSet<(PeerId, CandidateHash)>, +} + +/// Backwards-compatible wrapper for incoming collations requests. +pub enum VersionedCollationRequest { + V1(IncomingRequest), + VStaging(IncomingRequest), +} + +impl From> for VersionedCollationRequest { + fn from(req: IncomingRequest) -> Self { + Self::V1(req) + } +} + +impl From> + for VersionedCollationRequest +{ + fn from(req: IncomingRequest) -> Self { + Self::VStaging(req) + } +} + +impl VersionedCollationRequest { + /// Returns parachain id from the request payload. + pub fn para_id(&self) -> ParaId { + match self { + VersionedCollationRequest::V1(req) => req.payload.para_id, + VersionedCollationRequest::VStaging(req) => req.payload.para_id, + } + } + + /// Returns relay parent from the request payload. + pub fn relay_parent(&self) -> Hash { + match self { + VersionedCollationRequest::V1(req) => req.payload.relay_parent, + VersionedCollationRequest::VStaging(req) => req.payload.relay_parent, + } + } + + /// Returns id of the peer the request was received from. 
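The `VersionedCollationRequest` wrapper above hides the wire version behind uniform accessors, so the rest of the subsystem never matches on the protocol version directly. A dependency-free sketch of the same dispatch pattern, with toy field types:

enum VersionedRequest {
    V1 { para_id: u32, relay_parent: u64 },
    VStaging { para_id: u32, relay_parent: u64 },
}

impl VersionedRequest {
    // Accessors hide the protocol version: callers read the fields
    // without caring which wire format delivered them.
    fn para_id(&self) -> u32 {
        match self {
            VersionedRequest::V1 { para_id, .. } |
            VersionedRequest::VStaging { para_id, .. } => *para_id,
        }
    }

    fn relay_parent(&self) -> u64 {
        match self {
            VersionedRequest::V1 { relay_parent, .. } |
            VersionedRequest::VStaging { relay_parent, .. } => *relay_parent,
        }
    }
}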
+ pub fn peer_id(&self) -> PeerId { + match self { + VersionedCollationRequest::V1(req) => req.peer, + VersionedCollationRequest::VStaging(req) => req.peer, + } + } + + /// Sends the response back to requester. + pub fn send_outgoing_response( + self, + response: OutgoingResponse, + ) -> Result<(), ()> { + match self { + VersionedCollationRequest::V1(req) => req.send_outgoing_response(response), + VersionedCollationRequest::VStaging(req) => req.send_outgoing_response(response), + } + } +} + +/// Result of the finished background send-collation task. +/// +/// Note that if the timeout was hit the request doesn't get +/// aborted, it only indicates that we should start processing +/// the next one from the queue. +pub struct CollationSendResult { + /// Candidate's relay parent. + pub relay_parent: Hash, + /// Candidate hash. + pub candidate_hash: CandidateHash, + /// Peer id. + pub peer_id: PeerId, + /// Whether the max unshared timeout was hit. + pub timed_out: bool, +} + +pub type ActiveCollationFetches = FuturesUnordered>; diff --git a/node/network/collator-protocol/src/collator_side/metrics.rs b/node/network/collator-protocol/src/collator_side/metrics.rs index 85e00406b9ba..04a9806605ab 100644 --- a/node/network/collator-protocol/src/collator_side/metrics.rs +++ b/node/network/collator-protocol/src/collator_side/metrics.rs @@ -1,4 +1,4 @@ -// Copyright 2017-2022 Parity Technologies (UK) Ltd. +// Copyright 2022 Parity Technologies (UK) Ltd. // This file is part of Polkadot. // Polkadot is free software: you can redistribute it and/or modify @@ -20,7 +20,7 @@ use polkadot_node_subsystem_util::metrics::{self, prometheus}; pub struct Metrics(Option); impl Metrics { - pub fn on_advertisment_made(&self) { + pub fn on_advertisement_made(&self) { if let Some(metrics) = &self.0 { metrics.advertisements_made.inc(); } diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index 427c92f672de..79222fde117c 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -15,25 +15,26 @@ // along with Polkadot. If not, see . 
use std::{ - collections::{HashMap, HashSet, VecDeque}, - pin::Pin, - time::{Duration, Instant}, + collections::{HashMap, HashSet}, + convert::TryInto, + time::Duration, }; +use bitvec::{bitvec, vec::BitVec}; use futures::{ - channel::oneshot, pin_mut, select, stream::FuturesUnordered, Future, FutureExt, StreamExt, + channel::oneshot, future::Fuse, pin_mut, select, stream::FuturesUnordered, FutureExt, StreamExt, }; use sp_core::Pair; use polkadot_node_network_protocol::{ self as net_protocol, - peer_set::PeerSet, + peer_set::{CollationVersion, PeerSet}, request_response::{ incoming::{self, OutgoingResponse}, - v1::{self as request_v1, CollationFetchingRequest, CollationFetchingResponse}, - IncomingRequest, IncomingRequestReceiver, + v1 as request_v1, vstaging as request_vstaging, IncomingRequestReceiver, }, - v1 as protocol_v1, OurView, PeerId, UnifiedReputationChange as Rep, Versioned, View, + v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, + UnifiedReputationChange as Rep, Versioned, View, }; use polkadot_node_primitives::{CollationSecondedSignal, PoV, Statement}; use polkadot_node_subsystem::{ @@ -41,9 +42,10 @@ use polkadot_node_subsystem::{ messages::{ CollatorProtocolMessage, NetworkBridgeEvent, NetworkBridgeTxMessage, RuntimeApiMessage, }, - overseer, FromOrchestra, OverseerSignal, PerLeafSpan, + overseer, CollatorProtocolSenderTrait, FromOrchestra, OverseerSignal, PerLeafSpan, }; use polkadot_node_subsystem_util::{ + backing_implicit_view::View as ImplicitView, runtime::{get_availability_cores, get_group_rotation_info, RuntimeInfo}, TimeoutExt, }; @@ -52,20 +54,30 @@ use polkadot_primitives::v2::{ GroupIndex, Hash, Id as ParaId, SessionIndex, }; -use super::LOG_TARGET; -use crate::error::{log_error, Error, FatalError, Result}; -use fatality::Split; +use super::{ + prospective_parachains_mode, ProspectiveParachainsMode, LOG_TARGET, MAX_CANDIDATE_DEPTH, +}; +use crate::{ + error::{log_error, Error, FatalError, Result}, + modify_reputation, +}; +mod collation; mod metrics; +#[cfg(test)] +mod tests; mod validators_buffer; -use validators_buffer::{ValidatorGroupsBuffer, VALIDATORS_BUFFER_CAPACITY}; +use collation::{ + ActiveCollationFetches, Collation, CollationSendResult, CollationStatus, + VersionedCollationRequest, WaitingCollationFetches, +}; +use validators_buffer::{ + ResetInterestTimeout, ValidatorGroupsBuffer, RESET_INTEREST_TIMEOUT, VALIDATORS_BUFFER_CAPACITY, +}; pub use metrics::Metrics; -#[cfg(test)] -mod tests; - const COST_INVALID_REQUEST: Rep = Rep::CostMajor("Peer sent unparsable request"); const COST_UNEXPECTED_MESSAGE: Rep = Rep::CostMinor("An unexpected message"); const COST_APPARENT_FLOOD: Rep = @@ -87,108 +99,112 @@ const MAX_UNSHARED_UPLOAD_TIME: Duration = Duration::from_millis(150); /// Validators are obtained from [`ValidatorGroupsBuffer::validators_to_connect`]. const RECONNECT_TIMEOUT: Duration = Duration::from_secs(12); -/// How often to check for reconnect timeout. -const RECONNECT_POLL: Duration = Duration::from_secs(1); +/// Future that when resolved indicates that we should update reserved peer-set +/// of validators we want to be connected to. +/// +/// `Pending` variant never finishes and should be used when there're no peers +/// connected. +type ReconnectTimeout = Fuse; /// Info about validators we are currently connected to. /// /// It keeps track to which validators we advertised our collation. 
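The rewrite below replaces the flat `advertised_to: HashSet` with per-candidate bit masks, one bit per validator in the group. A dependency-free sketch of that bookkeeping, using `Vec<bool>` in place of `BitVec` and strings in place of discovery ids:

use std::collections::HashMap;

struct Group {
    validators: Vec<String>,                // discovery ids, in group order
    advertised_to: HashMap<u64, Vec<bool>>, // candidate hash -> seen bits
}

impl Group {
    // Advertise only if this validator hasn't seen this candidate yet.
    fn should_advertise_to(&self, candidate: u64, validator: &str) -> bool {
        let idx = match self.validators.iter().position(|v| v.as_str() == validator) {
            Some(idx) => idx,
            None => return false,
        };
        self.advertised_to.get(&candidate).map_or(true, |bits| !bits[idx])
    }

    // Record the advertisement, lazily allocating the bit mask.
    fn note_advertised(&mut self, candidate: u64, validator: &str) {
        if let Some(idx) = self.validators.iter().position(|v| v.as_str() == validator) {
            let len = self.validators.len();
            let bits = self.advertised_to.entry(candidate).or_insert_with(|| vec![false; len]);
            bits[idx] = true;
        }
    }
}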
-#[derive(Debug)] +#[derive(Debug, Default)] struct ValidatorGroup { - /// All [`ValidatorId`]'s of the current group to that we advertised our collation. - advertised_to: HashSet, + /// Validators discovery ids. Lazily initialized when first + /// distributing a collation. + validators: Vec, + + /// Bits indicating which validators have already seen the announcement + /// per candidate. + advertised_to: HashMap, } impl ValidatorGroup { - /// Create a new `ValidatorGroup` - /// - /// without any advertisements. - fn new() -> Self { - Self { advertised_to: HashSet::new() } - } - /// Returns `true` if we should advertise our collation to the given peer. fn should_advertise_to( &self, + candidate_hash: &CandidateHash, peer_ids: &HashMap>, peer: &PeerId, ) -> bool { - match peer_ids.get(peer) { - Some(discovery_ids) => !discovery_ids.iter().any(|d| self.advertised_to.contains(d)), - None => false, + let authority_ids = match peer_ids.get(peer) { + Some(authority_ids) => authority_ids, + None => return false, + }; + + for id in authority_ids { + // One peer id may correspond to different discovery ids across sessions, + // having a non-empty intersection is sufficient to assume that this peer + // belongs to this particular validator group. + let validator_index = match self.validators.iter().position(|v| v == id) { + Some(idx) => idx, + None => continue, + }; + + // Either the candidate is unseen by this validator group + // or the corresponding bit is not set. + if self + .advertised_to + .get(candidate_hash) + .map_or(true, |advertised| !advertised[validator_index]) + { + return true + } } + + false } /// Should be called after we advertised our collation to the given `peer` to keep track of it. fn advertised_to_peer( &mut self, + candidate_hash: &CandidateHash, peer_ids: &HashMap>, peer: &PeerId, ) { if let Some(authority_ids) = peer_ids.get(peer) { - authority_ids.iter().for_each(|a| { - self.advertised_to.insert(a.clone()); - }); - } - } -} - -/// The status of a collation as seen from the collator. -enum CollationStatus { - /// The collation was created, but we did not advertise it to any validator. - Created, - /// The collation was advertised to at least one validator. - Advertised, - /// The collation was requested by at least one validator. - Requested, -} - -impl CollationStatus { - /// Advance to the [`Self::Advertised`] status. - /// - /// This ensures that `self` isn't already [`Self::Requested`]. - fn advance_to_advertised(&mut self) { - if !matches!(self, Self::Requested) { - *self = Self::Advertised; + for id in authority_ids { + let validator_index = match self.validators.iter().position(|v| v == id) { + Some(idx) => idx, + None => continue, + }; + self.advertised_to + .entry(*candidate_hash) + .or_insert_with(|| bitvec![0; self.validators.len()]) + .set(validator_index, true); + } } } - - /// Advance to the [`Self::Requested`] status. - fn advance_to_requested(&mut self) { - *self = Self::Requested; - } } -/// A collation built by the collator. -struct Collation { - receipt: CandidateReceipt, - pov: PoV, - status: CollationStatus, +#[derive(Debug)] +struct PeerData { + /// Peer's view. + view: View, + /// Network protocol version. + version: CollationVersion, } -/// Stores the state for waiting collation fetches. -#[derive(Default)] -struct WaitingCollationFetches { - /// Is there currently a collation getting fetched? - collation_fetch_active: bool, - /// The collation fetches waiting to be fulfilled. 
- waiting: VecDeque>, - /// All peers that are waiting or actively uploading. - /// - /// We will not accept multiple requests from the same peer, otherwise our DoS protection of - /// moving on to the next peer after `MAX_UNSHARED_UPLOAD_TIME` would be pointless. - waiting_peers: HashSet, +struct PerRelayParent { + prospective_parachains_mode: ProspectiveParachainsMode, + /// Validators group responsible for backing candidates built + /// on top of this relay parent. + validator_group: ValidatorGroup, + /// Distributed collations. + collations: HashMap, } -struct CollationSendResult { - relay_parent: Hash, - peer_id: PeerId, - timed_out: bool, +impl PerRelayParent { + fn new(mode: ProspectiveParachainsMode) -> Self { + Self { + prospective_parachains_mode: mode, + validator_group: ValidatorGroup::default(), + collations: HashMap::new(), + } + } } -type ActiveCollationFetches = - FuturesUnordered + Send + 'static>>>; - struct State { /// Our network peer id. local_peer_id: PeerId, @@ -202,25 +218,34 @@ struct State { /// Track all active peers and their views /// to determine what is relevant to them. - peer_views: HashMap, + peer_data: HashMap, - /// Our own view. - view: OurView, + /// Leaves that do support asynchronous backing along with + /// implicit ancestry. Leaves from the implicit view are present in + /// `active_leaves`, the opposite doesn't hold true. + /// + /// Relay-chain blocks which don't support prospective parachains are + /// never included in the fragment trees of active leaves which do. In + /// particular, this means that if a given relay parent belongs to implicit + /// ancestry of some active leaf, then it does support prospective parachains. + implicit_view: ImplicitView, + + /// All active leaves observed by us, including both that do and do not + /// support prospective parachains. This mapping works as a replacement for + /// [`polkadot_node_network_protocol::View`] and can be dropped once the transition + /// to asynchronous backing is done. + active_leaves: HashMap, + + /// Validators and distributed collations tracked for each relay parent from + /// our view, including both leaves and implicit ancestry. + per_relay_parent: HashMap, /// Span per relay parent. span_per_relay_parent: HashMap, - /// Possessed collations. - /// - /// We will keep up to one local collation per relay-parent. - collations: HashMap, - /// The result senders per collation. collation_result_senders: HashMap>, - /// Our validator groups per active leaf. - our_validators_groups: HashMap, - /// The mapping from [`PeerId`] to [`HashSet`]. This is filled over time as we learn the [`PeerId`]'s /// by `PeerConnected` events. peer_ids: HashMap>, @@ -228,9 +253,9 @@ struct State { /// Tracks which validators we want to stay connected to. validator_groups_buf: ValidatorGroupsBuffer, - /// Timestamp of the last connection request to a non-empty list of validators, - /// `None` otherwise. - last_connected_at: Option, + /// Timeout-future that enforces collator to update the peer-set at least once + /// every [`RECONNECT_TIMEOUT`] seconds. + reconnect_timeout: ReconnectTimeout, /// Metrics. metrics: Metrics, @@ -245,6 +270,14 @@ struct State { /// /// Each future returns the relay parent of the finished collation fetch. active_collation_fetches: ActiveCollationFetches, + + /// Time limits for validators to fetch the collation once the advertisement + /// was sent. 
+ /// + /// Given an implicit view a collation may stay in memory for significant amount + /// of time, if we don't timeout validators the node will keep attempting to connect + /// to unneeded peers. + advertisement_timeouts: FuturesUnordered, } impl State { @@ -256,28 +289,20 @@ impl State { collator_pair, metrics, collating_on: Default::default(), - peer_views: Default::default(), - view: Default::default(), + peer_data: Default::default(), + implicit_view: Default::default(), + active_leaves: Default::default(), + per_relay_parent: Default::default(), span_per_relay_parent: Default::default(), - collations: Default::default(), collation_result_senders: Default::default(), - our_validators_groups: Default::default(), peer_ids: Default::default(), validator_groups_buf: ValidatorGroupsBuffer::with_capacity(VALIDATORS_BUFFER_CAPACITY), - last_connected_at: None, + reconnect_timeout: Fuse::terminated(), waiting_collation_fetches: Default::default(), active_collation_fetches: Default::default(), + advertisement_timeouts: Default::default(), } } - - /// Get all peers which have the given relay parent in their view. - fn peers_interested_in_leaf(&self, relay_parent: &Hash) -> Vec { - self.peer_views - .iter() - .filter(|(_, v)| v.contains(relay_parent)) - .map(|(peer, _)| *peer) - .collect() - } } /// Distribute a collation. @@ -295,52 +320,77 @@ async fn distribute_collation( state: &mut State, id: ParaId, receipt: CandidateReceipt, + parent_head_data_hash: Hash, pov: PoV, result_sender: Option>, ) -> Result<()> { - let relay_parent = receipt.descriptor.relay_parent; + let candidate_relay_parent = receipt.descriptor.relay_parent; let candidate_hash = receipt.hash(); - // This collation is not in the active-leaves set. - if !state.view.contains(&relay_parent) { - gum::warn!( + let per_relay_parent = match state.per_relay_parent.get_mut(&candidate_relay_parent) { + Some(per_relay_parent) => per_relay_parent, + None => { + gum::debug!( + target: LOG_TARGET, + para_id = %id, + candidate_relay_parent = %candidate_relay_parent, + candidate_hash = ?candidate_hash, + "Candidate relay parent is out of our view", + ); + return Ok(()) + }, + }; + let relay_parent_mode = per_relay_parent.prospective_parachains_mode; + + let collations_limit = match relay_parent_mode { + ProspectiveParachainsMode::Disabled => 1, + ProspectiveParachainsMode::Enabled => MAX_CANDIDATE_DEPTH + 1, + }; + + if per_relay_parent.collations.len() >= collations_limit { + gum::debug!( target: LOG_TARGET, - ?relay_parent, - "distribute collation message parent is outside of our view", + ?candidate_relay_parent, + ?relay_parent_mode, + "The limit of {} collations per relay parent is already reached", + collations_limit, ); - return Ok(()) } // We have already seen collation for this relay parent. - if state.collations.contains_key(&relay_parent) { + if per_relay_parent.collations.contains_key(&candidate_hash) { gum::debug!( target: LOG_TARGET, - ?relay_parent, - "Already seen collation for this relay parent", + ?candidate_relay_parent, + ?candidate_hash, + "Already seen this candidate", ); return Ok(()) } // Determine which core the para collated-on is assigned to. // If it is not scheduled then ignore the message. - let (our_core, num_cores) = match determine_core(ctx.sender(), id, relay_parent).await? 
{ - Some(core) => core, - None => { - gum::warn!( - target: LOG_TARGET, - para_id = %id, - ?relay_parent, - "looks like no core is assigned to {} at {}", id, relay_parent, - ); + let (our_core, num_cores) = + match determine_core(ctx.sender(), id, candidate_relay_parent, relay_parent_mode).await? { + Some(core) => core, + None => { + gum::warn!( + target: LOG_TARGET, + para_id = %id, + "looks like no core is assigned to {} at {}", id, candidate_relay_parent, + ); - return Ok(()) - }, - }; + return Ok(()) + }, + }; // Determine the group on that core. + // + // When prospective parachains are disabled, candidate relay parent here is + // guaranteed to be an active leaf. let GroupValidators { validators, session_index, group_index } = - determine_our_validators(ctx, runtime, our_core, num_cores, relay_parent).await?; + determine_our_validators(ctx, runtime, our_core, num_cores, candidate_relay_parent).await?; if validators.is_empty() { gum::warn!( @@ -352,13 +402,13 @@ async fn distribute_collation( return Ok(()) } - // It's important to insert new collation bits **before** + // It's important to insert new collation interests **before** // issuing a connection request. // // If a validator managed to fetch all the relevant collations // but still assigned to our core, we keep the connection alive. state.validator_groups_buf.note_collation_advertised( - relay_parent, + candidate_hash, session_index, group_index, &validators, @@ -367,7 +417,8 @@ async fn distribute_collation( gum::debug!( target: LOG_TARGET, para_id = %id, - relay_parent = %relay_parent, + candidate_relay_parent = %candidate_relay_parent, + relay_parent_mode = ?relay_parent_mode, ?candidate_hash, pov_hash = ?pov.hash(), core = ?our_core, @@ -375,23 +426,56 @@ async fn distribute_collation( "Accepted collation, connecting to validators." ); - // Update a set of connected validators if necessary. - state.last_connected_at = connect_to_validators(ctx, &state.validator_groups_buf).await; + let validators_at_relay_parent = &mut per_relay_parent.validator_group.validators; + if validators_at_relay_parent.is_empty() { + *validators_at_relay_parent = validators; + } - state.our_validators_groups.insert(relay_parent, ValidatorGroup::new()); + // Update a set of connected validators if necessary. + state.reconnect_timeout = connect_to_validators(ctx, &state.validator_groups_buf).await; if let Some(result_sender) = result_sender { state.collation_result_senders.insert(candidate_hash, result_sender); } - state - .collations - .insert(relay_parent, Collation { receipt, pov, status: CollationStatus::Created }); + per_relay_parent.collations.insert( + candidate_hash, + Collation { receipt, parent_head_data_hash, pov, status: CollationStatus::Created }, + ); + + // If prospective parachains are disabled, a leaf should be known to peer. + // Otherwise, it should be present in allowed ancestry of some leaf. + // + // It's collation-producer responsibility to verify that there exists + // a hypothetical membership in a fragment tree for candidate. + let interested = + state + .peer_data + .iter() + .filter(|(_, PeerData { view: v, .. 
})| match relay_parent_mode {
+			ProspectiveParachainsMode::Disabled => v.contains(&candidate_relay_parent),
+			ProspectiveParachainsMode::Enabled => v.iter().any(|block_hash| {
+				state
+					.implicit_view
+					.known_allowed_relay_parents_under(block_hash, Some(id))
+					.unwrap_or_default()
+					.contains(&candidate_relay_parent)
+			}),
+		});
 
-	let interested = state.peers_interested_in_leaf(&relay_parent);
 	// Make sure already connected peers get collations:
-	for peer_id in interested {
-		advertise_collation(ctx, state, relay_parent, peer_id).await;
+	for (peer_id, peer_data) in interested {
+		advertise_collation(
+			ctx,
+			candidate_relay_parent,
+			per_relay_parent,
+			peer_id,
+			peer_data.version,
+			&state.peer_ids,
+			&mut state.advertisement_timeouts,
+			&state.metrics,
+		)
+		.await;
 	}
 
 	Ok(())
@@ -403,14 +487,26 @@ async fn determine_core(
 	sender: &mut impl overseer::SubsystemSender,
 	para_id: ParaId,
 	relay_parent: Hash,
+	relay_parent_mode: ProspectiveParachainsMode,
 ) -> Result<Option<(CoreIndex, usize)>> {
 	let cores = get_availability_cores(sender, relay_parent).await?;
 
 	for (idx, core) in cores.iter().enumerate() {
-		if let CoreState::Scheduled(occupied) = core {
-			if occupied.para_id == para_id {
-				return Ok(Some(((idx as u32).into(), cores.len())))
-			}
+		let core_para_id = match core {
+			CoreState::Scheduled(scheduled) => Some(scheduled.para_id),
+			CoreState::Occupied(occupied) =>
+				if relay_parent_mode.is_enabled() {
+					// With async backing we don't care about the core state,
+					// it is only needed for figuring out our validators group.
+					Some(occupied.candidate_descriptor.para_id)
+				} else {
+					None
+				},
+			CoreState::Free => None,
+		};
+
+		if core_para_id == Some(para_id) {
+			return Ok(Some(((idx as u32).into(), cores.len())))
+		}
 	}
 
@@ -467,36 +563,62 @@ async fn determine_our_validators(
 	Ok(current_validators)
 }
 
-/// Issue a `Declare` collation message to the given `peer`.
-#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
-async fn declare(ctx: &mut Context, state: &mut State, peer: PeerId) {
-	let declare_signature_payload = protocol_v1::declare_signature_payload(&state.local_peer_id);
-
-	if let Some(para_id) = state.collating_on {
-		let wire_message = protocol_v1::CollatorProtocolMessage::Declare(
-			state.collator_pair.public(),
-			para_id,
-			state.collator_pair.sign(&declare_signature_payload),
-		);
+/// Construct the declare message to be sent to a validator depending on its
+/// network protocol version.
+fn declare_message( + state: &mut State, + version: CollationVersion, +) -> Option> { + let para_id = state.collating_on?; + Some(match version { + CollationVersion::V1 => { + let declare_signature_payload = + protocol_v1::declare_signature_payload(&state.local_peer_id); + let wire_message = protocol_v1::CollatorProtocolMessage::Declare( + state.collator_pair.public(), + para_id, + state.collator_pair.sign(&declare_signature_payload), + ); + Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)) + }, + CollationVersion::VStaging => { + let declare_signature_payload = + protocol_vstaging::declare_signature_payload(&state.local_peer_id); + let wire_message = protocol_vstaging::CollatorProtocolMessage::Declare( + state.collator_pair.public(), + para_id, + state.collator_pair.sign(&declare_signature_payload), + ); + Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( + wire_message, + )) + }, + }) +} - ctx.send_message(NetworkBridgeTxMessage::SendCollationMessage( - vec![peer], - Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)), - )) - .await; +/// Issue versioned `Declare` collation message to the given `peer`. +#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] +async fn declare( + ctx: &mut Context, + state: &mut State, + peer: &PeerId, + version: CollationVersion, +) { + if let Some(wire_message) = declare_message(state, version) { + ctx.send_message(NetworkBridgeTxMessage::SendCollationMessage(vec![*peer], wire_message)) + .await; } } /// Updates a set of connected validators based on their advertisement-bits /// in a validators buffer. /// -/// Returns current timestamp if the connection request was non-empty, `None` -/// otherwise. +/// Should be called again once a returned future resolves. #[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] async fn connect_to_validators( ctx: &mut Context, validator_groups_buf: &ValidatorGroupsBuffer, -) -> Option { +) -> ReconnectTimeout { let validator_ids = validator_groups_buf.validators_to_connect(); let is_disconnect = validator_ids.is_empty(); @@ -510,69 +632,105 @@ async fn connect_to_validators( }) .await; - (!is_disconnect).then_some(Instant::now()) + if is_disconnect { + gum::trace!(target: LOG_TARGET, "Disconnecting from all peers"); + // Never resolves. + Fuse::terminated() + } else { + futures_timer::Delay::new(RECONNECT_TIMEOUT).fuse() + } } /// Advertise collation to the given `peer`. /// -/// This will only advertise a collation if there exists one for the given `relay_parent` and the given `peer` is -/// set as validator for our para at the given `relay_parent`. +/// This will only advertise a collation if there exists at least one for the given +/// `relay_parent` and the given `peer` is set as validator for our para at the given `relay_parent`. +/// +/// We also make sure not to advertise the same collation multiple times to the same validator. 
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] async fn advertise_collation( ctx: &mut Context, - state: &mut State, relay_parent: Hash, - peer: PeerId, + per_relay_parent: &mut PerRelayParent, + peer: &PeerId, + protocol_version: CollationVersion, + peer_ids: &HashMap>, + advertisement_timeouts: &mut FuturesUnordered, + metrics: &Metrics, ) { - let should_advertise = state - .our_validators_groups - .get(&relay_parent) - .map(|g| g.should_advertise_to(&state.peer_ids, &peer)) - .unwrap_or(false); + for (candidate_hash, collation) in per_relay_parent.collations.iter_mut() { + // Check that peer will be able to request the collation. + if let CollationVersion::V1 = protocol_version { + if per_relay_parent.prospective_parachains_mode.is_enabled() { + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + peer_id = %peer, + "Skipping advertising to validator, incorrect network protocol version", + ); + return + } + } - match (state.collations.get_mut(&relay_parent), should_advertise) { - (None, _) => { - gum::trace!( - target: LOG_TARGET, - ?relay_parent, - peer_id = %peer, - "No collation to advertise.", - ); - return - }, - (_, false) => { - gum::debug!( - target: LOG_TARGET, - ?relay_parent, - peer_id = %peer, - "Not advertising collation as we already advertised it to this validator.", - ); - return - }, - (Some(collation), true) => { + let should_advertise = + per_relay_parent + .validator_group + .should_advertise_to(candidate_hash, peer_ids, &peer); + + if !should_advertise { gum::debug!( target: LOG_TARGET, ?relay_parent, peer_id = %peer, - "Advertising collation.", + "Not advertising collation since validator is not interested", ); - collation.status.advance_to_advertised() - }, - } + continue + } - let wire_message = protocol_v1::CollatorProtocolMessage::AdvertiseCollation(relay_parent); + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + peer_id = %peer, + "Advertising collation.", + ); + collation.status.advance_to_advertised(); + + let collation_message = match protocol_version { + CollationVersion::VStaging => { + let wire_message = protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation { + relay_parent, + candidate_hash: *candidate_hash, + parent_head_data_hash: collation.parent_head_data_hash, + }; + Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( + wire_message, + )) + }, + CollationVersion::V1 => { + let wire_message = + protocol_v1::CollatorProtocolMessage::AdvertiseCollation(relay_parent); + Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)) + }, + }; - ctx.send_message(NetworkBridgeTxMessage::SendCollationMessage( - vec![peer.clone()], - Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)), - )) - .await; + ctx.send_message(NetworkBridgeTxMessage::SendCollationMessage( + vec![peer.clone()], + collation_message, + )) + .await; - if let Some(validators) = state.our_validators_groups.get_mut(&relay_parent) { - validators.advertised_to_peer(&state.peer_ids, &peer); - } + per_relay_parent + .validator_group + .advertised_to_peer(candidate_hash, &peer_ids, peer); + + advertisement_timeouts.push(ResetInterestTimeout::new( + *candidate_hash, + *peer, + RESET_INTEREST_TIMEOUT, + )); - state.metrics.on_advertisment_made(); + metrics.on_advertisement_made(); + } } /// The main incoming message dispatching switch. 
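(Editorial aside, not part of the patch: the per-candidate dedup driving `should_advertise_to`/`advertised_to_peer` above boils down to a map from candidate to the set of peers already served. A minimal, self-contained sketch of that bookkeeping, where `AdvertisementBook` and the `u64`/`[u8; 32]` aliases are simplified stand-ins rather than the real `ValidatorGroup`, `PeerId` and `CandidateHash` types:)

	use std::collections::{HashMap, HashSet};

	type CandidateHash = [u8; 32];
	type PeerId = u64;

	/// Tracks, per candidate, which peers have already been advertised to.
	#[derive(Default)]
	struct AdvertisementBook {
		advertised: HashMap<CandidateHash, HashSet<PeerId>>,
	}

	impl AdvertisementBook {
		/// Advertise only if this peer hasn't been sent this candidate yet.
		fn should_advertise_to(&self, candidate: &CandidateHash, peer: &PeerId) -> bool {
			self.advertised.get(candidate).map_or(true, |peers| !peers.contains(peer))
		}

		/// Record the advertisement so repeats to the same validator are suppressed.
		fn advertised_to_peer(&mut self, candidate: CandidateHash, peer: PeerId) {
			self.advertised.entry(candidate).or_default().insert(peer);
		}
	}

	fn main() {
		let mut book = AdvertisementBook::default();
		let (candidate, peer) = ([0u8; 32], 1u64);
		assert!(book.should_advertise_to(&candidate, &peer));
		book.advertised_to_peer(candidate, peer);
		// A second advertisement of the same candidate to the same peer is skipped.
		assert!(!book.should_advertise_to(&candidate, &peer));
	}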
@@ -589,12 +747,13 @@ async fn process_msg( CollateOn(id) => { state.collating_on = Some(id); }, - DistributeCollation(receipt, pov, result_sender) => { + DistributeCollation(receipt, parent_head_data_hash, pov, result_sender) => { let _span1 = state .span_per_relay_parent .get(&receipt.descriptor.relay_parent) .map(|s| s.child("distributing-collation")); let _span2 = jaeger::Span::new(&pov, "distributing-collation"); + match state.collating_on { Some(id) if receipt.descriptor.para_id != id => { // If the ParaId of a collation requested to be distributed does not match @@ -608,8 +767,17 @@ async fn process_msg( }, Some(id) => { let _ = state.metrics.time_collation_distribution("distribute"); - distribute_collation(ctx, runtime, state, id, receipt, pov, result_sender) - .await?; + distribute_collation( + ctx, + runtime, + state, + id, + receipt, + parent_head_data_hash, + pov, + result_sender, + ) + .await?; }, None => { gum::warn!( @@ -647,17 +815,20 @@ async fn process_msg( /// Issue a response to a previously requested collation. async fn send_collation( state: &mut State, - request: IncomingRequest, + request: VersionedCollationRequest, receipt: CandidateReceipt, pov: PoV, ) { let (tx, rx) = oneshot::channel(); - let relay_parent = request.payload.relay_parent; - let peer_id = request.peer; + let relay_parent = request.relay_parent(); + let peer_id = request.peer_id(); + let candidate_hash = receipt.hash(); + // The response payload is the same for both versions of protocol + // and doesn't have vstaging alias for simplicity. let response = OutgoingResponse { - result: Ok(CollationFetchingResponse::Collation(receipt, pov)), + result: Ok(request_v1::CollationFetchingResponse::Collation(receipt, pov)), reputation_changes: Vec::new(), sent_feedback: Some(tx), }; @@ -671,7 +842,7 @@ async fn send_collation( let r = rx.timeout(MAX_UNSHARED_UPLOAD_TIME).await; let timed_out = r.is_none(); - CollationSendResult { relay_parent, peer_id, timed_out } + CollationSendResult { relay_parent, candidate_hash, peer_id, timed_out } } .boxed(), ); @@ -686,12 +857,16 @@ async fn handle_incoming_peer_message( runtime: &mut RuntimeInfo, state: &mut State, origin: PeerId, - msg: protocol_v1::CollatorProtocolMessage, + msg: Versioned< + protocol_v1::CollatorProtocolMessage, + protocol_vstaging::CollatorProtocolMessage, + >, ) -> Result<()> { - use protocol_v1::CollatorProtocolMessage::*; + use protocol_v1::CollatorProtocolMessage as V1; + use protocol_vstaging::CollatorProtocolMessage as VStaging; match msg { - Declare(_, _, _) => { + Versioned::V1(V1::Declare(..)) | Versioned::VStaging(VStaging::Declare(..)) => { gum::trace!( target: LOG_TARGET, ?origin, @@ -702,24 +877,22 @@ async fn handle_incoming_peer_message( ctx.send_message(NetworkBridgeTxMessage::DisconnectPeer(origin, PeerSet::Collation)) .await; }, - AdvertiseCollation(_) => { + Versioned::V1(V1::AdvertiseCollation(_)) | + Versioned::VStaging(VStaging::AdvertiseCollation { .. }) => { gum::trace!( target: LOG_TARGET, ?origin, "AdvertiseCollation message is not expected on the collator side of the protocol", ); - ctx.send_message(NetworkBridgeTxMessage::ReportPeer( - origin.clone(), - COST_UNEXPECTED_MESSAGE, - )) - .await; + modify_reputation(ctx.sender(), origin, COST_UNEXPECTED_MESSAGE).await; // If we are advertised to, this is another collator, and we should disconnect. 
ctx.send_message(NetworkBridgeTxMessage::DisconnectPeer(origin, PeerSet::Collation)) .await; }, - CollationSeconded(relay_parent, statement) => { + Versioned::V1(V1::CollationSeconded(relay_parent, statement)) | + Versioned::VStaging(VStaging::CollationSeconded(relay_parent, statement)) => { if !matches!(statement.unchecked_payload(), Statement::Seconded(_)) { gum::warn!( target: LOG_TARGET, @@ -764,48 +937,82 @@ async fn handle_incoming_peer_message( async fn handle_incoming_request( ctx: &mut Context, state: &mut State, - req: IncomingRequest, + req: std::result::Result, ) -> Result<()> { + let req = req?; + let relay_parent = req.relay_parent(); + let peer_id = req.peer_id(); + let para_id = req.para_id(); + let _span = state .span_per_relay_parent - .get(&req.payload.relay_parent) + .get(&relay_parent) .map(|s| s.child("request-collation")); match state.collating_on { - Some(our_para_id) if our_para_id == req.payload.para_id => { - let (receipt, pov) = - if let Some(collation) = state.collations.get_mut(&req.payload.relay_parent) { - collation.status.advance_to_requested(); - (collation.receipt.clone(), collation.pov.clone()) - } else { + Some(our_para_id) if our_para_id == para_id => { + let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { + Some(per_relay_parent) => per_relay_parent, + None => { + gum::debug!( + target: LOG_TARGET, + relay_parent = %relay_parent, + "received a `RequestCollation` for a relay parent out of our view", + ); + + return Ok(()) + }, + }; + let mode = per_relay_parent.prospective_parachains_mode; + + let collation = match &req { + VersionedCollationRequest::V1(_) if !mode.is_enabled() => + per_relay_parent.collations.values_mut().next(), + VersionedCollationRequest::VStaging(req) => + per_relay_parent.collations.get_mut(&req.payload.candidate_hash), + _ => { gum::warn!( target: LOG_TARGET, - relay_parent = %req.payload.relay_parent, - "received a `RequestCollation` for a relay parent we don't have collation stored.", + relay_parent = %relay_parent, + prospective_parachains_mode = ?mode, + ?peer_id, + "Collation request version is invalid", ); return Ok(()) - }; + }, + }; + let (receipt, pov) = if let Some(collation) = collation { + collation.status.advance_to_requested(); + (collation.receipt.clone(), collation.pov.clone()) + } else { + gum::warn!( + target: LOG_TARGET, + relay_parent = %relay_parent, + "received a `RequestCollation` for a relay parent we don't have collation stored.", + ); + + return Ok(()) + }; state.metrics.on_collation_sent_requested(); let _span = _span.as_ref().map(|s| s.child("sending")); - let waiting = - state.waiting_collation_fetches.entry(req.payload.relay_parent).or_default(); + let waiting = state.waiting_collation_fetches.entry(relay_parent).or_default(); + let candidate_hash = receipt.hash(); - if !waiting.waiting_peers.insert(req.peer) { + if !waiting.waiting_peers.insert((peer_id, candidate_hash)) { gum::debug!( target: LOG_TARGET, "Dropping incoming request as peer has a request in flight already." 
); - ctx.send_message(NetworkBridgeTxMessage::ReportPeer(req.peer, COST_APPARENT_FLOOD)) - .await; + modify_reputation(ctx.sender(), peer_id, COST_APPARENT_FLOOD).await; return Ok(()) } if waiting.collation_fetch_active { - waiting.waiting.push_back(req); + waiting.req_queue.push_back(req); } else { waiting.collation_fetch_active = true; // Obtain a timer for sending collation @@ -816,7 +1023,7 @@ async fn handle_incoming_request( Some(our_para_id) => { gum::warn!( target: LOG_TARGET, - for_para_id = %req.payload.para_id, + for_para_id = %para_id, our_para_id = %our_para_id, "received a `CollationFetchingRequest` for unexpected para_id", ); @@ -824,7 +1031,7 @@ async fn handle_incoming_request( None => { gum::warn!( target: LOG_TARGET, - for_para_id = %req.payload.para_id, + for_para_id = %para_id, "received a `RequestCollation` while not collating on any para", ); }, @@ -832,7 +1039,8 @@ async fn handle_incoming_request( Ok(()) } -/// Our view has changed. +/// Peer's view has changed. Send advertisements for new relay parents +/// if there're any. #[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] async fn handle_peer_view_change( ctx: &mut Context, @@ -840,14 +1048,54 @@ async fn handle_peer_view_change( peer_id: PeerId, view: View, ) { - let current = state.peer_views.entry(peer_id.clone()).or_default(); + let PeerData { view: current, version } = match state.peer_data.get_mut(&peer_id) { + Some(peer_data) => peer_data, + None => return, + }; let added: Vec = view.difference(&*current).cloned().collect(); *current = view; for added in added.into_iter() { - advertise_collation(ctx, state, added, peer_id.clone()).await; + let block_hashes = match state + .per_relay_parent + .get(&added) + .map(|per_relay_parent| per_relay_parent.prospective_parachains_mode) + { + Some(ProspectiveParachainsMode::Disabled) => std::slice::from_ref(&added), + Some(ProspectiveParachainsMode::Enabled) => state + .implicit_view + .known_allowed_relay_parents_under(&added, state.collating_on) + .unwrap_or_default(), + None => { + gum::trace!( + target: LOG_TARGET, + ?peer_id, + new_leaf = ?added, + "New leaf in peer's view is unknown", + ); + continue + }, + }; + + for block_hash in block_hashes { + let per_relay_parent = match state.per_relay_parent.get_mut(block_hash) { + Some(per_relay_parent) => per_relay_parent, + None => continue, + }; + advertise_collation( + ctx, + *block_hash, + per_relay_parent, + &peer_id, + *version, + &state.peer_ids, + &mut state.advertisement_timeouts, + &state.metrics, + ) + .await; + } } } @@ -862,10 +1110,30 @@ async fn handle_network_msg( use NetworkBridgeEvent::*; match bridge_message { - PeerConnected(peer_id, observed_role, _, maybe_authority) => { + PeerConnected(peer_id, observed_role, protocol_version, maybe_authority) => { // If it is possible that a disconnected validator would attempt a reconnect // it should be handled here. gum::trace!(target: LOG_TARGET, ?peer_id, ?observed_role, "Peer connected"); + + let version = match protocol_version.try_into() { + Ok(version) => version, + Err(err) => { + // Network bridge is expected to handle this. 
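+					// Reaching this arm therefore likely indicates a bug upstream;
+					// the peer is simply ignored and no `PeerData` entry is created.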
+ gum::error!( + target: LOG_TARGET, + ?peer_id, + ?observed_role, + ?err, + "Unsupported protocol version" + ); + return Ok(()) + }, + }; + state + .peer_data + .entry(peer_id) + .or_insert_with(|| PeerData { view: View::default(), version }); + if let Some(authority_ids) = maybe_authority { gum::trace!( target: LOG_TARGET, @@ -875,7 +1143,7 @@ async fn handle_network_msg( ); state.peer_ids.insert(peer_id, authority_ids); - declare(ctx, state, peer_id).await; + declare(ctx, state, &peer_id, version).await; } }, PeerViewChange(peer_id, view) => { @@ -884,17 +1152,16 @@ async fn handle_network_msg( }, PeerDisconnected(peer_id) => { gum::trace!(target: LOG_TARGET, ?peer_id, "Peer disconnected"); - state.peer_views.remove(&peer_id); + state.peer_data.remove(&peer_id); state.peer_ids.remove(&peer_id); }, OurViewChange(view) => { gum::trace!(target: LOG_TARGET, ?view, "Own view change"); - handle_our_view_change(state, view).await?; + handle_our_view_change(ctx.sender(), state, view).await?; }, - PeerMessage(remote, Versioned::V1(msg)) => { + PeerMessage(remote, msg) => { handle_incoming_peer_message(ctx, runtime, state, remote, msg).await?; }, - PeerMessage(_, Versioned::VStaging(_msg)) => {}, NewGossipTopology { .. } => { // impossible! }, @@ -904,42 +1171,99 @@ async fn handle_network_msg( } /// Handles our view changes. -async fn handle_our_view_change(state: &mut State, view: OurView) -> Result<()> { - for removed in state.view.difference(&view) { - gum::debug!(target: LOG_TARGET, relay_parent = ?removed, "Removing relay parent because our view changed."); +async fn handle_our_view_change( + sender: &mut Sender, + state: &mut State, + view: OurView, +) -> Result<()> +where + Sender: CollatorProtocolSenderTrait, +{ + let current_leaves = state.active_leaves.clone(); - if let Some(collation) = state.collations.remove(removed) { - state.collation_result_senders.remove(&collation.receipt.hash()); + let removed = current_leaves.iter().filter(|(h, _)| !view.contains(*h)); + let added = view.iter().filter(|h| !current_leaves.contains_key(h)); - match collation.status { - CollationStatus::Created => gum::warn!( - target: LOG_TARGET, - candidate_hash = ?collation.receipt.hash(), - pov_hash = ?collation.pov.hash(), - "Collation wasn't advertised to any validator.", - ), - CollationStatus::Advertised => gum::debug!( - target: LOG_TARGET, - candidate_hash = ?collation.receipt.hash(), - pov_hash = ?collation.pov.hash(), - "Collation was advertised but not requested by any validator.", - ), - CollationStatus::Requested => gum::debug!( - target: LOG_TARGET, - candidate_hash = ?collation.receipt.hash(), - pov_hash = ?collation.pov.hash(), - "Collation was requested.", - ), + for leaf in added { + let mode = prospective_parachains_mode(sender, *leaf).await?; + + if let Some(span) = view.span_per_head().get(leaf).cloned() { + let per_leaf_span = PerLeafSpan::new(span, "collator-side"); + state.span_per_relay_parent.insert(*leaf, per_leaf_span); + } + + state.active_leaves.insert(*leaf, mode); + state.per_relay_parent.insert(*leaf, PerRelayParent::new(mode)); + + if mode.is_enabled() { + state + .implicit_view + .activate_leaf(sender, *leaf) + .await + .map_err(Error::ImplicitViewFetchError)?; + + let allowed_ancestry = state + .implicit_view + .known_allowed_relay_parents_under(leaf, state.collating_on) + .unwrap_or_default(); + for block_hash in allowed_ancestry { + state + .per_relay_parent + .entry(*block_hash) + .or_insert_with(|| PerRelayParent::new(ProspectiveParachainsMode::Enabled)); } } - 
state.our_validators_groups.remove(removed); - state.span_per_relay_parent.remove(removed); - state.waiting_collation_fetches.remove(removed); - state.validator_groups_buf.remove_relay_parent(removed); } - state.view = view; - + for (leaf, mode) in removed { + state.active_leaves.remove(leaf); + // If the leaf is deactivated it still may stay in the view as a part + // of implicit ancestry. Only update the state after the hash is actually + // pruned from the block info storage. + let pruned = if mode.is_enabled() { + state.implicit_view.deactivate_leaf(*leaf) + } else { + vec![*leaf] + }; + + for removed in &pruned { + gum::debug!(target: LOG_TARGET, relay_parent = ?removed, "Removing relay parent because our view changed."); + + let collations = state + .per_relay_parent + .remove(removed) + .map(|per_relay_parent| per_relay_parent.collations) + .unwrap_or_default(); + for collation in collations.into_values() { + let candidate_hash = collation.receipt.hash(); + state.collation_result_senders.remove(&candidate_hash); + state.validator_groups_buf.remove_candidate(&candidate_hash); + + match collation.status { + CollationStatus::Created => gum::warn!( + target: LOG_TARGET, + candidate_hash = ?collation.receipt.hash(), + pov_hash = ?collation.pov.hash(), + "Collation wasn't advertised to any validator.", + ), + CollationStatus::Advertised => gum::debug!( + target: LOG_TARGET, + candidate_hash = ?collation.receipt.hash(), + pov_hash = ?collation.pov.hash(), + "Collation was advertised but not requested by any validator.", + ), + CollationStatus::Requested => gum::debug!( + target: LOG_TARGET, + candidate_hash = ?collation.receipt.hash(), + pov_hash = ?collation.pov.hash(), + "Collation was requested.", + ), + } + } + state.span_per_relay_parent.remove(removed); + state.waiting_collation_fetches.remove(removed); + } + } Ok(()) } @@ -949,7 +1273,8 @@ pub(crate) async fn run( mut ctx: Context, local_peer_id: PeerId, collator_pair: CollatorPair, - mut req_receiver: IncomingRequestReceiver, + mut req_v1_receiver: IncomingRequestReceiver, + mut req_v2_receiver: IncomingRequestReceiver, metrics: Metrics, ) -> std::result::Result<(), FatalError> { use OverseerSignal::*; @@ -957,12 +1282,14 @@ pub(crate) async fn run( let mut state = State::new(local_peer_id, collator_pair, metrics); let mut runtime = RuntimeInfo::new(None); - let reconnect_stream = super::tick_stream(RECONNECT_POLL); - pin_mut!(reconnect_stream); - loop { - let recv_req = req_receiver.recv(|| vec![COST_INVALID_REQUEST]).fuse(); - pin_mut!(recv_req); + let reputation_changes = || vec![COST_INVALID_REQUEST]; + let recv_req_v1 = req_v1_receiver.recv(reputation_changes).fuse(); + let recv_req_v2 = req_v2_receiver.recv(reputation_changes).fuse(); + pin_mut!(recv_req_v1); + pin_mut!(recv_req_v2); + + let mut reconnect_timeout = &mut state.reconnect_timeout; select! { msg = ctx.recv().fuse() => match msg.map_err(FatalError::SubsystemReceive)? 
{
				FromOrchestra::Communication { msg } => {
@@ -975,28 +1302,30 @@ pub(crate) async fn run(
 				FromOrchestra::Signal(BlockFinalized(..)) => {}
 				FromOrchestra::Signal(Conclude) => return Ok(()),
 			},
-			CollationSendResult {
-				relay_parent,
-				peer_id,
-				timed_out,
-			} = state.active_collation_fetches.select_next_some() => {
-				if timed_out {
-					gum::debug!(
-						target: LOG_TARGET,
-						?relay_parent,
-						?peer_id,
-						"Sending collation to validator timed out, carrying on with next validator",
-					);
-				} else {
-					for authority_id in state.peer_ids.get(&peer_id).into_iter().flatten() {
-						// Timeout not hit, this peer is no longer interested in this relay parent.
-						state.validator_groups_buf.reset_validator_interest(relay_parent, authority_id);
+			CollationSendResult { relay_parent, candidate_hash, peer_id, timed_out } =
+				state.active_collation_fetches.select_next_some() => {
+				let next = if let Some(waiting) = state.waiting_collation_fetches.get_mut(&relay_parent) {
+					if timed_out {
+						gum::debug!(
+							target: LOG_TARGET,
+							?relay_parent,
+							?peer_id,
+							?candidate_hash,
+							"Sending collation to validator timed out, carrying on with next validator."
+						);
+						// We try to throttle requests per relay parent to give validators
+						// more bandwidth, but if the collation is not received within the
+						// timeout, we simply start processing the next request.
+						// The request is still alive and should be kept in the waiting queue.
+					} else {
+						for authority_id in state.peer_ids.get(&peer_id).into_iter().flatten() {
+							// Timeout not hit, this peer is no longer interested in this relay parent.
+							state.validator_groups_buf.reset_validator_interest(candidate_hash, authority_id);
+						}
+						waiting.waiting_peers.remove(&(peer_id, candidate_hash));
 					}
-				}
 
-				let next = if let Some(waiting) = state.waiting_collation_fetches.get_mut(&relay_parent) {
-					waiting.waiting_peers.remove(&peer_id);
-					if let Some(next) = waiting.waiting.pop_front() {
+					if let Some(next) = waiting.req_queue.pop_front() {
 						next
 					} else {
 						waiting.collation_fetch_active = false;
@@ -1007,53 +1336,69 @@ pub(crate) async fn run(
 					continue
 				};
 
-				if let Some(collation) = state.collations.get(&relay_parent) {
+				let next_collation = {
+					let per_relay_parent = match state.per_relay_parent.get(&relay_parent) {
+						Some(per_relay_parent) => per_relay_parent,
+						None => continue,
+					};
+
+					match (per_relay_parent.prospective_parachains_mode, &next) {
+						(ProspectiveParachainsMode::Disabled, VersionedCollationRequest::V1(_)) => {
+							per_relay_parent.collations.values().next()
+						},
+						(ProspectiveParachainsMode::Enabled, VersionedCollationRequest::VStaging(req)) => {
+							per_relay_parent.collations.get(&req.payload.candidate_hash)
+						},
+						_ => {
+							// Request version is checked in `handle_incoming_request`.
+							continue
+						},
+					}
+				};
+
+				if let Some(collation) = next_collation {
					let receipt = collation.receipt.clone();
					let pov = collation.pov.clone();

					send_collation(&mut state, next, receipt, pov).await;
				}
			},
-			_ = reconnect_stream.next() => {
-				let now = Instant::now();
-				if state
-					.last_connected_at
-					.map_or(false, |timestamp| now - timestamp > RECONNECT_TIMEOUT)
-				{
-					// Remove all advertisements from the buffer if the timeout was hit.
-					// Usually, it shouldn't be necessary as leaves get deactivated, rather
-					// serves as a safeguard against finality lags.
-					state.validator_groups_buf.clear_advertisements();
-					// Returns `None` if connection request is empty.
- state.last_connected_at = - connect_to_validators(&mut ctx, &state.validator_groups_buf).await; - - gum::debug!( - target: LOG_TARGET, - timeout = ?RECONNECT_TIMEOUT, - "Timeout hit, sent a connection request. Disconnected from all validators = {}", - state.last_connected_at.is_none(), - ); + (candidate_hash, peer_id) = state.advertisement_timeouts.select_next_some() => { + // NOTE: it doesn't necessarily mean that a validator gets disconnected, + // it only will if there're no other advertisements we want to send. + // + // No-op if the collation was already fetched or went out of view. + for authority_id in state.peer_ids.get(&peer_id).into_iter().flatten() { + state + .validator_groups_buf + .reset_validator_interest(candidate_hash, &authority_id); } + } + _ = reconnect_timeout => { + state.reconnect_timeout = + connect_to_validators(&mut ctx, &state.validator_groups_buf).await; + + gum::trace!( + target: LOG_TARGET, + timeout = ?RECONNECT_TIMEOUT, + "Peer-set updated due to a timeout" + ); }, - in_req = recv_req => { - match in_req { - Ok(req) => { - log_error( - handle_incoming_request(&mut ctx, &mut state, req).await, - "Handling incoming request" - )?; - } - Err(error) => { - let jfyi = error.split().map_err(incoming::Error::from)?; - gum::debug!( - target: LOG_TARGET, - error = ?jfyi, - "Decoding incoming request failed" - ); - continue - } - } + in_req = recv_req_v1 => { + let request = in_req.map(VersionedCollationRequest::from); + + log_error( + handle_incoming_request(&mut ctx, &mut state, request).await, + "Handling incoming collation fetch request V1" + )?; + } + in_req = recv_req_v2 => { + let request = in_req.map(VersionedCollationRequest::from); + + log_error( + handle_incoming_request(&mut ctx, &mut state, request).await, + "Handling incoming collation fetch request VStaging" + )?; } } } diff --git a/node/network/collator-protocol/src/collator_side/tests.rs b/node/network/collator-protocol/src/collator_side/tests/mod.rs similarity index 74% rename from node/network/collator-protocol/src/collator_side/tests.rs rename to node/network/collator-protocol/src/collator_side/tests/mod.rs index c20a2d6c97a5..032d1782812e 100644 --- a/node/network/collator-protocol/src/collator_side/tests.rs +++ b/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -44,11 +44,15 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::v2::{ - AuthorityDiscoveryId, CollatorPair, GroupRotationInfo, ScheduledCore, SessionIndex, + AuthorityDiscoveryId, CollatorPair, GroupIndex, GroupRotationInfo, ScheduledCore, SessionIndex, SessionInfo, ValidatorId, ValidatorIndex, }; use polkadot_primitives_test_helpers::TestCandidateBuilder; +mod prospective_parachains; + +const API_VERSION_PROSPECTIVE_DISABLED: u32 = 2; + #[derive(Clone)] struct TestState { para_id: ParaId, @@ -184,6 +188,17 @@ impl TestState { )), ) .await; + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::Version(tx) + )) => { + assert_eq!(relay_parent, self.relay_parent); + tx.send(Ok(API_VERSION_PROSPECTIVE_DISABLED)).unwrap(); + } + ); } } @@ -191,7 +206,8 @@ type VirtualOverseer = test_helpers::TestSubsystemContextHandle>( @@ -212,15 +228,24 @@ fn test_harness>( let genesis_hash = Hash::repeat_byte(0xff); let req_protocol_names = ReqProtocolNames::new(&genesis_hash, None); - let (collation_req_receiver, req_cfg) = + let 
(collation_req_receiver, req_v1_cfg) = + IncomingRequest::get_config_receiver(&req_protocol_names); + let (collation_req_vstaging_receiver, req_vstaging_cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); let subsystem = async { - run(context, local_peer_id, collator_pair, collation_req_receiver, Default::default()) - .await - .unwrap(); + run( + context, + local_peer_id, + collator_pair, + collation_req_receiver, + collation_req_vstaging_receiver, + Default::default(), + ) + .await + .unwrap(); }; - let test_fut = test(TestHarness { virtual_overseer, req_cfg }); + let test_fut = test(TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg }); futures::pin_mut!(test_fut); futures::pin_mut!(subsystem); @@ -294,6 +319,17 @@ async fn setup_system(virtual_overseer: &mut VirtualOverseer, test_state: &TestS ])), ) .await; + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::Version(tx) + )) => { + assert_eq!(relay_parent, test_state.relay_parent); + tx.send(Ok(API_VERSION_PROSPECTIVE_DISABLED)).unwrap(); + } + ); } /// Result of [`distribute_collation`] @@ -302,29 +338,23 @@ struct DistributeCollation { pov_block: PoV, } -/// Create some PoV and distribute it. -async fn distribute_collation( +async fn distribute_collation_with_receipt( virtual_overseer: &mut VirtualOverseer, test_state: &TestState, - // whether or not we expect a connection request or not. + relay_parent: Hash, should_connect: bool, + candidate: CandidateReceipt, + pov: PoV, + parent_head_data_hash: Hash, ) -> DistributeCollation { - // Now we want to distribute a `PoVBlock` - let pov_block = PoV { block_data: BlockData(vec![42, 43, 44]) }; - - let pov_hash = pov_block.hash(); - - let candidate = TestCandidateBuilder { - para_id: test_state.para_id, - relay_parent: test_state.relay_parent, - pov_hash, - ..Default::default() - } - .build(); - overseer_send( virtual_overseer, - CollatorProtocolMessage::DistributeCollation(candidate.clone(), pov_block.clone(), None), + CollatorProtocolMessage::DistributeCollation( + candidate.clone(), + parent_head_data_hash, + pov.clone(), + None, + ), ) .await; @@ -332,10 +362,10 @@ async fn distribute_collation( assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( - relay_parent, + _relay_parent, RuntimeApiRequest::AvailabilityCores(tx) )) => { - assert_eq!(relay_parent, test_state.relay_parent); + assert_eq!(relay_parent, _relay_parent); tx.send(Ok(test_state.availability_cores.clone())).unwrap(); } ); @@ -347,7 +377,7 @@ async fn distribute_collation( relay_parent, RuntimeApiRequest::SessionIndexForChild(tx), )) => { - assert_eq!(relay_parent, test_state.relay_parent); + assert_eq!(relay_parent, relay_parent); tx.send(Ok(test_state.current_session_index())).unwrap(); }, @@ -355,17 +385,17 @@ async fn distribute_collation( relay_parent, RuntimeApiRequest::SessionInfo(index, tx), )) => { - assert_eq!(relay_parent, test_state.relay_parent); + assert_eq!(relay_parent, relay_parent); assert_eq!(index, test_state.current_session_index()); tx.send(Ok(Some(test_state.session_info.clone()))).unwrap(); }, AllMessages::RuntimeApi(RuntimeApiMessage::Request( - relay_parent, + _relay_parent, RuntimeApiRequest::ValidatorGroups(tx), )) => { - assert_eq!(relay_parent, test_state.relay_parent); + assert_eq!(_relay_parent, relay_parent); tx.send(Ok(( test_state.session_info.validator_groups.clone(), test_state.group_rotation_info.clone(), 
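(Editorial aside, not part of the patch: the mocked runtime API exchanges above all follow the same oneshot request/response pattern. A minimal standalone illustration, where `MockRequest` is a hypothetical stand-in for the real `RuntimeApiRequest` variants:)

	use futures::{channel::oneshot, executor::block_on};

	// Hypothetical stand-in for a runtime API request carrying a response sender.
	enum MockRequest {
		AvailabilityCores(oneshot::Sender<Vec<u32>>),
	}

	fn main() {
		block_on(async {
			let (tx, rx) = oneshot::channel();
			let request = MockRequest::AvailabilityCores(tx);

			// The test side answers the request, just like `tx.send(Ok(..))` above.
			match request {
				MockRequest::AvailabilityCores(sender) => sender.send(vec![0, 1]).unwrap(),
			}

			assert_eq!(rx.await.unwrap(), vec![0, 1]);
		});
	}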
@@ -389,13 +419,48 @@ async fn distribute_collation( ); } - DistributeCollation { candidate, pov_block } + DistributeCollation { candidate, pov_block: pov } +} + +/// Create some PoV and distribute it. +async fn distribute_collation( + virtual_overseer: &mut VirtualOverseer, + test_state: &TestState, + relay_parent: Hash, + // whether or not we expect a connection request or not. + should_connect: bool, +) -> DistributeCollation { + // Now we want to distribute a `PoVBlock` + let pov_block = PoV { block_data: BlockData(vec![42, 43, 44]) }; + + let pov_hash = pov_block.hash(); + let parent_head_data_hash = Hash::zero(); + + let candidate = TestCandidateBuilder { + para_id: test_state.para_id, + relay_parent, + pov_hash, + ..Default::default() + } + .build(); + + distribute_collation_with_receipt( + virtual_overseer, + test_state, + relay_parent, + should_connect, + candidate, + pov_block, + parent_head_data_hash, + ) + .await } /// Connect a peer async fn connect_peer( virtual_overseer: &mut VirtualOverseer, peer: PeerId, + version: CollationVersion, authority_id: Option, ) { overseer_send( @@ -403,7 +468,7 @@ async fn connect_peer( CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( peer.clone(), polkadot_node_network_protocol::ObservedRole::Authority, - CollationVersion::V1.into(), + version.into(), authority_id.map(|v| HashSet::from([v])), )), ) @@ -463,30 +528,65 @@ async fn expect_declare_msg( } /// Check that the next received message is a collation advertisement message. +/// +/// Expects vstaging message if `expected_candidate_hashes` is `Some`, v1 otherwise. async fn expect_advertise_collation_msg( virtual_overseer: &mut VirtualOverseer, peer: &PeerId, expected_relay_parent: Hash, + expected_candidate_hashes: Option>, ) { - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::NetworkBridgeTx( - NetworkBridgeTxMessage::SendCollationMessage( - to, - Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)), - ) - ) => { - assert_eq!(to[0], *peer); - assert_matches!( - wire_message, - protocol_v1::CollatorProtocolMessage::AdvertiseCollation( - relay_parent, - ) => { - assert_eq!(relay_parent, expected_relay_parent); + let mut candidate_hashes: Option> = + expected_candidate_hashes.map(|hashes| hashes.into_iter().collect()); + let iter_num = candidate_hashes.as_ref().map(|hashes| hashes.len()).unwrap_or(1); + + for _ in 0..iter_num { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::SendCollationMessage( + to, + wire_message, + ) + ) => { + assert_eq!(to[0], *peer); + match (candidate_hashes.as_mut(), wire_message) { + (None, Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message))) => { + assert_matches!( + wire_message, + protocol_v1::CollatorProtocolMessage::AdvertiseCollation( + relay_parent, + ) => { + assert_eq!(relay_parent, expected_relay_parent); + } + ); + }, + ( + Some(candidate_hashes), + Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( + wire_message, + )), + ) => { + assert_matches!( + wire_message, + protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation { + relay_parent, + candidate_hash, + .. + } => { + assert_eq!(relay_parent, expected_relay_parent); + assert!(candidate_hashes.contains(&candidate_hash)); + + // Drop the hash we've already seen. 
+ candidate_hashes.remove(&candidate_hash); + } + ); + }, + _ => panic!("Invalid advertisement"), } - ); - } - ); + } + ); + } } /// Send a message that the given peer's view changed. @@ -513,19 +613,27 @@ fn advertise_and_send_collation() { test_harness(local_peer_id, collator_pair, |test_harness| async move { let mut virtual_overseer = test_harness.virtual_overseer; - let mut req_cfg = test_harness.req_cfg; + let mut req_v1_cfg = test_harness.req_v1_cfg; + let req_vstaging_cfg = test_harness.req_vstaging_cfg; setup_system(&mut virtual_overseer, &test_state).await; let DistributeCollation { candidate, pov_block } = - distribute_collation(&mut virtual_overseer, &test_state, true).await; + distribute_collation(&mut virtual_overseer, &test_state, test_state.relay_parent, true) + .await; for (val, peer) in test_state .current_group_validator_authority_ids() .into_iter() .zip(test_state.current_group_validator_peer_ids()) { - connect_peer(&mut virtual_overseer, peer.clone(), Some(val.clone())).await; + connect_peer( + &mut virtual_overseer, + peer.clone(), + CollationVersion::V1, + Some(val.clone()), + ) + .await; } // We declare to the connected validators that we are a collator. @@ -542,17 +650,18 @@ fn advertise_and_send_collation() { // The peer is interested in a leaf that we have a collation for; // advertise it. - expect_advertise_collation_msg(&mut virtual_overseer, &peer, test_state.relay_parent).await; + expect_advertise_collation_msg(&mut virtual_overseer, &peer, test_state.relay_parent, None) + .await; // Request a collation. let (pending_response, rx) = oneshot::channel(); - req_cfg + req_v1_cfg .inbound_queue .as_mut() .unwrap() .send(RawIncomingRequest { peer, - payload: CollationFetchingRequest { + payload: request_v1::CollationFetchingRequest { relay_parent: test_state.relay_parent, para_id: test_state.para_id, } @@ -565,13 +674,13 @@ fn advertise_and_send_collation() { { let (pending_response, rx) = oneshot::channel(); - req_cfg + req_v1_cfg .inbound_queue .as_mut() .unwrap() .send(RawIncomingRequest { peer, - payload: CollationFetchingRequest { + payload: request_v1::CollationFetchingRequest { relay_parent: test_state.relay_parent, para_id: test_state.para_id, } @@ -596,8 +705,8 @@ fn advertise_and_send_collation() { assert_matches!( rx.await, Ok(full_response) => { - let CollationFetchingResponse::Collation(receipt, pov): CollationFetchingResponse - = CollationFetchingResponse::decode( + let request_v1::CollationFetchingResponse::Collation(receipt, pov): request_v1::CollationFetchingResponse + = request_v1::CollationFetchingResponse::decode( &mut full_response.result .expect("We should have a proper answer").as_ref() ) @@ -615,13 +724,13 @@ fn advertise_and_send_collation() { // Re-request a collation. let (pending_response, rx) = oneshot::channel(); - req_cfg + req_v1_cfg .inbound_queue .as_mut() .unwrap() .send(RawIncomingRequest { peer, - payload: CollationFetchingRequest { + payload: request_v1::CollationFetchingRequest { relay_parent: old_relay_parent, para_id: test_state.para_id, } @@ -635,7 +744,8 @@ fn advertise_and_send_collation() { assert!(overseer_recv_with_timeout(&mut virtual_overseer, TIMEOUT).await.is_none()); - distribute_collation(&mut virtual_overseer, &test_state, true).await; + distribute_collation(&mut virtual_overseer, &test_state, test_state.relay_parent, true) + .await; // Send info about peer's view. 
overseer_send( @@ -647,8 +757,87 @@ fn advertise_and_send_collation() { ) .await; - expect_advertise_collation_msg(&mut virtual_overseer, &peer, test_state.relay_parent).await; - TestHarness { virtual_overseer, req_cfg } + expect_advertise_collation_msg(&mut virtual_overseer, &peer, test_state.relay_parent, None) + .await; + TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg } + }); +} + +/// Tests that collator side works with vstaging network protocol +/// before async backing is enabled. +#[test] +fn advertise_collation_vstaging_protocol() { + let test_state = TestState::default(); + let local_peer_id = test_state.local_peer_id.clone(); + let collator_pair = test_state.collator_pair.clone(); + + test_harness(local_peer_id, collator_pair, |mut test_harness| async move { + let virtual_overseer = &mut test_harness.virtual_overseer; + + setup_system(virtual_overseer, &test_state).await; + + let DistributeCollation { candidate, .. } = + distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true) + .await; + + let validators = test_state.current_group_validator_authority_ids(); + assert!(validators.len() >= 2); + let peer_ids = test_state.current_group_validator_peer_ids(); + + // Connect first peer with v1. + connect_peer( + virtual_overseer, + peer_ids[0], + CollationVersion::V1, + Some(validators[0].clone()), + ) + .await; + // The rest with vstaging. + for (val, peer) in validators.iter().zip(peer_ids.iter()).skip(1) { + connect_peer( + virtual_overseer, + peer.clone(), + CollationVersion::VStaging, + Some(val.clone()), + ) + .await; + } + + // Declare messages. + expect_declare_msg(virtual_overseer, &test_state, &peer_ids[0]).await; + for peer_id in peer_ids.iter().skip(1) { + prospective_parachains::expect_declare_msg_vstaging( + virtual_overseer, + &test_state, + &peer_id, + ) + .await; + } + + // Send info about peers view. + for peer in peer_ids.iter() { + send_peer_view_change(virtual_overseer, peer, vec![test_state.relay_parent]).await; + } + + // Versioned advertisements work. + expect_advertise_collation_msg( + virtual_overseer, + &peer_ids[0], + test_state.relay_parent, + None, + ) + .await; + for peer_id in peer_ids.iter().skip(1) { + expect_advertise_collation_msg( + virtual_overseer, + peer_id, + test_state.relay_parent, + Some(vec![candidate.hash()]), // This is `Some`, advertisement is vstaging. 
+ ) + .await; + } + + test_harness }); } @@ -688,7 +877,13 @@ fn collators_declare_to_connected_peers() { setup_system(&mut test_harness.virtual_overseer, &test_state).await; // A validator connected to us - connect_peer(&mut test_harness.virtual_overseer, peer.clone(), Some(validator_id)).await; + connect_peer( + &mut test_harness.virtual_overseer, + peer.clone(), + CollationVersion::V1, + Some(validator_id), + ) + .await; expect_declare_msg(&mut test_harness.virtual_overseer, &test_state, &peer).await; test_harness }) @@ -712,10 +907,12 @@ fn collations_are_only_advertised_to_validators_with_correct_view() { setup_system(virtual_overseer, &test_state).await; // A validator connected to us - connect_peer(virtual_overseer, peer.clone(), Some(validator_id)).await; + connect_peer(virtual_overseer, peer.clone(), CollationVersion::V1, Some(validator_id)) + .await; // Connect the second validator - connect_peer(virtual_overseer, peer2.clone(), Some(validator_id2)).await; + connect_peer(virtual_overseer, peer2.clone(), CollationVersion::V1, Some(validator_id2)) + .await; expect_declare_msg(virtual_overseer, &test_state, &peer).await; expect_declare_msg(virtual_overseer, &test_state, &peer2).await; @@ -723,15 +920,17 @@ fn collations_are_only_advertised_to_validators_with_correct_view() { // And let it tell us that it is has the same view. send_peer_view_change(virtual_overseer, &peer2, vec![test_state.relay_parent]).await; - distribute_collation(virtual_overseer, &test_state, true).await; + distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true).await; - expect_advertise_collation_msg(virtual_overseer, &peer2, test_state.relay_parent).await; + expect_advertise_collation_msg(virtual_overseer, &peer2, test_state.relay_parent, None) + .await; // The other validator announces that it changed its view. send_peer_view_change(virtual_overseer, &peer, vec![test_state.relay_parent]).await; // After changing the view we should receive the advertisement - expect_advertise_collation_msg(virtual_overseer, &peer, test_state.relay_parent).await; + expect_advertise_collation_msg(virtual_overseer, &peer, test_state.relay_parent, None) + .await; test_harness }) } @@ -754,29 +953,32 @@ fn collate_on_two_different_relay_chain_blocks() { setup_system(virtual_overseer, &test_state).await; // A validator connected to us - connect_peer(virtual_overseer, peer.clone(), Some(validator_id)).await; + connect_peer(virtual_overseer, peer.clone(), CollationVersion::V1, Some(validator_id)) + .await; // Connect the second validator - connect_peer(virtual_overseer, peer2.clone(), Some(validator_id2)).await; + connect_peer(virtual_overseer, peer2.clone(), CollationVersion::V1, Some(validator_id2)) + .await; expect_declare_msg(virtual_overseer, &test_state, &peer).await; expect_declare_msg(virtual_overseer, &test_state, &peer2).await; - distribute_collation(virtual_overseer, &test_state, true).await; + distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true).await; let old_relay_parent = test_state.relay_parent; // Advance to a new round, while informing the subsystem that the old and the new relay parent are active. 
test_state.advance_to_new_round(virtual_overseer, true).await; - distribute_collation(virtual_overseer, &test_state, true).await; + distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true).await; send_peer_view_change(virtual_overseer, &peer, vec![old_relay_parent]).await; - expect_advertise_collation_msg(virtual_overseer, &peer, old_relay_parent).await; + expect_advertise_collation_msg(virtual_overseer, &peer, old_relay_parent, None).await; send_peer_view_change(virtual_overseer, &peer2, vec![test_state.relay_parent]).await; - expect_advertise_collation_msg(virtual_overseer, &peer2, test_state.relay_parent).await; + expect_advertise_collation_msg(virtual_overseer, &peer2, test_state.relay_parent, None) + .await; test_harness }) } @@ -796,17 +998,25 @@ fn validator_reconnect_does_not_advertise_a_second_time() { setup_system(virtual_overseer, &test_state).await; // A validator connected to us - connect_peer(virtual_overseer, peer.clone(), Some(validator_id.clone())).await; + connect_peer( + virtual_overseer, + peer.clone(), + CollationVersion::V1, + Some(validator_id.clone()), + ) + .await; expect_declare_msg(virtual_overseer, &test_state, &peer).await; - distribute_collation(virtual_overseer, &test_state, true).await; + distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true).await; send_peer_view_change(virtual_overseer, &peer, vec![test_state.relay_parent]).await; - expect_advertise_collation_msg(virtual_overseer, &peer, test_state.relay_parent).await; + expect_advertise_collation_msg(virtual_overseer, &peer, test_state.relay_parent, None) + .await; // Disconnect and reconnect directly disconnect_peer(virtual_overseer, peer.clone()).await; - connect_peer(virtual_overseer, peer.clone(), Some(validator_id)).await; + connect_peer(virtual_overseer, peer.clone(), CollationVersion::V1, Some(validator_id)) + .await; expect_declare_msg(virtual_overseer, &test_state, &peer).await; send_peer_view_change(virtual_overseer, &peer, vec![test_state.relay_parent]).await; @@ -832,7 +1042,8 @@ fn collators_reject_declare_messages() { setup_system(virtual_overseer, &test_state).await; // A validator connected to us - connect_peer(virtual_overseer, peer.clone(), Some(validator_id)).await; + connect_peer(virtual_overseer, peer.clone(), CollationVersion::V1, Some(validator_id)) + .await; expect_declare_msg(virtual_overseer, &test_state, &peer).await; overseer_send( @@ -879,19 +1090,21 @@ where test_harness(local_peer_id, collator_pair, |mut test_harness| async move { let virtual_overseer = &mut test_harness.virtual_overseer; - let req_cfg = &mut test_harness.req_cfg; + let req_cfg = &mut test_harness.req_v1_cfg; setup_system(virtual_overseer, &test_state).await; let DistributeCollation { candidate, pov_block } = - distribute_collation(virtual_overseer, &test_state, true).await; + distribute_collation(virtual_overseer, &test_state, test_state.relay_parent, true) + .await; for (val, peer) in test_state .current_group_validator_authority_ids() .into_iter() .zip(test_state.current_group_validator_peer_ids()) { - connect_peer(virtual_overseer, peer.clone(), Some(val.clone())).await; + connect_peer(virtual_overseer, peer.clone(), CollationVersion::V1, Some(val.clone())) + .await; } // We declare to the connected validators that we are a collator. @@ -910,10 +1123,20 @@ where // The peer is interested in a leaf that we have a collation for; // advertise it. 
- expect_advertise_collation_msg(virtual_overseer, &validator_0, test_state.relay_parent) - .await; - expect_advertise_collation_msg(virtual_overseer, &validator_1, test_state.relay_parent) - .await; + expect_advertise_collation_msg( + virtual_overseer, + &validator_0, + test_state.relay_parent, + None, + ) + .await; + expect_advertise_collation_msg( + virtual_overseer, + &validator_1, + test_state.relay_parent, + None, + ) + .await; // Request a collation. let (pending_response, rx) = oneshot::channel(); @@ -923,7 +1146,7 @@ where .unwrap() .send(RawIncomingRequest { peer: validator_0, - payload: CollationFetchingRequest { + payload: request_v1::CollationFetchingRequest { relay_parent: test_state.relay_parent, para_id: test_state.para_id, } @@ -937,8 +1160,8 @@ where let feedback_tx = assert_matches!( rx.await, Ok(full_response) => { - let CollationFetchingResponse::Collation(receipt, pov): CollationFetchingResponse - = CollationFetchingResponse::decode( + let request_v1::CollationFetchingResponse::Collation(receipt, pov): request_v1::CollationFetchingResponse + = request_v1::CollationFetchingResponse::decode( &mut full_response.result .expect("We should have a proper answer").as_ref() ) @@ -958,7 +1181,7 @@ where .unwrap() .send(RawIncomingRequest { peer: validator_1, - payload: CollationFetchingRequest { + payload: request_v1::CollationFetchingRequest { relay_parent: test_state.relay_parent, para_id: test_state.para_id, } @@ -974,8 +1197,8 @@ where assert_matches!( rx.await, Ok(full_response) => { - let CollationFetchingResponse::Collation(receipt, pov): CollationFetchingResponse - = CollationFetchingResponse::decode( + let request_v1::CollationFetchingResponse::Collation(receipt, pov): request_v1::CollationFetchingResponse + = request_v1::CollationFetchingResponse::decode( &mut full_response.result .expect("We should have a proper answer").as_ref() ) @@ -999,7 +1222,8 @@ fn connect_to_buffered_groups() { test_harness(local_peer_id, collator_pair, |test_harness| async move { let mut virtual_overseer = test_harness.virtual_overseer; - let mut req_cfg = test_harness.req_cfg; + let mut req_cfg = test_harness.req_v1_cfg; + let req_vstaging_cfg = test_harness.req_vstaging_cfg; setup_system(&mut virtual_overseer, &test_state).await; @@ -1007,7 +1231,8 @@ fn connect_to_buffered_groups() { let peers_a = test_state.current_group_validator_peer_ids(); assert!(group_a.len() > 1); - distribute_collation(&mut virtual_overseer, &test_state, false).await; + distribute_collation(&mut virtual_overseer, &test_state, test_state.relay_parent, false) + .await; assert_matches!( overseer_recv(&mut virtual_overseer).await, @@ -1021,7 +1246,13 @@ fn connect_to_buffered_groups() { let head_a = test_state.relay_parent; for (val, peer) in group_a.iter().zip(&peers_a) { - connect_peer(&mut virtual_overseer, peer.clone(), Some(val.clone())).await; + connect_peer( + &mut virtual_overseer, + peer.clone(), + CollationVersion::V1, + Some(val.clone()), + ) + .await; } for peer_id in &peers_a { @@ -1031,7 +1262,7 @@ fn connect_to_buffered_groups() { // Update views. 
for peer_id in &peers_a { send_peer_view_change(&mut virtual_overseer, peer_id, vec![head_a]).await; - expect_advertise_collation_msg(&mut virtual_overseer, peer_id, head_a).await; + expect_advertise_collation_msg(&mut virtual_overseer, peer_id, head_a, None).await; } let peer = peers_a[0]; @@ -1043,7 +1274,7 @@ fn connect_to_buffered_groups() { .unwrap() .send(RawIncomingRequest { peer, - payload: CollationFetchingRequest { + payload: request_v1::CollationFetchingRequest { relay_parent: head_a, para_id: test_state.para_id, } @@ -1055,14 +1286,17 @@ fn connect_to_buffered_groups() { assert_matches!( rx.await, Ok(full_response) => { - let CollationFetchingResponse::Collation(..): CollationFetchingResponse = - CollationFetchingResponse::decode( + let request_v1::CollationFetchingResponse::Collation(..) = + request_v1::CollationFetchingResponse::decode( &mut full_response.result.expect("We should have a proper answer").as_ref(), ) .expect("Decoding should work"); } ); + // Let the subsystem process the collation event. + test_helpers::Yield::new().await; + test_state.advance_to_new_round(&mut virtual_overseer, true).await; test_state.group_rotation_info = test_state.group_rotation_info.bump_rotation(); @@ -1071,7 +1305,8 @@ fn connect_to_buffered_groups() { assert_ne!(head_a, head_b); assert_ne!(group_a, group_b); - distribute_collation(&mut virtual_overseer, &test_state, false).await; + distribute_collation(&mut virtual_overseer, &test_state, test_state.relay_parent, false) + .await; // Should be connected to both groups except for the validator that fetched advertised // collation. @@ -1088,6 +1323,6 @@ fn connect_to_buffered_groups() { } ); - TestHarness { virtual_overseer, req_cfg } + TestHarness { virtual_overseer, req_v1_cfg: req_cfg, req_vstaging_cfg } }); } diff --git a/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs new file mode 100644 index 000000000000..d98db5b8eb82 --- /dev/null +++ b/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -0,0 +1,562 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Tests for the collator side with enabled prospective parachains. + +use super::*; + +use polkadot_node_subsystem::messages::{ChainApiMessage, ProspectiveParachainsMessage}; +use polkadot_primitives::v2::{Header, OccupiedCore}; + +const ALLOWED_ANCESTRY: u32 = 3; + +fn get_parent_hash(hash: Hash) -> Hash { + Hash::from_low_u64_be(hash.to_low_u64_be() + 1) } + +/// Handle a view update. +async fn update_view( + virtual_overseer: &mut VirtualOverseer, + test_state: &TestState, + new_view: Vec<(Hash, u32)>, // Hash and block number. + activated: u8, // How many new heads does this update contain?
+) { + let new_view: HashMap<Hash, u32> = HashMap::from_iter(new_view); + + let our_view = + OurView::new(new_view.keys().map(|hash| (*hash, Arc::new(jaeger::Span::Disabled))), 0); + + overseer_send( + virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange(our_view)), + ) + .await; + + let mut next_overseer_message = None; + for _ in 0..activated { + let (leaf_hash, leaf_number) = assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::Version(tx), + )) => { + tx.send(Ok(RuntimeApiRequest::VALIDITY_CONSTRAINTS)).unwrap(); + (parent, new_view.get(&parent).copied().expect("Unknown parent requested")) + } + ); + + let min_number = leaf_number.saturating_sub(ALLOWED_ANCESTRY); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx), + ) if parent == leaf_hash => { + tx.send(vec![(test_state.para_id, min_number)]).unwrap(); + } + ); + + let ancestry_len = leaf_number + 1 - min_number; + let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) + .take(ancestry_len as usize); + let ancestry_numbers = (min_number..=leaf_number).rev(); + let mut ancestry_iter = ancestry_hashes.clone().zip(ancestry_numbers).peekable(); + + loop { + let (hash, number) = match ancestry_iter.next() { + Some((hash, number)) => (hash, number), + None => break, + }; + + // May be `None` for the last element. + let parent_hash = + ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash)); + + let msg = match next_overseer_message.take() { + Some(msg) => Some(msg), + None => + overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(50)).await, + }; + + let msg = match msg { + Some(msg) => msg, + None => { + // We're done. + return + }, + }; + + if !matches!( + &msg, + AllMessages::ChainApi(ChainApiMessage::BlockHeader(_hash, ..)) + if *_hash == hash + ) { + // Ancestry has already been cached for this leaf. + next_overseer_message.replace(msg); + break + } + + assert_matches!( + msg, + AllMessages::ChainApi(ChainApiMessage::BlockHeader(.., tx)) => { + let header = Header { + parent_hash, + number, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + }; + + tx.send(Ok(Some(header))).unwrap(); + } + ); + } + } +} + +/// Check that the next received message is a `Declare` message. +pub(super) async fn expect_declare_msg_vstaging( + virtual_overseer: &mut VirtualOverseer, + test_state: &TestState, + peer: &PeerId, +) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendCollationMessage( + to, + Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( + wire_message, + )), + )) => { + assert_eq!(to[0], *peer); + assert_matches!( + wire_message, + protocol_vstaging::CollatorProtocolMessage::Declare( + collator_id, + para_id, + signature, + ) => { + assert!(signature.verify( + &*protocol_vstaging::declare_signature_payload(&test_state.local_peer_id), + &collator_id), + ); + assert_eq!(collator_id, test_state.collator_pair.public()); + assert_eq!(para_id, test_state.para_id); + } + ); + } + ); +} +
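// ---------------------------------------------------------------------------
// Editorial aside (not part of the patch): a compact restatement of how
// `update_view` above mocks relay-chain ancestry. In this test chain a parent
// hash is `child hash + 1` (see `get_parent_hash`), so hashes ascend
// numerically while block numbers descend from the leaf down to `min_number`.
fn mock_ancestry(leaf_hash: Hash, leaf_number: u32, min_number: u32) -> Vec<(Hash, u32)> {
	let len = (leaf_number + 1 - min_number) as usize;
	std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h)))
		.take(len)
		// Pair in reverse: the leaf gets `leaf_number`, its parent
		// `leaf_number - 1`, and so on down to `min_number`.
		.zip((min_number..=leaf_number).rev())
		.collect()
}
// E.g. with `leaf_number = 64` and `ALLOWED_ANCESTRY = 3` this yields four
// pairs: (leaf, 64), (parent, 63), (grandparent, 62), (great-grandparent, 61).
// ---------------------------------------------------------------------------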
+/// Test that a collator distributes a collation from the allowed ancestry +/// to the correct validator group. +#[test] +fn distribute_collation_from_implicit_view() { + let head_a = Hash::from_low_u64_be(126); + let head_a_num: u32 = 66; + + // Grandparent of head `a`. + let head_b = Hash::from_low_u64_be(128); + let head_b_num: u32 = 64; + + // Grandparent of head `b`. + let head_c = Hash::from_low_u64_be(130); + let head_c_num = 62; + + let group_rotation_info = GroupRotationInfo { + session_start_block: head_c_num - 2, + group_rotation_frequency: 3, + now: head_c_num, + }; + + let mut test_state = TestState::default(); + test_state.group_rotation_info = group_rotation_info; + + let local_peer_id = test_state.local_peer_id; + let collator_pair = test_state.collator_pair.clone(); + + test_harness(local_peer_id, collator_pair, |mut test_harness| async move { + let virtual_overseer = &mut test_harness.virtual_overseer; + + // Set collating para id. + overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) + .await; + // Activated leaf is `b`, but the collation will be based on `c`. + update_view(virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + + let validator_peer_ids = test_state.current_group_validator_peer_ids(); + for (val, peer) in test_state + .current_group_validator_authority_ids() + .into_iter() + .zip(validator_peer_ids.clone()) + { + connect_peer( + virtual_overseer, + peer.clone(), + CollationVersion::VStaging, + Some(val.clone()), + ) + .await; + } + + // Collator declared itself to each peer. + for peer_id in &validator_peer_ids { + expect_declare_msg_vstaging(virtual_overseer, &test_state, peer_id).await; + } + + let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let parent_head_data_hash = Hash::repeat_byte(0xAA); + let candidate = TestCandidateBuilder { + para_id: test_state.para_id, + relay_parent: head_c, + pov_hash: pov.hash(), + ..Default::default() + } + .build(); + let DistributeCollation { candidate, pov_block: _ } = distribute_collation_with_receipt( + virtual_overseer, + &test_state, + head_c, + false, // Check the group manually. + candidate, + pov, + parent_head_data_hash, + ) + .await; + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::ConnectToValidators { validator_ids, .. } + ) => { + let expected_validators = test_state.current_group_validator_authority_ids(); + + assert_eq!(expected_validators, validator_ids); + } + ); + + let candidate_hash = candidate.hash(); + + // Update peer views. + for peer_id in &validator_peer_ids { + send_peer_view_change(virtual_overseer, peer_id, vec![head_b]).await; + expect_advertise_collation_msg( + virtual_overseer, + peer_id, + head_c, + Some(vec![candidate_hash]), + ) + .await; + } + + // Head `c` goes out of view. + // Build a different candidate for this relay parent and attempt to distribute it. + update_view(virtual_overseer, &test_state, vec![(head_a, head_a_num)], 1).await; + + let pov = PoV { block_data: BlockData(vec![4, 5, 6]) }; + let parent_head_data_hash = Hash::repeat_byte(0xBB); + let candidate = TestCandidateBuilder { + para_id: test_state.para_id, + relay_parent: head_c, + pov_hash: pov.hash(), + ..Default::default() + } + .build(); + overseer_send( + virtual_overseer, + CollatorProtocolMessage::DistributeCollation( + candidate.clone(), + parent_head_data_hash, + pov.clone(), + None, + ), + ) + .await; + + // Parent out of view, nothing happens.
+ assert!(overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(100)) + .await + .is_none()); + + test_harness + }) +} + +/// Tests that collator can distribute up to `MAX_CANDIDATE_DEPTH + 1` candidates +/// per relay parent. +#[test] +fn distribute_collation_up_to_limit() { + let test_state = TestState::default(); + + let local_peer_id = test_state.local_peer_id; + let collator_pair = test_state.collator_pair.clone(); + + test_harness(local_peer_id, collator_pair, |mut test_harness| async move { + let virtual_overseer = &mut test_harness.virtual_overseer; + + let head_a = Hash::from_low_u64_be(128); + let head_a_num: u32 = 64; + + // Grandparent of head `a`. + let head_b = Hash::from_low_u64_be(130); + + // Set collating para id. + overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) + .await; + // Activated leaf is `a`, but the collation will be based on `b`. + update_view(virtual_overseer, &test_state, vec![(head_a, head_a_num)], 1).await; + + for i in 0..(MAX_CANDIDATE_DEPTH + 1) { + let pov = PoV { block_data: BlockData(vec![i as u8]) }; + let parent_head_data_hash = Hash::repeat_byte(0xAA); + let candidate = TestCandidateBuilder { + para_id: test_state.para_id, + relay_parent: head_b, + pov_hash: pov.hash(), + ..Default::default() + } + .build(); + distribute_collation_with_receipt( + virtual_overseer, + &test_state, + head_b, + true, + candidate, + pov, + parent_head_data_hash, + ) + .await; + } + + let pov = PoV { block_data: BlockData(vec![10, 12, 6]) }; + let parent_head_data_hash = Hash::repeat_byte(0xBB); + let candidate = TestCandidateBuilder { + para_id: test_state.para_id, + relay_parent: head_b, + pov_hash: pov.hash(), + ..Default::default() + } + .build(); + overseer_send( + virtual_overseer, + CollatorProtocolMessage::DistributeCollation( + candidate.clone(), + parent_head_data_hash, + pov.clone(), + None, + ), + ) + .await; + + // Limit has been reached. + assert!(overseer_recv_with_timeout(virtual_overseer, Duration::from_millis(100)) + .await + .is_none()); + + test_harness + }) +} + +/// Tests that collator correctly handles peer V2 requests. +#[test] +fn advertise_and_send_collation_by_hash() { + let test_state = TestState::default(); + + let local_peer_id = test_state.local_peer_id; + let collator_pair = test_state.collator_pair.clone(); + + test_harness(local_peer_id, collator_pair, |test_harness| async move { + let mut virtual_overseer = test_harness.virtual_overseer; + let req_v1_cfg = test_harness.req_v1_cfg; + let mut req_vstaging_cfg = test_harness.req_vstaging_cfg; + + let head_a = Hash::from_low_u64_be(128); + let head_a_num: u32 = 64; + + // Parent of head `a`. + let head_b = Hash::from_low_u64_be(129); + let head_b_num: u32 = 63; + + // Set collating para id. 
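// ---------------------------------------------------------------------------
// Editorial aside (not part of the patch): the bound exercised by
// `distribute_collation_up_to_limit` above. With prospective parachains
// enabled, a collator may distribute at most `MAX_CANDIDATE_DEPTH + 1`
// collations per relay parent; the next `DistributeCollation` is silently
// dropped, which is why the test expects no further messages.
fn may_distribute(distributed_so_far: usize) -> bool {
	distributed_so_far < MAX_CANDIDATE_DEPTH + 1
}
// With `MAX_CANDIDATE_DEPTH == 4`: the fifth collation (index 4) is still
// accepted, the sixth (index 5) is not:
// assert!(may_distribute(4) && !may_distribute(5));
// ---------------------------------------------------------------------------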
+ overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::CollateOn(test_state.para_id), + ) + .await; + update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + update_view(&mut virtual_overseer, &test_state, vec![(head_a, head_a_num)], 1).await; + + let candidates: Vec<_> = (0..2) + .map(|i| { + let pov = PoV { block_data: BlockData(vec![i as u8]) }; + let candidate = TestCandidateBuilder { + para_id: test_state.para_id, + relay_parent: head_b, + pov_hash: pov.hash(), + ..Default::default() + } + .build(); + (candidate, pov) + }) + .collect(); + for (candidate, pov) in &candidates { + distribute_collation_with_receipt( + &mut virtual_overseer, + &test_state, + head_b, + true, + candidate.clone(), + pov.clone(), + Hash::zero(), + ) + .await; + } + + let peer = test_state.validator_peer_id[0].clone(); + let validator_id = test_state.current_group_validator_authority_ids()[0].clone(); + connect_peer( + &mut virtual_overseer, + peer.clone(), + CollationVersion::VStaging, + Some(validator_id.clone()), + ) + .await; + expect_declare_msg_vstaging(&mut virtual_overseer, &test_state, &peer).await; + + // Head `b` is not a leaf, but both advertisements are still relevant. + send_peer_view_change(&mut virtual_overseer, &peer, vec![head_b]).await; + let hashes: Vec<_> = candidates.iter().map(|(candidate, _)| candidate.hash()).collect(); + expect_advertise_collation_msg(&mut virtual_overseer, &peer, head_b, Some(hashes)).await; + + for (candidate, pov_block) in candidates { + let (pending_response, rx) = oneshot::channel(); + req_vstaging_cfg + .inbound_queue + .as_mut() + .unwrap() + .send(RawIncomingRequest { + peer, + payload: request_vstaging::CollationFetchingRequest { + relay_parent: head_b, + para_id: test_state.para_id, + candidate_hash: candidate.hash(), + } + .encode(), + pending_response, + }) + .await + .unwrap(); + + assert_matches!( + rx.await, + Ok(full_response) => { + // Response is the same for vstaging. + let request_v1::CollationFetchingResponse::Collation(receipt, pov): request_v1::CollationFetchingResponse + = request_v1::CollationFetchingResponse::decode( + &mut full_response.result + .expect("We should have a proper answer").as_ref() + ) + .expect("Decoding should work"); + assert_eq!(receipt, candidate); + assert_eq!(pov, pov_block); + } + ); + } + + TestHarness { virtual_overseer, req_v1_cfg, req_vstaging_cfg } + }) +} + +/// Tests that collator distributes collation built on top of occupied core. +#[test] +fn advertise_core_occupied() { + let mut test_state = TestState::default(); + let candidate = + TestCandidateBuilder { para_id: test_state.para_id, ..Default::default() }.build(); + test_state.availability_cores[0] = CoreState::Occupied(OccupiedCore { + next_up_on_available: None, + occupied_since: 0, + time_out_at: 0, + next_up_on_time_out: None, + availability: BitVec::default(), + group_responsible: GroupIndex(0), + candidate_hash: candidate.hash(), + candidate_descriptor: candidate.descriptor, + }); + + let local_peer_id = test_state.local_peer_id; + let collator_pair = test_state.collator_pair.clone(); + + test_harness(local_peer_id, collator_pair, |mut test_harness| async move { + let virtual_overseer = &mut test_harness.virtual_overseer; + + let head_a = Hash::from_low_u64_be(128); + let head_a_num: u32 = 64; + + // Grandparent of head `a`. + let head_b = Hash::from_low_u64_be(130); + + // Set collating para id. 
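// ---------------------------------------------------------------------------
// Editorial aside (not part of the patch): the shape of the two fetch
// requests driven in `advertise_and_send_collation_by_hash` above. The
// vstaging request pins a concrete candidate by hash, which is what lets
// several collations share one relay parent; the response type is shared
// between versions, hence the test decodes a
// `request_v1::CollationFetchingResponse` in both cases.
fn build_requests(
	relay_parent: Hash,
	para_id: ParaId,
	candidate_hash: CandidateHash,
) -> (request_v1::CollationFetchingRequest, request_vstaging::CollationFetchingRequest) {
	(
		request_v1::CollationFetchingRequest { relay_parent, para_id },
		request_vstaging::CollationFetchingRequest { relay_parent, para_id, candidate_hash },
	)
}
// ---------------------------------------------------------------------------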
+ overseer_send(virtual_overseer, CollatorProtocolMessage::CollateOn(test_state.para_id)) + .await; + // Activated leaf is `a`, but the collation will be based on `b`. + update_view(virtual_overseer, &test_state, vec![(head_a, head_a_num)], 1).await; + + let pov = PoV { block_data: BlockData(vec![1, 2, 3]) }; + let candidate = TestCandidateBuilder { + para_id: test_state.para_id, + relay_parent: head_b, + pov_hash: pov.hash(), + ..Default::default() + } + .build(); + let candidate_hash = candidate.hash(); + distribute_collation_with_receipt( + virtual_overseer, + &test_state, + head_b, + true, + candidate, + pov, + Hash::zero(), + ) + .await; + + let validators = test_state.current_group_validator_authority_ids(); + let peer_ids = test_state.current_group_validator_peer_ids(); + + connect_peer( + virtual_overseer, + peer_ids[0], + CollationVersion::VStaging, + Some(validators[0].clone()), + ) + .await; + expect_declare_msg_vstaging(virtual_overseer, &test_state, &peer_ids[0]).await; + // Peer is aware of the leaf. + send_peer_view_change(virtual_overseer, &peer_ids[0], vec![head_a]).await; + + // Collation is advertised. + expect_advertise_collation_msg( + virtual_overseer, + &peer_ids[0], + head_b, + Some(vec![candidate_hash]), + ) + .await; + + test_harness + }) +} diff --git a/node/network/collator-protocol/src/collator_side/validators_buffer.rs b/node/network/collator-protocol/src/collator_side/validators_buffer.rs index 5bb31c72d6c5..9f1817aa2051 100644 --- a/node/network/collator-protocol/src/collator_side/validators_buffer.rs +++ b/node/network/collator-protocol/src/collator_side/validators_buffer.rs @@ -23,21 +23,27 @@ //! We keep a simple FIFO buffer of N validator groups and a bitvec for each advertisement, //! 1 indicating we want to be connected to i-th validator in a buffer, 0 otherwise. //! -//! The bit is set to 1 for the whole **group** whenever it's inserted into the buffer. Given a relay -//! parent, one can reset a bit back to 0 for particular **validator**. For example, if a collation +//! The bit is set to 1 for the whole **group** whenever it's inserted into the buffer. Given a candidate +//! hash, one can reset a bit back to 0 for particular **validator**. For example, if a collation //! was fetched or some timeout has been hit. //! //! The bitwise OR over known advertisements gives us validators indices for connection request. use std::{ collections::{HashMap, VecDeque}, + future::Future, num::NonZeroUsize, ops::Range, + pin::Pin, + task::{Context, Poll}, + time::Duration, }; use bitvec::{bitvec, vec::BitVec}; +use futures::FutureExt; -use polkadot_primitives::v2::{AuthorityDiscoveryId, GroupIndex, Hash, SessionIndex}; +use polkadot_node_network_protocol::PeerId; +use polkadot_primitives::v2::{AuthorityDiscoveryId, CandidateHash, GroupIndex, SessionIndex}; /// The ring buffer stores at most this many unique validator groups. /// @@ -66,9 +72,9 @@ pub struct ValidatorGroupsBuffer { group_infos: VecDeque, /// Continuous buffer of validators discovery keys. validators: VecDeque, - /// Mapping from relay-parent to bit-vectors with bits for all `validators`. + /// Mapping from candidate hashes to bit-vectors with bits for all `validators`. /// Invariants kept: All bit-vectors are guaranteed to have the same size. - should_be_connected: HashMap, + should_be_connected: HashMap, /// Buffer capacity, limits the number of **groups** tracked. cap: NonZeroUsize, } @@ -107,7 +113,7 @@ impl ValidatorGroupsBuffer { /// of the buffer. 
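// ---------------------------------------------------------------------------
// Editorial aside (not part of the patch): the "bitwise OR over known
// advertisements" described in the validators-buffer module docs above,
// written out. Each advertisement owns one bitvec over the flat `validators`
// buffer; the union of all of them yields the indices of validators we still
// want to stay connected to.
fn indices_to_connect(
	should_be_connected: &HashMap<CandidateHash, BitVec>,
	n_validators: usize,
) -> Vec<usize> {
	(0..n_validators)
		.filter(|&i| should_be_connected.values().any(|bits| bits[i]))
		.collect()
}
// ---------------------------------------------------------------------------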
pub fn note_collation_advertised( &mut self, - relay_parent: Hash, + candidate_hash: CandidateHash, session_index: SessionIndex, group_index: GroupIndex, validators: &[AuthorityDiscoveryId], @@ -121,19 +127,19 @@ impl ValidatorGroupsBuffer { }) { Some((idx, group)) => { let group_start_idx = self.group_lengths_iter().take(idx).sum(); - self.set_bits(relay_parent, group_start_idx..(group_start_idx + group.len)); + self.set_bits(candidate_hash, group_start_idx..(group_start_idx + group.len)); }, - None => self.push(relay_parent, session_index, group_index, validators), + None => self.push(candidate_hash, session_index, group_index, validators), } } /// Note that a validator is no longer interested in a given relay parent. pub fn reset_validator_interest( &mut self, - relay_parent: Hash, + candidate_hash: CandidateHash, authority_id: &AuthorityDiscoveryId, ) { - let bits = match self.should_be_connected.get_mut(&relay_parent) { + let bits = match self.should_be_connected.get_mut(&candidate_hash) { Some(bits) => bits, None => return, }; @@ -145,17 +151,12 @@ impl ValidatorGroupsBuffer { } } - /// Remove relay parent from the buffer. + /// Remove advertised candidate from the buffer. /// /// The buffer will no longer track which validators are interested in a corresponding /// advertisement. - pub fn remove_relay_parent(&mut self, relay_parent: &Hash) { - self.should_be_connected.remove(relay_parent); - } - - /// Removes all advertisements from the buffer. - pub fn clear_advertisements(&mut self) { - self.should_be_connected.clear(); + pub fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { + self.should_be_connected.remove(candidate_hash); } /// Pushes a new group to the buffer along with advertisement, setting all validators @@ -164,7 +165,7 @@ impl ValidatorGroupsBuffer { /// If the buffer is full, drops group from the tail. fn push( &mut self, - relay_parent: Hash, + candidate_hash: CandidateHash, session_index: SessionIndex, group_index: GroupIndex, validators: &[AuthorityDiscoveryId], @@ -193,17 +194,17 @@ impl ValidatorGroupsBuffer { self.should_be_connected .values_mut() .for_each(|bits| bits.resize(new_len, false)); - self.set_bits(relay_parent, group_start_idx..(group_start_idx + validators.len())); + self.set_bits(candidate_hash, group_start_idx..(group_start_idx + validators.len())); } /// Sets advertisement bits to 1 in a given range (usually corresponding to some group). /// If the relay parent is unknown, inserts 0-initialized bitvec first. /// /// The range must be ensured to be within bounds. - fn set_bits(&mut self, relay_parent: Hash, range: Range) { + fn set_bits(&mut self, candidate_hash: CandidateHash, range: Range) { let bits = self .should_be_connected - .entry(relay_parent) + .entry(candidate_hash) .or_insert_with(|| bitvec![0; self.validators.len()]); bits[range].fill(true); @@ -217,9 +218,40 @@ impl ValidatorGroupsBuffer { } } +/// A timeout for resetting validators' interests in collations. +pub const RESET_INTEREST_TIMEOUT: Duration = Duration::from_secs(6); + +/// A future that returns a candidate hash along with validator discovery +/// keys once a timeout hit. +/// +/// If a validator doesn't manage to fetch a collation within this timeout +/// we should reset its interest in this advertisement in a buffer. For example, +/// when the PoV was already requested from another peer. 
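// ---------------------------------------------------------------------------
// Editorial aside (not part of the patch): one plausible way the
// `ResetInterestTimeout` declared just below gets driven. Timeouts for all
// in-flight advertisements are polled as a set; whichever fires first tells
// us which validator's interest bit to clear (the authority-id lookup from
// `PeerId` is elided here and is an assumption about the surrounding code).
async fn drive_timeouts(mut timeouts: futures::stream::FuturesUnordered<ResetInterestTimeout>) {
	use futures::StreamExt;
	while let Some((candidate_hash, peer_id)) = timeouts.next().await {
		// In the real subsystem: map `peer_id` to an `AuthorityDiscoveryId`
		// and call `buffer.reset_validator_interest(candidate_hash, &authority_id)`.
		let _ = (candidate_hash, peer_id);
	}
}
// ---------------------------------------------------------------------------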
+pub struct ResetInterestTimeout { + fut: futures_timer::Delay, + candidate_hash: CandidateHash, + peer_id: PeerId, +} + +impl ResetInterestTimeout { + /// Returns new `ResetInterestTimeout` that resolves after given timeout. + pub fn new(candidate_hash: CandidateHash, peer_id: PeerId, delay: Duration) -> Self { + Self { fut: futures_timer::Delay::new(delay), candidate_hash, peer_id } + } +} + +impl Future for ResetInterestTimeout { + type Output = (CandidateHash, PeerId); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + self.fut.poll_unpin(cx).map(|_| (self.candidate_hash, self.peer_id)) + } +} + #[cfg(test)] mod tests { use super::*; + use polkadot_primitives::v2::Hash; use sp_keyring::Sr25519Keyring; #[test] @@ -227,8 +259,8 @@ mod tests { let cap = NonZeroUsize::new(1).unwrap(); let mut buf = ValidatorGroupsBuffer::with_capacity(cap); - let hash_a = Hash::repeat_byte(0x1); - let hash_b = Hash::repeat_byte(0x2); + let hash_a = CandidateHash(Hash::repeat_byte(0x1)); + let hash_b = CandidateHash(Hash::repeat_byte(0x2)); let validators: Vec<_> = [ Sr25519Keyring::Alice, @@ -263,7 +295,7 @@ mod tests { let cap = NonZeroUsize::new(3).unwrap(); let mut buf = ValidatorGroupsBuffer::with_capacity(cap); - let hashes: Vec<_> = (0..5).map(Hash::repeat_byte).collect(); + let hashes: Vec<_> = (0..5).map(|i| CandidateHash(Hash::repeat_byte(i))).collect(); let validators: Vec<_> = [ Sr25519Keyring::Alice, diff --git a/node/network/collator-protocol/src/error.rs b/node/network/collator-protocol/src/error.rs index b1c86fa81c5a..4003ac438c92 100644 --- a/node/network/collator-protocol/src/error.rs +++ b/node/network/collator-protocol/src/error.rs @@ -17,10 +17,12 @@ //! Error handling related code and Error/Result definitions. +use futures::channel::oneshot; + use polkadot_node_network_protocol::request_response::incoming; use polkadot_node_primitives::UncheckedSignedFullStatement; -use polkadot_node_subsystem::errors::SubsystemError; -use polkadot_node_subsystem_util::runtime; +use polkadot_node_subsystem::{errors::SubsystemError, RuntimeApiError}; +use polkadot_node_subsystem_util::{backing_implicit_view, runtime}; use crate::LOG_TARGET; @@ -44,10 +46,84 @@ pub enum Error { #[error("Error while accessing runtime information")] Runtime(#[from] runtime::Error), + #[error("Error while accessing Runtime API")] + RuntimeApi(#[from] RuntimeApiError), + + #[error(transparent)] + ImplicitViewFetchError(backing_implicit_view::FetchError), + + #[error("Response receiver for Runtime API version request cancelled")] + CancelledRuntimeApiVersion(oneshot::Canceled), + + #[error("Response receiver for active validators request cancelled")] + CancelledActiveValidators(oneshot::Canceled), + + #[error("Response receiver for validator groups request cancelled")] + CancelledValidatorGroups(oneshot::Canceled), + + #[error("Response receiver for availability cores request cancelled")] + CancelledAvailabilityCores(oneshot::Canceled), + #[error("CollationSeconded contained statement with invalid signature")] InvalidStatementSignature(UncheckedSignedFullStatement), } +/// An error happened on the validator side of the protocol when attempting +/// to start seconding a candidate. 
+#[derive(Debug, thiserror::Error)] +pub enum SecondingError { + #[error("Failed to fetch a collation")] + FailedToFetch(#[from] oneshot::Canceled), + + #[error("Error while accessing Runtime API")] + RuntimeApi(#[from] RuntimeApiError), + + #[error("Response receiver for persisted validation data request cancelled")] + CancelledRuntimePersistedValidationData(oneshot::Canceled), + + #[error("Response receiver for prospective validation data request cancelled")] + CancelledProspectiveValidationData(oneshot::Canceled), + + #[error("Persisted validation data is not available")] + PersistedValidationDataNotFound, + + #[error("Persisted validation data hash doesn't match one in the candidate receipt.")] + PersistedValidationDataMismatch, + + #[error("Candidate hash doesn't match the advertisement")] + CandidateHashMismatch, + + #[error("Received duplicate collation from the peer")] + Duplicate, +} + +impl SecondingError { + /// Returns true if an error indicates that a peer is malicious. + pub fn is_malicious(&self) -> bool { + use SecondingError::*; + matches!(self, PersistedValidationDataMismatch | CandidateHashMismatch | Duplicate) + } +} + +/// A validator failed to request a collation due to an error. +#[derive(Debug, thiserror::Error)] +pub enum FetchError { + #[error("Collation was not previously advertised")] + NotAdvertised, + + #[error("Peer is unknown")] + UnknownPeer, + + #[error("Collation was already requested")] + AlreadyRequested, + + #[error("Relay parent went out of view")] + RelayParentOutOfView, + + #[error("Peer's protocol doesn't match the advertisement")] + ProtocolMismatch, +} + /// Utility for eating top level errors and log them. /// /// We basically always want to try and continue on error. This utility function is meant to diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index b71acc127c88..85f560ceaa6e 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -31,13 +31,15 @@ use futures::{ use sp_keystore::SyncCryptoStorePtr; use polkadot_node_network_protocol::{ - request_response::{v1 as request_v1, IncomingRequestReceiver}, + request_response::{v1 as request_v1, vstaging as protocol_vstaging, IncomingRequestReceiver}, PeerId, UnifiedReputationChange as Rep, }; -use polkadot_primitives::v2::CollatorPair; +use polkadot_primitives::v2::{CollatorPair, Hash}; use polkadot_node_subsystem::{ - errors::SubsystemError, messages::NetworkBridgeTxMessage, overseer, SpawnedSubsystem, + errors::SubsystemError, + messages::{NetworkBridgeTxMessage, RuntimeApiMessage, RuntimeApiRequest}, + overseer, SpawnedSubsystem, }; mod error; @@ -47,6 +49,15 @@ mod validator_side; const LOG_TARGET: &'static str = "parachain::collator-protocol"; +/// The maximum depth a candidate can occupy for any relay parent. +/// 'depth' is defined as the amount of blocks between the para +/// head in a relay-chain block's state and a candidate with a +/// particular relay-parent. +/// +/// This value is only used for limiting the number of candidates +/// we accept and distribute per relay parent. +const MAX_CANDIDATE_DEPTH: usize = 4; + /// A collator eviction policy - how fast to evict collators which are inactive. #[derive(Debug, Clone, Copy)] pub struct CollatorEvictionPolicy { @@ -77,12 +88,19 @@ pub enum ProtocolSide { metrics: validator_side::Metrics, }, /// Collators operate on a parachain. 
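// ---------------------------------------------------------------------------
// Editorial aside (not part of the patch): with the tuple variant replaced by
// named fields (see the hunk just below), wiring up the collator side reads
// roughly like this; the two receivers are assumed to come from the usual
// request-response channel setup elsewhere in the node.
fn collator_protocol_side(
	peer_id: PeerId,
	collator_pair: CollatorPair,
	request_receiver_v1: IncomingRequestReceiver<request_v1::CollationFetchingRequest>,
	request_receiver_vstaging: IncomingRequestReceiver<protocol_vstaging::CollationFetchingRequest>,
) -> ProtocolSide {
	ProtocolSide::Collator {
		peer_id,
		collator_pair,
		request_receiver_v1,
		request_receiver_vstaging,
		metrics: Default::default(),
	}
}
// ---------------------------------------------------------------------------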
- Collator( - PeerId, - CollatorPair, - IncomingRequestReceiver<request_v1::CollationFetchingRequest>, - collator_side::Metrics, - ), + Collator { + /// Local peer id. + peer_id: PeerId, + /// Parachain collator pair. + collator_pair: CollatorPair, + /// Receiver for v1 collation fetching requests. + request_receiver_v1: IncomingRequestReceiver<request_v1::CollationFetchingRequest>, + /// Receiver for vstaging collation fetching requests. + request_receiver_vstaging: + IncomingRequestReceiver<protocol_vstaging::CollationFetchingRequest>, + /// Metrics. + metrics: collator_side::Metrics, + }, } /// The collator protocol subsystem. @@ -104,8 +122,22 @@ impl CollatorProtocolSubsystem { match self.protocol_side { ProtocolSide::Validator { keystore, eviction_policy, metrics } => validator_side::run(ctx, keystore, eviction_policy, metrics).await, - ProtocolSide::Collator(local_peer_id, collator_pair, req_receiver, metrics) => - collator_side::run(ctx, local_peer_id, collator_pair, req_receiver, metrics).await, + ProtocolSide::Collator { + peer_id, + collator_pair, + request_receiver_v1, + request_receiver_vstaging, + metrics, + } => + collator_side::run( + ctx, + peer_id, + collator_pair, + request_receiver_v1, + request_receiver_vstaging, + metrics, + ) + .await, } } } @@ -157,3 +189,48 @@ fn tick_stream(period: Duration) -> impl FusedStream<Item = ()> { }) .fuse() } + +#[derive(Debug, Clone, Copy, PartialEq)] +enum ProspectiveParachainsMode { + // v2 runtime API: no prospective parachains. + Disabled, + // vstaging runtime API: prospective parachains. + Enabled, +} + +impl ProspectiveParachainsMode { + fn is_enabled(&self) -> bool { + matches!(self, Self::Enabled) + } +} + +async fn prospective_parachains_mode<Sender>( + sender: &mut Sender, + leaf_hash: Hash, +) -> Result<ProspectiveParachainsMode> +where + Sender: polkadot_node_subsystem::CollatorProtocolSenderTrait, +{ + let (tx, rx) = futures::channel::oneshot::channel(); + sender + .send_message(RuntimeApiMessage::Request(leaf_hash, RuntimeApiRequest::Version(tx))) + .await; + + let version = rx + .await + .map_err(error::Error::CancelledRuntimeApiVersion)? + .map_err(error::Error::RuntimeApi)?; + + if version >= RuntimeApiRequest::VALIDITY_CONSTRAINTS { + Ok(ProspectiveParachainsMode::Enabled) + } else { + if version < 2 { + gum::warn!( + target: LOG_TARGET, + "Runtime API version is {}, it is expected to be at least 2. Prospective parachains are disabled", + version + ); + } + Ok(ProspectiveParachainsMode::Disabled) + } +} diff --git a/node/network/collator-protocol/src/validator_side/collation.rs b/node/network/collator-protocol/src/validator_side/collation.rs new file mode 100644 index 000000000000..c5cb848d2815 --- /dev/null +++ b/node/network/collator-protocol/src/validator_side/collation.rs @@ -0,0 +1,254 @@ +// Copyright 2017-2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>. + +//! Primitives for tracking collations-related data. +//! +//! Usually a path of collations is as follows: +//! 1. First, a collation must be advertised by a collator.
+//! 2. If the advertisement was accepted, it's queued for fetch (per relay parent). +//! 3. Once it's requested, the collation is said to be Pending. +//! 4. A pending collation becomes Fetched once received; we then send it to backing for validation. +//! 5. If it turns out to be invalid or async backing allows seconding another candidate, carry on +//! with the next advertisement; otherwise we're done with this relay parent. +//! +//! ┌──────────────────────────────────────────┐ +//! └─▶Advertised ─▶ Pending ─▶ Fetched ─▶ Validated + +use futures::channel::oneshot; +use std::collections::{HashMap, VecDeque}; + +use polkadot_node_network_protocol::PeerId; +use polkadot_node_primitives::PoV; +use polkadot_primitives::v2::{ + CandidateHash, CandidateReceipt, CollatorId, Hash, Id as ParaId, PersistedValidationData, +}; + +use crate::{error::SecondingError, ProspectiveParachainsMode, LOG_TARGET, MAX_CANDIDATE_DEPTH}; + +/// Candidate supplied with a para head it's built on top of. +#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] +pub struct ProspectiveCandidate { + /// Candidate hash. + pub candidate_hash: CandidateHash, + /// Parent head-data hash as supplied in advertisement. + pub parent_head_data_hash: Hash, +} + +impl ProspectiveCandidate { + pub fn candidate_hash(&self) -> CandidateHash { + self.candidate_hash + } +} + +/// Identifier of a fetched collation. +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +pub struct FetchedCollation { + /// Candidate's relay parent. + pub relay_parent: Hash, + /// Parachain id. + pub para_id: ParaId, + /// Candidate hash. + pub candidate_hash: CandidateHash, + /// Id of the collator the collation was fetched from. + pub collator_id: CollatorId, +} + +impl From<&CandidateReceipt> for FetchedCollation { + fn from(receipt: &CandidateReceipt) -> Self { + let descriptor = receipt.descriptor(); + Self { + relay_parent: descriptor.relay_parent, + para_id: descriptor.para_id, + candidate_hash: receipt.hash(), + collator_id: descriptor.collator.clone(), + } + } +} + +/// Identifier of a collation being requested. +#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] +pub struct PendingCollation { + /// Candidate's relay parent. + pub relay_parent: Hash, + /// Parachain id. + pub para_id: ParaId, + /// Peer that advertised this collation. + pub peer_id: PeerId, + /// Optional candidate hash and parent head-data hash, if they were + /// supplied in the advertisement. + pub prospective_candidate: Option<ProspectiveCandidate>, + /// Hash of the candidate's commitments. + pub commitments_hash: Option<Hash>, +} + +impl PendingCollation { + pub fn new( + relay_parent: Hash, + para_id: ParaId, + peer_id: &PeerId, + prospective_candidate: Option<ProspectiveCandidate>, + ) -> Self { + Self { + relay_parent, + para_id, + peer_id: peer_id.clone(), + prospective_candidate, + commitments_hash: None, + } + } +}
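// ---------------------------------------------------------------------------
// Editorial aside (not part of the patch): how the types above compose when a
// vstaging advertisement arrives — the argument values are hypothetical and
// for illustration only.
fn pending_collation_for_advertisement(
	relay_parent: Hash,
	para_id: ParaId,
	peer_id: PeerId,
	candidate_hash: CandidateHash,
	parent_head_data_hash: Hash,
) -> PendingCollation {
	// V1 advertisements carry no candidate info, so this field would be `None`.
	let prospective = ProspectiveCandidate { candidate_hash, parent_head_data_hash };
	PendingCollation::new(relay_parent, para_id, &peer_id, Some(prospective))
}
// ---------------------------------------------------------------------------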
+ +/// Performs a sanity check between advertised and fetched collations. +/// +/// Since the persisted validation data is constructed using the advertised +/// parent head data hash, the latter doesn't require an additional check. +pub fn fetched_collation_sanity_check( + advertised: &PendingCollation, + fetched: &CandidateReceipt, + persisted_validation_data: &PersistedValidationData, +) -> Result<(), SecondingError> { + if persisted_validation_data.hash() != fetched.descriptor().persisted_validation_data_hash { + Err(SecondingError::PersistedValidationDataMismatch) + } else if advertised + .prospective_candidate + .map_or(false, |pc| pc.candidate_hash() != fetched.hash()) + { + Err(SecondingError::CandidateHashMismatch) + } else { + Ok(()) + } +} + +pub type CollationEvent = (CollatorId, PendingCollation); + +pub type PendingCollationFetch = + (CollationEvent, std::result::Result<(CandidateReceipt, PoV), oneshot::Canceled>); + +/// The status of the collations in [`Collations`]. +#[derive(Debug, Clone, Copy)] +pub enum CollationStatus { + /// We are waiting for a collation to be advertised to us. + Waiting, + /// We are currently fetching a collation. + Fetching, + /// We are waiting for a collation to be validated. + WaitingOnValidation, + /// We have seconded a collation. + Seconded, +} + +impl Default for CollationStatus { + fn default() -> Self { + Self::Waiting + } +} + +impl CollationStatus { + /// Downgrades to `Waiting`, but only if `self != Seconded`. + fn back_to_waiting(&mut self, relay_parent_mode: ProspectiveParachainsMode) { + match self { + Self::Seconded => + if relay_parent_mode.is_enabled() { + // With async backing enabled it's allowed to + // second more candidates. + *self = Self::Waiting + }, + _ => *self = Self::Waiting, + } + } +} + +/// Information about collations per relay parent. +#[derive(Default)] +pub struct Collations { + /// What is the current status in regards to a collation for this relay parent? + pub status: CollationStatus, + /// Collator we're fetching from, optionally which candidate was requested. + /// + /// This is the currently last started fetch, which did not exceed `MAX_UNSHARED_DOWNLOAD_TIME` + /// yet. + pub fetching_from: Option<(CollatorId, Option<CandidateHash>)>, + /// Collations that were advertised to us, but that we did not yet fetch. + pub waiting_queue: VecDeque<(PendingCollation, CollatorId)>, + /// How many collations have been seconded per parachain. + pub seconded_count: HashMap<ParaId, usize>, +} + +impl Collations { + /// Note a seconded collation for a given para. + pub(super) fn note_seconded(&mut self, para_id: ParaId) { + *self.seconded_count.entry(para_id).or_insert(0) += 1 + } + + /// Returns the next collation to fetch from the `waiting_queue`. + /// + /// This will reset the status back to `Waiting` using [`CollationStatus::back_to_waiting`]. + /// + /// Returns `Some(_)` if there is any collation to fetch, the `status` is not `Seconded` and + /// the passed-in `finished_one` matches the collation we're currently fetching (`fetching_from`). + pub(super) fn get_next_collation_to_fetch( + &mut self, + finished_one: &(CollatorId, Option<CandidateHash>), + relay_parent_mode: ProspectiveParachainsMode, + ) -> Option<(PendingCollation, CollatorId)> { + // If the finished collation does not match `fetching_from`, then we already dequeued + // another fetch to replace it. + if let Some((collator_id, maybe_candidate_hash)) = self.fetching_from.as_ref() { + // If a candidate hash was saved previously, `finished_one` must include this too.
+ if collator_id != &finished_one.0 && + maybe_candidate_hash.map_or(true, |hash| Some(&hash) != finished_one.1.as_ref()) + { + gum::trace!( + target: LOG_TARGET, + waiting_collation = ?self.fetching_from, + ?finished_one, + "Not proceeding to the next collation - has already been done." + ); + return None + } + } + self.status.back_to_waiting(relay_parent_mode); + + match self.status { + // We don't need to fetch any other collation when we already have seconded one. + CollationStatus::Seconded => None, + CollationStatus::Waiting => { + while let Some(next) = self.waiting_queue.pop_front() { + let para_id = next.0.para_id; + if !self.is_seconded_limit_reached(relay_parent_mode, para_id) { + continue + } + + return Some(next) + } + None + }, + CollationStatus::WaitingOnValidation | CollationStatus::Fetching => + unreachable!("We have reset the status above!"), + } + } + + /// Checks the limit of seconded candidates for a given para. + pub(super) fn is_seconded_limit_reached( + &self, + relay_parent_mode: ProspectiveParachainsMode, + para_id: ParaId, + ) -> bool { + let seconded_limit = + if relay_parent_mode.is_enabled() { MAX_CANDIDATE_DEPTH + 1 } else { 1 }; + self.seconded_count.get(¶_id).map_or(true, |&num| num < seconded_limit) + } +} diff --git a/node/network/collator-protocol/src/validator_side/metrics.rs b/node/network/collator-protocol/src/validator_side/metrics.rs new file mode 100644 index 000000000000..a011a5f3b43e --- /dev/null +++ b/node/network/collator-protocol/src/validator_side/metrics.rs @@ -0,0 +1,123 @@ +// Copyright 2017-2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use polkadot_node_subsystem_util::metrics::{self, prometheus}; + +#[derive(Clone, Default)] +pub struct Metrics(Option); + +impl Metrics { + pub fn on_request(&self, succeeded: std::result::Result<(), ()>) { + if let Some(metrics) = &self.0 { + match succeeded { + Ok(()) => metrics.collation_requests.with_label_values(&["succeeded"]).inc(), + Err(()) => metrics.collation_requests.with_label_values(&["failed"]).inc(), + } + } + } + + /// Provide a timer for `process_msg` which observes on drop. + pub fn time_process_msg(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.process_msg.start_timer()) + } + + /// Provide a timer for `handle_collation_request_result` which observes on drop. + pub fn time_handle_collation_request_result( + &self, + ) -> Option { + self.0 + .as_ref() + .map(|metrics| metrics.handle_collation_request_result.start_timer()) + } + + /// Note the current number of collator peers. + pub fn note_collator_peer_count(&self, collator_peers: usize) { + self.0 + .as_ref() + .map(|metrics| metrics.collator_peer_count.set(collator_peers as u64)); + } + + /// Provide a timer for `PerRequest` structure which observes on drop. 
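// ---------------------------------------------------------------------------
// Editorial aside (not part of the patch): the timers in the `Metrics` impl
// above record on drop, so holding one across an operation is all that's
// needed to measure it.
fn timed_process_msg(metrics: &Metrics) {
	let _timer = metrics.time_process_msg(); // `HistogramTimer` observes on drop.
	// ... handle the message ...
} // `_timer` is dropped here and the elapsed time is recorded.
// ---------------------------------------------------------------------------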
+ pub fn time_collation_request_duration( + &self, + ) -> Option { + self.0.as_ref().map(|metrics| metrics.collation_request_duration.start_timer()) + } +} + +#[derive(Clone)] +struct MetricsInner { + collation_requests: prometheus::CounterVec, + process_msg: prometheus::Histogram, + handle_collation_request_result: prometheus::Histogram, + collator_peer_count: prometheus::Gauge, + collation_request_duration: prometheus::Histogram, +} + +impl metrics::Metrics for Metrics { + fn try_register( + registry: &prometheus::Registry, + ) -> std::result::Result { + let metrics = MetricsInner { + collation_requests: prometheus::register( + prometheus::CounterVec::new( + prometheus::Opts::new( + "polkadot_parachain_collation_requests_total", + "Number of collations requested from Collators.", + ), + &["success"], + )?, + registry, + )?, + process_msg: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_collator_protocol_validator_process_msg", + "Time spent within `collator_protocol_validator::process_msg`", + ) + )?, + registry, + )?, + handle_collation_request_result: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_collator_protocol_validator_handle_collation_request_result", + "Time spent within `collator_protocol_validator::handle_collation_request_result`", + ) + )?, + registry, + )?, + collator_peer_count: prometheus::register( + prometheus::Gauge::new( + "polkadot_parachain_collator_peer_count", + "Amount of collator peers connected", + )?, + registry, + )?, + collation_request_duration: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_collator_protocol_validator_collation_request_duration", + "Lifetime of the `PerRequest` structure", + ).buckets(vec![0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.75, 0.9, 1.0, 1.2, 1.5, 1.75]), + )?, + registry, + )?, + }; + + Ok(Metrics(Some(metrics))) + } +} diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index 58e2eb5b6c3f..2454a23e3581 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -25,7 +25,8 @@ use futures::{ use futures_timer::Delay; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, - sync::Arc, + convert::TryInto, + iter::FromIterator, task::Poll, time::{Duration, Instant}, }; @@ -34,37 +35,52 @@ use sp_keystore::SyncCryptoStorePtr; use polkadot_node_network_protocol::{ self as net_protocol, - peer_set::PeerSet, + peer_set::{CollationVersion, PeerSet}, request_response as req_res, request_response::{ outgoing::{Recipient, RequestError}, - v1::{CollationFetchingRequest, CollationFetchingResponse}, - OutgoingRequest, Requests, + v1 as request_v1, vstaging as request_vstaging, OutgoingRequest, Requests, }, - v1 as protocol_v1, OurView, PeerId, UnifiedReputationChange as Rep, Versioned, View, + v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, + UnifiedReputationChange as Rep, Versioned, View, }; -use polkadot_node_primitives::{PoV, SignedFullStatement}; +use polkadot_node_primitives::{PoV, SignedFullStatement, Statement}; use polkadot_node_subsystem::{ jaeger, messages::{ CandidateBackingMessage, CollatorProtocolMessage, IfDisconnected, NetworkBridgeEvent, - NetworkBridgeTxMessage, RuntimeApiMessage, + NetworkBridgeTxMessage, ProspectiveParachainsMessage, 
ProspectiveValidationDataRequest, }, - overseer, FromOrchestra, OverseerSignal, PerLeafSpan, SubsystemSender, + overseer, CollatorProtocolSenderTrait, FromOrchestra, OverseerSignal, PerLeafSpan, +}; +use polkadot_node_subsystem_util::{ + backing_implicit_view::View as ImplicitView, metrics::prometheus::prometheus::HistogramTimer, }; -use polkadot_node_subsystem_util::metrics::{self, prometheus}; use polkadot_primitives::v2::{ - CandidateReceipt, CollatorId, Hash, Id as ParaId, OccupiedCoreAssumption, - PersistedValidationData, + CandidateHash, CandidateReceipt, CollatorId, CoreState, Hash, Id as ParaId, + OccupiedCoreAssumption, PersistedValidationData, }; -use crate::error::Result; +use crate::error::{Error, FetchError, Result, SecondingError}; + +use super::{ + modify_reputation, prospective_parachains_mode, tick_stream, ProspectiveParachainsMode, + LOG_TARGET, MAX_CANDIDATE_DEPTH, +}; -use super::{modify_reputation, tick_stream, LOG_TARGET}; +mod collation; +mod metrics; + +use collation::{ + fetched_collation_sanity_check, CollationEvent, CollationStatus, Collations, FetchedCollation, + PendingCollation, PendingCollationFetch, ProspectiveCandidate, +}; #[cfg(test)] mod tests; +pub use metrics::Metrics; + const COST_UNEXPECTED_MESSAGE: Rep = Rep::CostMinor("An unexpected message"); /// Message could not be decoded properly. const COST_CORRUPTED_MESSAGE: Rep = Rep::CostMinor("Message was corrupt"); @@ -104,131 +120,33 @@ const ACTIVITY_POLL: Duration = Duration::from_millis(10); // How often to poll collation responses. // This is a hack that should be removed in a refactoring. // See https://github.com/paritytech/polkadot/issues/4182 -const CHECK_COLLATIONS_POLL: Duration = Duration::from_millis(50); - -#[derive(Clone, Default)] -pub struct Metrics(Option); - -impl Metrics { - fn on_request(&self, succeeded: std::result::Result<(), ()>) { - if let Some(metrics) = &self.0 { - match succeeded { - Ok(()) => metrics.collation_requests.with_label_values(&["succeeded"]).inc(), - Err(()) => metrics.collation_requests.with_label_values(&["failed"]).inc(), - } - } - } - - /// Provide a timer for `process_msg` which observes on drop. - fn time_process_msg(&self) -> Option { - self.0.as_ref().map(|metrics| metrics.process_msg.start_timer()) - } - - /// Provide a timer for `handle_collation_request_result` which observes on drop. - fn time_handle_collation_request_result( - &self, - ) -> Option { - self.0 - .as_ref() - .map(|metrics| metrics.handle_collation_request_result.start_timer()) - } - - /// Note the current number of collator peers. - fn note_collator_peer_count(&self, collator_peers: usize) { - self.0 - .as_ref() - .map(|metrics| metrics.collator_peer_count.set(collator_peers as u64)); - } - - /// Provide a timer for `PerRequest` structure which observes on drop. 
- fn time_collation_request_duration( - &self, - ) -> Option { - self.0.as_ref().map(|metrics| metrics.collation_request_duration.start_timer()) - } -} - -#[derive(Clone)] -struct MetricsInner { - collation_requests: prometheus::CounterVec, - process_msg: prometheus::Histogram, - handle_collation_request_result: prometheus::Histogram, - collator_peer_count: prometheus::Gauge, - collation_request_duration: prometheus::Histogram, -} - -impl metrics::Metrics for Metrics { - fn try_register( - registry: &prometheus::Registry, - ) -> std::result::Result { - let metrics = MetricsInner { - collation_requests: prometheus::register( - prometheus::CounterVec::new( - prometheus::Opts::new( - "polkadot_parachain_collation_requests_total", - "Number of collations requested from Collators.", - ), - &["success"], - )?, - registry, - )?, - process_msg: prometheus::register( - prometheus::Histogram::with_opts( - prometheus::HistogramOpts::new( - "polkadot_parachain_collator_protocol_validator_process_msg", - "Time spent within `collator_protocol_validator::process_msg`", - ) - )?, - registry, - )?, - handle_collation_request_result: prometheus::register( - prometheus::Histogram::with_opts( - prometheus::HistogramOpts::new( - "polkadot_parachain_collator_protocol_validator_handle_collation_request_result", - "Time spent within `collator_protocol_validator::handle_collation_request_result`", - ) - )?, - registry, - )?, - collator_peer_count: prometheus::register( - prometheus::Gauge::new( - "polkadot_parachain_collator_peer_count", - "Amount of collator peers connected", - )?, - registry, - )?, - collation_request_duration: prometheus::register( - prometheus::Histogram::with_opts( - prometheus::HistogramOpts::new( - "polkadot_parachain_collator_protocol_validator_collation_request_duration", - "Lifetime of the `PerRequest` structure", - ).buckets(vec![0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.75, 0.9, 1.0, 1.2, 1.5, 1.75]), - )?, - registry, - )?, - }; - - Ok(Metrics(Some(metrics))) - } -} +const CHECK_COLLATIONS_POLL: Duration = Duration::from_millis(5); struct PerRequest { /// Responses from collator. - from_collator: Fuse>>, + /// + /// The response payload is the same for both versions of protocol + /// and doesn't have vstaging alias for simplicity. + from_collator: + Fuse>>, /// Sender to forward to initial requester. to_requester: oneshot::Sender<(CandidateReceipt, PoV)>, /// A jaeger span corresponding to the lifetime of the request. span: Option, /// A metric histogram for the lifetime of the request - _lifetime_timer: Option, + _lifetime_timer: Option, } #[derive(Debug)] struct CollatingPeerState { collator_id: CollatorId, para_id: ParaId, - // Advertised relay parents. - advertisements: HashSet, + /// Collations advertised by peer per relay parent. + /// + /// V1 network protocol doesn't include candidate hash in + /// advertisements, we store an empty set in this case to occupy + /// a slot in map. + advertisements: HashMap>, last_active: Instant, } @@ -241,38 +159,85 @@ enum PeerState { } #[derive(Debug)] -enum AdvertisementError { +enum InsertAdvertisementError { + /// Advertisement is already known. Duplicate, + /// Collation relay parent is out of our view. OutOfOurView, + /// No prior declare message received. UndeclaredCollator, + /// A limit for announcements per peer is reached. + PeerLimitReached, + /// Mismatch of relay parent mode and advertisement arguments. + /// An internal error that should not happen. 
+ ProtocolMismatch, } #[derive(Debug)] struct PeerData { view: View, state: PeerState, + version: CollationVersion, } impl PeerData { - fn new(view: View) -> Self { - PeerData { view, state: PeerState::Connected(Instant::now()) } - } - /// Update the view, clearing all advertisements that are no longer in the /// current view. - fn update_view(&mut self, new_view: View) { + fn update_view( + &mut self, + implicit_view: &ImplicitView, + active_leaves: &HashMap, + per_relay_parent: &HashMap, + new_view: View, + ) { let old_view = std::mem::replace(&mut self.view, new_view); if let PeerState::Collating(ref mut peer_state) = self.state { for removed in old_view.difference(&self.view) { - let _ = peer_state.advertisements.remove(&removed); + // Remove relay parent advertisements if it went out + // of our (implicit) view. + let keep = per_relay_parent + .get(removed) + .map(|s| { + is_relay_parent_in_implicit_view( + removed, + s.prospective_parachains_mode, + implicit_view, + active_leaves, + peer_state.para_id, + ) + }) + .unwrap_or(false); + + if !keep { + peer_state.advertisements.remove(&removed); + } } } } /// Prune old advertisements relative to our view. - fn prune_old_advertisements(&mut self, our_view: &View) { + fn prune_old_advertisements( + &mut self, + implicit_view: &ImplicitView, + active_leaves: &HashMap, + per_relay_parent: &HashMap, + ) { if let PeerState::Collating(ref mut peer_state) = self.state { - peer_state.advertisements.retain(|a| our_view.contains(a)); + peer_state.advertisements.retain(|hash, _| { + // Either + // - Relay parent is an active leaf + // - It belongs to allowed ancestry under some leaf + // Discard otherwise. + per_relay_parent.get(hash).map_or(false, |s| { + is_relay_parent_in_implicit_view( + hash, + s.prospective_parachains_mode, + implicit_view, + active_leaves, + peer_state.para_id, + ) + }) + }); } } @@ -282,18 +247,54 @@ impl PeerData { fn insert_advertisement( &mut self, on_relay_parent: Hash, - our_view: &View, - ) -> std::result::Result<(CollatorId, ParaId), AdvertisementError> { + relay_parent_mode: ProspectiveParachainsMode, + candidate_hash: Option, + implicit_view: &ImplicitView, + active_leaves: &HashMap, + ) -> std::result::Result<(CollatorId, ParaId), InsertAdvertisementError> { match self.state { - PeerState::Connected(_) => Err(AdvertisementError::UndeclaredCollator), - _ if !our_view.contains(&on_relay_parent) => Err(AdvertisementError::OutOfOurView), - PeerState::Collating(ref mut state) => - if state.advertisements.insert(on_relay_parent) { - state.last_active = Instant::now(); - Ok((state.collator_id.clone(), state.para_id.clone())) - } else { - Err(AdvertisementError::Duplicate) - }, + PeerState::Connected(_) => Err(InsertAdvertisementError::UndeclaredCollator), + PeerState::Collating(ref mut state) => { + if !is_relay_parent_in_implicit_view( + &on_relay_parent, + relay_parent_mode, + implicit_view, + active_leaves, + state.para_id, + ) { + return Err(InsertAdvertisementError::OutOfOurView) + } + + match (relay_parent_mode, candidate_hash) { + (ProspectiveParachainsMode::Disabled, candidate_hash) => { + if state.advertisements.contains_key(&on_relay_parent) { + return Err(InsertAdvertisementError::Duplicate) + } + state + .advertisements + .insert(on_relay_parent, HashSet::from_iter(candidate_hash)); + }, + (ProspectiveParachainsMode::Enabled, Some(candidate_hash)) => { + if state + .advertisements + .get(&on_relay_parent) + .map_or(false, |candidates| candidates.contains(&candidate_hash)) + { + return 
Err(InsertAdvertisementError::Duplicate) + } + let candidates = state.advertisements.entry(on_relay_parent).or_default(); + + if candidates.len() >= MAX_CANDIDATE_DEPTH + 1 { + return Err(InsertAdvertisementError::PeerLimitReached) + } + candidates.insert(candidate_hash); + }, + _ => return Err(InsertAdvertisementError::ProtocolMismatch), + } + + state.last_active = Instant::now(); + Ok((state.collator_id.clone(), state.para_id)) + }, } } @@ -305,7 +306,7 @@ impl PeerData { } } - /// Note that a peer is now collating with the given collator and para ids. + /// Note that a peer is now collating with the given collator and para id. /// /// This will overwrite any previous call to `set_collating` and should only be called /// if `is_collating` is false. @@ -313,7 +314,7 @@ impl PeerData { self.state = PeerState::Collating(CollatingPeerState { collator_id, para_id, - advertisements: HashSet::new(), + advertisements: HashMap::new(), last_active: Instant::now(), }); } @@ -333,10 +334,23 @@ impl PeerData { } /// Whether the peer has advertised the given collation. - fn has_advertised(&self, relay_parent: &Hash) -> bool { - match self.state { - PeerState::Connected(_) => false, - PeerState::Collating(ref state) => state.advertisements.contains(relay_parent), + fn has_advertised( + &self, + relay_parent: &Hash, + maybe_candidate_hash: Option, + ) -> bool { + let collating_state = match self.state { + PeerState::Connected(_) => return false, + PeerState::Collating(ref state) => state, + }; + + if let Some(ref candidate_hash) = maybe_candidate_hash { + collating_state + .advertisements + .get(relay_parent) + .map_or(false, |candidates| candidates.contains(candidate_hash)) + } else { + collating_state.advertisements.contains_key(relay_parent) } } @@ -350,236 +364,45 @@ impl PeerData { } } -impl Default for PeerData { - fn default() -> Self { - PeerData::new(Default::default()) - } -} - -struct GroupAssignments { - current: Option, -} - -#[derive(Default)] -struct ActiveParas { - relay_parent_assignments: HashMap, - current_assignments: HashMap, -} - -impl ActiveParas { - async fn assign_incoming( - &mut self, - sender: &mut impl SubsystemSender, - keystore: &SyncCryptoStorePtr, - new_relay_parents: impl IntoIterator, - ) { - for relay_parent in new_relay_parents { - let mv = polkadot_node_subsystem_util::request_validators(relay_parent, sender) - .await - .await - .ok() - .map(|x| x.ok()) - .flatten(); - - let mg = polkadot_node_subsystem_util::request_validator_groups(relay_parent, sender) - .await - .await - .ok() - .map(|x| x.ok()) - .flatten(); - - let mc = polkadot_node_subsystem_util::request_availability_cores(relay_parent, sender) - .await - .await - .ok() - .map(|x| x.ok()) - .flatten(); - - let (validators, groups, rotation_info, cores) = match (mv, mg, mc) { - (Some(v), Some((g, r)), Some(c)) => (v, g, r, c), - _ => { - gum::debug!( - target: LOG_TARGET, - ?relay_parent, - "Failed to query runtime API for relay-parent", - ); - - continue - }, - }; - - let para_now = - match polkadot_node_subsystem_util::signing_key_and_index(&validators, keystore) - .await - .and_then(|(_, index)| { - polkadot_node_subsystem_util::find_validator_group(&groups, index) - }) { - Some(group) => { - let core_now = rotation_info.core_for_group(group, cores.len()); - - cores.get(core_now.0 as usize).and_then(|c| c.para_id()) - }, - None => { - gum::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator"); - - continue - }, - }; - - // This code won't work well, if at all for parathreads. 
For parathreads we'll - // have to be aware of which core the parathread claim is going to be multiplexed - // onto. The parathread claim will also have a known collator, and we should always - // allow an incoming connection from that collator. If not even connecting to them - // directly. - // - // However, this'll work fine for parachains, as each parachain gets a dedicated - // core. - if let Some(para_now) = para_now { - let entry = self.current_assignments.entry(para_now).or_default(); - *entry += 1; - if *entry == 1 { - gum::debug!( - target: LOG_TARGET, - ?relay_parent, - para_id = ?para_now, - "Assigned to a parachain", - ); - } - } - - self.relay_parent_assignments - .insert(relay_parent, GroupAssignments { current: para_now }); - } - } - - fn remove_outgoing(&mut self, old_relay_parents: impl IntoIterator) { - for old_relay_parent in old_relay_parents { - if let Some(assignments) = self.relay_parent_assignments.remove(&old_relay_parent) { - let GroupAssignments { current } = assignments; - - if let Some(cur) = current { - if let Entry::Occupied(mut occupied) = self.current_assignments.entry(cur) { - *occupied.get_mut() -= 1; - if *occupied.get() == 0 { - occupied.remove_entry(); - gum::debug!( - target: LOG_TARGET, - para_id = ?cur, - "Unassigned from a parachain", - ); - } - } - } - } - } - } - - fn is_current(&self, id: &ParaId) -> bool { - self.current_assignments.contains_key(id) - } -} - -#[derive(Debug, Clone, Hash, Eq, PartialEq)] -struct PendingCollation { - relay_parent: Hash, - para_id: ParaId, - peer_id: PeerId, - commitments_hash: Option, +#[derive(Debug, Copy, Clone)] +enum AssignedCoreState { + Scheduled, + Occupied, } -impl PendingCollation { - fn new(relay_parent: Hash, para_id: &ParaId, peer_id: &PeerId) -> Self { - Self { - relay_parent, - para_id: para_id.clone(), - peer_id: peer_id.clone(), - commitments_hash: None, - } +impl AssignedCoreState { + fn is_occupied(&self) -> bool { + matches!(self, AssignedCoreState::Occupied) } } -type CollationEvent = (CollatorId, PendingCollation); - -type PendingCollationFetch = - (CollationEvent, std::result::Result<(CandidateReceipt, PoV), oneshot::Canceled>); - -/// The status of the collations in [`CollationsPerRelayParent`]. -#[derive(Debug, Clone, Copy)] -enum CollationStatus { - /// We are waiting for a collation to be advertised to us. - Waiting, - /// We are currently fetching a collation. - Fetching, - /// We are waiting that a collation is being validated. - WaitingOnValidation, - /// We have seconded a collation. - Seconded, -} - -impl Default for CollationStatus { - fn default() -> Self { - Self::Waiting - } -} - -impl CollationStatus { - /// Downgrades to `Waiting`, but only if `self != Seconded`. - fn back_to_waiting(&mut self) { - match self { - Self::Seconded => {}, - _ => *self = Self::Waiting, - } - } +#[derive(Debug, Clone)] +struct GroupAssignments { + /// Current assignment. + current: Option<(ParaId, AssignedCoreState)>, + /// Paras we're implicitly assigned to with respect to ancestry. + /// This only includes paras from children relay chain blocks assignments. + /// + /// Implicit assignments are not reference-counted since they're accumulated + /// from the most recent leaf. + /// + /// Should be relatively small depending on the group rotation frequency and + /// allowed ancestry length. + implicit: Vec, } -/// Information about collations per relay parent. -#[derive(Default)] -struct CollationsPerRelayParent { - /// What is the current status in regards to a collation for this relay parent? 
- status: CollationStatus, - /// Collation currently being fetched. - /// - /// This is the currently last started fetch, which did not exceed `MAX_UNSHARED_DOWNLOAD_TIME` - /// yet. - waiting_collation: Option, - /// Collation that were advertised to us, but we did not yet fetch. - unfetched_collations: Vec<(PendingCollation, CollatorId)>, +struct PerRelayParent { + prospective_parachains_mode: ProspectiveParachainsMode, + assignment: GroupAssignments, + collations: Collations, } -impl CollationsPerRelayParent { - /// Returns the next collation to fetch from the `unfetched_collations`. - /// - /// This will reset the status back to `Waiting` using [`CollationStatus::back_to_waiting`]. - /// - /// Returns `Some(_)` if there is any collation to fetch, the `status` is not `Seconded` and - /// the passed in `finished_one` is the currently `waiting_collation`. - pub fn get_next_collation_to_fetch( - &mut self, - finished_one: Option<&CollatorId>, - ) -> Option<(PendingCollation, CollatorId)> { - // If finished one does not match waiting_collation, then we already dequeued another fetch - // to replace it. - if self.waiting_collation.as_ref() != finished_one { - gum::trace!( - target: LOG_TARGET, - waiting_collation = ?self.waiting_collation, - ?finished_one, - "Not proceeding to the next collation - has already been done." - ); - return None - } - self.status.back_to_waiting(); - - match self.status { - // We don't need to fetch any other collation when we already have seconded one. - CollationStatus::Seconded => None, - CollationStatus::Waiting => { - let next = self.unfetched_collations.pop(); - self.waiting_collation = next.as_ref().map(|(_, collator_id)| collator_id.clone()); - next - }, - CollationStatus::WaitingOnValidation | CollationStatus::Fetching => - unreachable!("We have reset the status above!"), +impl PerRelayParent { + fn new(mode: ProspectiveParachainsMode) -> Self { + Self { + prospective_parachains_mode: mode, + assignment: GroupAssignments { current: None, implicit: Vec::new() }, + collations: Collations::default(), } } } @@ -587,15 +410,32 @@ impl CollationsPerRelayParent { /// All state relevant for the validator side of the protocol lives here. #[derive(Default)] struct State { - /// Our own view. - view: OurView, + /// Leaves that do support asynchronous backing along with + /// implicit ancestry. Leaves from the implicit view are present in + /// `active_leaves`, the opposite doesn't hold true. + /// + /// Relay-chain blocks which don't support prospective parachains are + /// never included in the fragment trees of active leaves which do. In + /// particular, this means that if a given relay parent belongs to implicit + /// ancestry of some active leaf, then it does support prospective parachains. + implicit_view: ImplicitView, + + /// All active leaves observed by us, including both that do and do not + /// support prospective parachains. This mapping works as a replacement for + /// [`polkadot_node_network_protocol::View`] and can be dropped once the transition + /// to asynchronous backing is done. + active_leaves: HashMap, - /// Active paras based on our view. We only accept collators from these paras. - active_paras: ActiveParas, + /// State tracked per relay parent. + per_relay_parent: HashMap, /// Track all active collators and their data. peer_data: HashMap, + /// Parachains we're currently assigned to. With async backing enabled + /// this includes assignments from the implicit view. 
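+	/// Entries are reference-counted: `assign_incoming` below bumps the count
+	/// for every relay parent that assigns us to a para, and `remove_outgoing`
+	/// drops it again once that relay parent is pruned.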
+	current_assignments: HashMap<ParaId, usize>,
+
 	/// The collations we have requested by relay parent and para id.
 	///
 	/// For each relay parent and para id we may be connected to a number
@@ -617,13 +457,124 @@ struct State {
 	///
 	/// A triggering timer means that the fetching took too long for our taste and we should give
 	/// another collator the chance to be faster (dequeue next fetch request as well).
-	collation_fetch_timeouts: FuturesUnordered<BoxFuture<'static, (CollatorId, Hash)>>,
+	collation_fetch_timeouts:
+		FuturesUnordered<BoxFuture<'static, (CollatorId, Option<CandidateHash>, Hash)>>,
 
-	/// Information about the collations per relay parent.
-	collations_per_relay_parent: HashMap<Hash, CollationsPerRelayParent>,
+	/// Collations that we have successfully requested from peers and are waiting
+	/// on validation.
+	fetched_candidates: HashMap<FetchedCollation, CollationEvent>,
+}
 
-	/// Keep track of all pending candidate collations
-	pending_candidates: HashMap<Hash, CollationEvent>,
+fn is_relay_parent_in_implicit_view(
+	relay_parent: &Hash,
+	relay_parent_mode: ProspectiveParachainsMode,
+	implicit_view: &ImplicitView,
+	active_leaves: &HashMap<Hash, ProspectiveParachainsMode>,
+	para_id: ParaId,
+) -> bool {
+	match relay_parent_mode {
+		ProspectiveParachainsMode::Disabled => active_leaves.contains_key(relay_parent),
+		ProspectiveParachainsMode::Enabled => active_leaves.iter().any(|(hash, mode)| {
+			mode.is_enabled() &&
+				implicit_view
+					.known_allowed_relay_parents_under(hash, Some(para_id))
+					.unwrap_or_default()
+					.contains(relay_parent)
+		}),
+	}
+}
+
+async fn assign_incoming<Sender>(
+	sender: &mut Sender,
+	group_assignment: &mut GroupAssignments,
+	current_assignments: &mut HashMap<ParaId, usize>,
+	keystore: &SyncCryptoStorePtr,
+	relay_parent: Hash,
+) -> Result<()>
+where
+	Sender: CollatorProtocolSenderTrait,
+{
+	let validators = polkadot_node_subsystem_util::request_validators(relay_parent, sender)
+		.await
+		.await
+		.map_err(Error::CancelledActiveValidators)??;
+
+	let (groups, rotation_info) =
+		polkadot_node_subsystem_util::request_validator_groups(relay_parent, sender)
+			.await
+			.await
+			.map_err(Error::CancelledValidatorGroups)??;
+
+	let cores = polkadot_node_subsystem_util::request_availability_cores(relay_parent, sender)
+		.await
+		.await
+		.map_err(Error::CancelledAvailabilityCores)??;
+
+	let para_now = match polkadot_node_subsystem_util::signing_key_and_index(&validators, keystore)
+		.await
+		.and_then(|(_, index)| polkadot_node_subsystem_util::find_validator_group(&groups, index))
+	{
+		Some(group) => {
+			let core_now = rotation_info.core_for_group(group, cores.len());
+
+			cores.get(core_now.0 as usize).and_then(|c| match c {
+				CoreState::Occupied(core) => Some((core.para_id(), AssignedCoreState::Occupied)),
+				CoreState::Scheduled(core) => Some((core.para_id, AssignedCoreState::Scheduled)),
+				CoreState::Free => None,
+			})
+		},
+		None => {
+			gum::trace!(target: LOG_TARGET, ?relay_parent, "Not a validator");
+
+			return Ok(())
+		},
+	};
+
+	// This code won't work well, if at all, for parathreads. For parathreads we'll
+	// have to be aware of which core the parathread claim is going to be multiplexed
+	// onto. The parathread claim will also have a known collator, and we should always
+	// allow an incoming connection from that collator, if not connect to them
+	// directly.
+	//
+	// However, this'll work fine for parachains, as each parachain gets a dedicated
+	// core.
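+	//
+	// As a hedged illustration of the bookkeeping below (ids are made up):
+	// if two relay parents both assign us to para `2000`, then
+	// `current_assignments` holds `{2000: 2}`, and collators for `2000`
+	// remain accepted until both relay parents are pruned and the count
+	// drops back to zero.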
+ if let Some((para_id, _)) = para_now.as_ref() { + let entry = current_assignments.entry(*para_id).or_default(); + *entry += 1; + if *entry == 1 { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + para_id = ?para_id, + "Assigned to a parachain", + ); + } + } + + *group_assignment = GroupAssignments { current: para_now, implicit: Vec::new() }; + + Ok(()) +} + +fn remove_outgoing( + current_assignments: &mut HashMap, + per_relay_parent: PerRelayParent, +) { + let GroupAssignments { current, .. } = per_relay_parent.assignment; + + if let Some((cur, _)) = current { + if let Entry::Occupied(mut occupied) = current_assignments.entry(cur) { + *occupied.get_mut() -= 1; + if *occupied.get() == 0 { + occupied.remove_entry(); + gum::debug!( + target: LOG_TARGET, + para_id = ?cur, + "Unassigned from a parachain", + ); + } + } + } } // O(n) search for collator ID by iterating through the peers map. This should be fast enough @@ -649,42 +600,29 @@ async fn fetch_collation( state: &mut State, pc: PendingCollation, id: CollatorId, -) { +) -> std::result::Result<(), FetchError> { let (tx, rx) = oneshot::channel(); - let PendingCollation { relay_parent, para_id, peer_id, .. } = pc; + let PendingCollation { relay_parent, peer_id, prospective_candidate, .. } = pc; + let candidate_hash = prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); - let timeout = |collator_id, relay_parent| async move { - Delay::new(MAX_UNSHARED_DOWNLOAD_TIME).await; - (collator_id, relay_parent) - }; - state - .collation_fetch_timeouts - .push(timeout(id.clone(), relay_parent.clone()).boxed()); + let peer_data = state.peer_data.get(&peer_id).ok_or(FetchError::UnknownPeer)?; - if let Some(peer_data) = state.peer_data.get(&peer_id) { - if peer_data.has_advertised(&relay_parent) { - request_collation(sender, state, relay_parent, para_id, peer_id, tx).await; - } else { - gum::debug!( - target: LOG_TARGET, - ?peer_id, - ?para_id, - ?relay_parent, - "Collation is not advertised for the relay parent by the peer, do not request it", - ); - } + if peer_data.has_advertised(&relay_parent, candidate_hash) { + request_collation(sender, state, pc, id.clone(), peer_data.version, tx).await?; + let timeout = |collator_id, candidate_hash, relay_parent| async move { + Delay::new(MAX_UNSHARED_DOWNLOAD_TIME).await; + (collator_id, candidate_hash, relay_parent) + }; + state + .collation_fetch_timeouts + .push(timeout(id.clone(), candidate_hash, relay_parent).boxed()); + state.collation_fetches.push(rx.map(move |r| ((id, pc), r)).boxed()); + + Ok(()) } else { - gum::warn!( - target: LOG_TARGET, - ?peer_id, - ?para_id, - ?relay_parent, - "Requested to fetch a collation from an unknown peer", - ); + Err(FetchError::NotAdvertised) } - - state.collation_fetches.push(rx.map(|r| ((id, pc), r)).boxed()); } /// Report a collator for some malicious actions. 
@@ -713,33 +651,46 @@ async fn note_good_collation( async fn notify_collation_seconded( sender: &mut impl overseer::CollatorProtocolSenderTrait, peer_id: PeerId, + version: CollationVersion, relay_parent: Hash, statement: SignedFullStatement, ) { - let wire_message = - protocol_v1::CollatorProtocolMessage::CollationSeconded(relay_parent, statement.into()); + let statement = statement.into(); + let wire_message = match version { + CollationVersion::V1 => Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol( + protocol_v1::CollatorProtocolMessage::CollationSeconded(relay_parent, statement), + )), + CollationVersion::VStaging => + Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol( + protocol_vstaging::CollatorProtocolMessage::CollationSeconded( + relay_parent, + statement, + ), + )), + }; sender - .send_message(NetworkBridgeTxMessage::SendCollationMessage( - vec![peer_id], - Versioned::V1(protocol_v1::CollationProtocol::CollatorProtocol(wire_message)), - )) + .send_message(NetworkBridgeTxMessage::SendCollationMessage(vec![peer_id], wire_message)) .await; - - modify_reputation(sender, peer_id, BENEFIT_NOTIFY_GOOD).await; } /// A peer's view has changed. A number of things should be done: /// - Ongoing collation requests have to be canceled. /// - Advertisements by this peer that are no longer relevant have to be removed. -async fn handle_peer_view_change(state: &mut State, peer_id: PeerId, view: View) -> Result<()> { - let peer_data = state.peer_data.entry(peer_id.clone()).or_default(); +fn handle_peer_view_change(state: &mut State, peer_id: PeerId, view: View) { + let peer_data = match state.peer_data.get_mut(&peer_id) { + Some(peer_data) => peer_data, + None => return, + }; - peer_data.update_view(view); + peer_data.update_view( + &state.implicit_view, + &state.active_leaves, + &state.per_relay_parent, + view, + ); state .requested_collations - .retain(|pc, _| pc.peer_id != peer_id || peer_data.has_advertised(&pc.relay_parent)); - - Ok(()) + .retain(|pc, _| pc.peer_id != peer_id || peer_data.has_advertised(&pc.relay_parent, None)); } /// Request a collation from the network. @@ -751,41 +702,49 @@ async fn handle_peer_view_change(state: &mut State, peer_id: PeerId, view: View) async fn request_collation( sender: &mut impl overseer::CollatorProtocolSenderTrait, state: &mut State, - relay_parent: Hash, - para_id: ParaId, - peer_id: PeerId, + pending_collation: PendingCollation, + collator_id: CollatorId, + peer_protocol_version: CollationVersion, result: oneshot::Sender<(CandidateReceipt, PoV)>, -) { - if !state.view.contains(&relay_parent) { - gum::debug!( - target: LOG_TARGET, - peer_id = %peer_id, - para_id = %para_id, - relay_parent = %relay_parent, - "collation is no longer in view", - ); - return - } - let pending_collation = PendingCollation::new(relay_parent, ¶_id, &peer_id); +) -> std::result::Result<(), FetchError> { if state.requested_collations.contains_key(&pending_collation) { - gum::warn!( - target: LOG_TARGET, - peer_id = %pending_collation.peer_id, - %pending_collation.para_id, - ?pending_collation.relay_parent, - "collation has already been requested", - ); - return + return Err(FetchError::AlreadyRequested) } - let (full_request, response_recv) = OutgoingRequest::new( - Recipient::Peer(peer_id), - CollationFetchingRequest { relay_parent, para_id }, - ); - let requests = Requests::CollationFetchingV1(full_request); + let PendingCollation { relay_parent, para_id, peer_id, prospective_candidate, .. 
} = + pending_collation; + let per_relay_parent = state + .per_relay_parent + .get_mut(&relay_parent) + .ok_or(FetchError::RelayParentOutOfView)?; + + // Relay parent mode is checked in `handle_advertisement`. + let (requests, response_recv) = match (peer_protocol_version, prospective_candidate) { + (CollationVersion::V1, _) => { + let (req, response_recv) = OutgoingRequest::new( + Recipient::Peer(peer_id), + request_v1::CollationFetchingRequest { relay_parent, para_id }, + ); + let requests = Requests::CollationFetchingV1(req); + (requests, response_recv.boxed()) + }, + (CollationVersion::VStaging, Some(ProspectiveCandidate { candidate_hash, .. })) => { + let (req, response_recv) = OutgoingRequest::new( + Recipient::Peer(peer_id), + request_vstaging::CollationFetchingRequest { + relay_parent, + para_id, + candidate_hash, + }, + ); + let requests = Requests::CollationFetchingVStaging(req); + (requests, response_recv.boxed()) + }, + _ => return Err(FetchError::ProtocolMismatch), + }; let per_request = PerRequest { - from_collator: response_recv.boxed().fuse(), + from_collator: response_recv.fuse(), to_requester: result, span: state .span_per_relay_parent @@ -794,9 +753,7 @@ async fn request_collation( _lifetime_timer: state.metrics.time_collation_request_duration(), }; - state - .requested_collations - .insert(PendingCollation::new(relay_parent, ¶_id, &peer_id), per_request); + state.requested_collations.insert(pending_collation, per_request); gum::debug!( target: LOG_TARGET, @@ -806,12 +763,21 @@ async fn request_collation( "Requesting collation", ); + let maybe_candidate_hash = + prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); + per_relay_parent.collations.status = CollationStatus::Fetching; + per_relay_parent + .collations + .fetching_from + .replace((collator_id, maybe_candidate_hash)); + sender .send_message(NetworkBridgeTxMessage::SendRequests( vec![requests], IfDisconnected::ImmediateError, )) .await; + Ok(()) } /// Networking message has been received. 
@@ -820,12 +786,18 @@ async fn process_incoming_peer_message( ctx: &mut Context, state: &mut State, origin: PeerId, - msg: protocol_v1::CollatorProtocolMessage, + msg: Versioned< + protocol_v1::CollatorProtocolMessage, + protocol_vstaging::CollatorProtocolMessage, + >, ) { - use protocol_v1::CollatorProtocolMessage::*; + use protocol_v1::CollatorProtocolMessage as V1; + use protocol_vstaging::CollatorProtocolMessage as VStaging; use sp_runtime::traits::AppVerify; + match msg { - Declare(collator_id, para_id, signature) => { + Versioned::V1(V1::Declare(collator_id, para_id, signature)) | + Versioned::VStaging(VStaging::Declare(collator_id, para_id, signature)) => { if collator_peer_id(&state.peer_data, &collator_id).is_some() { modify_reputation(ctx.sender(), origin, COST_UNEXPECTED_MESSAGE).await; return @@ -850,7 +822,7 @@ async fn process_incoming_peer_message( target: LOG_TARGET, peer_id = ?origin, ?para_id, - "Peer is not in the collating state", + "Peer is already in the collating state", ); modify_reputation(ctx.sender(), origin, COST_UNEXPECTED_MESSAGE).await; return @@ -867,7 +839,7 @@ async fn process_incoming_peer_message( return } - if state.active_paras.is_current(¶_id) { + if state.current_assignments.contains_key(¶_id) { gum::debug!( target: LOG_TARGET, peer_id = ?origin, @@ -891,165 +863,383 @@ async fn process_incoming_peer_message( disconnect_peer(ctx.sender(), origin).await; } }, - AdvertiseCollation(relay_parent) => { - let _span = state - .span_per_relay_parent - .get(&relay_parent) - .map(|s| s.child("advertise-collation")); - if !state.view.contains(&relay_parent) { + Versioned::V1(V1::AdvertiseCollation(relay_parent)) => + if let Err(err) = + handle_advertisement(ctx.sender(), state, relay_parent, &origin, None).await + { gum::debug!( target: LOG_TARGET, peer_id = ?origin, ?relay_parent, - "Advertise collation out of view", + error = ?err, + "Rejected v1 advertisement", ); - modify_reputation(ctx.sender(), origin, COST_UNEXPECTED_MESSAGE).await; - return + if let Some(rep) = err.reputation_changes() { + modify_reputation(ctx.sender(), origin.clone(), rep).await; + } + }, + Versioned::VStaging(VStaging::AdvertiseCollation { + relay_parent, + candidate_hash, + parent_head_data_hash, + }) => + if let Err(err) = handle_advertisement( + ctx.sender(), + state, + relay_parent, + &origin, + Some((candidate_hash, parent_head_data_hash)), + ) + .await + { + gum::debug!( + target: LOG_TARGET, + peer_id = ?origin, + ?relay_parent, + ?candidate_hash, + error = ?err, + "Rejected vstaging advertisement", + ); + + if let Some(rep) = err.reputation_changes() { + modify_reputation(ctx.sender(), origin.clone(), rep).await; + } + }, + Versioned::V1(V1::CollationSeconded(..)) | + Versioned::VStaging(VStaging::CollationSeconded(..)) => { + gum::warn!( + target: LOG_TARGET, + peer_id = ?origin, + "Unexpected `CollationSeconded` message, decreasing reputation", + ); + + modify_reputation(ctx.sender(), origin, COST_UNEXPECTED_MESSAGE).await; + }, + } +} + +async fn is_seconding_allowed( + _sender: &mut Sender, + _relay_parent: Hash, + _candidate_hash: CandidateHash, + _parent_head_data_hash: Hash, + _para_id: ParaId, + _active_leaves: impl IntoIterator, +) -> Option +where + Sender: CollatorProtocolSenderTrait, +{ + // TODO https://github.com/paritytech/polkadot/issues/5923 + Some(true) +} + +#[derive(Debug)] +enum AdvertisementError { + /// Relay parent is unknown. + RelayParentUnknown, + /// Peer is not present in the subsystem state. + UnknownPeer, + /// Peer has not declared its para id. 
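+	/// (no `Declare` message was received before the advertisement).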
+	UndeclaredCollator,
+	/// We're assigned to a different para at the given relay parent.
+	InvalidAssignment,
+	/// Collator is trying to build on top of an occupied core
+	/// when async backing is disabled.
+	CoreOccupied,
+	/// An advertisement format doesn't match the relay parent.
+	ProtocolMismatch,
+	/// The para has reached its limit of seconded candidates for this relay parent.
+	SecondedLimitReached,
+	/// Advertisement is invalid.
+	Invalid(InsertAdvertisementError),
+	/// Failed to query the prospective parachains subsystem.
+	ProspectiveParachainsUnavailable,
+}
+
+impl AdvertisementError {
+	fn reputation_changes(&self) -> Option<Rep> {
+		use AdvertisementError::*;
+		match self {
+			InvalidAssignment => Some(COST_WRONG_PARA),
+			RelayParentUnknown | UndeclaredCollator | CoreOccupied | Invalid(_) =>
+				Some(COST_UNEXPECTED_MESSAGE),
+			UnknownPeer |
+			ProtocolMismatch |
+			SecondedLimitReached |
+			ProspectiveParachainsUnavailable => None,
+		}
+	}
+}
+
+async fn handle_advertisement<Sender>(
+	sender: &mut Sender,
+	state: &mut State,
+	relay_parent: Hash,
+	peer_id: &PeerId,
+	prospective_candidate: Option<(CandidateHash, Hash)>,
+) -> std::result::Result<(), AdvertisementError>
+where
+	Sender: CollatorProtocolSenderTrait,
+{
+	let _span = state
+		.span_per_relay_parent
+		.get(&relay_parent)
+		.map(|s| s.child("advertise-collation"));
+
+	let per_relay_parent = state
+		.per_relay_parent
+		.get_mut(&relay_parent)
+		.ok_or(AdvertisementError::RelayParentUnknown)?;
+
+	let relay_parent_mode = per_relay_parent.prospective_parachains_mode;
+	let assignment = &per_relay_parent.assignment;
+
+	let peer_data = state.peer_data.get_mut(&peer_id).ok_or(AdvertisementError::UnknownPeer)?;
+	let collator_para_id =
+		peer_data.collating_para().ok_or(AdvertisementError::UndeclaredCollator)?;
+
+	match assignment.current {
+		Some((id, core_state)) if id == collator_para_id => {
+			// Disallow building on top of an occupied core if async
+			// backing is disabled.
+			if !relay_parent_mode.is_enabled() && core_state.is_occupied() {
+				return Err(AdvertisementError::CoreOccupied)
 			}
+		},
+		_ if assignment.implicit.contains(&collator_para_id) => {
+			// This relay parent is part of the implicit ancestry,
+			// thus async backing is enabled.
+		},
+		_ => return Err(AdvertisementError::InvalidAssignment),
+	};
 
-		let peer_data = match state.peer_data.get_mut(&origin) {
-			None => {
-				gum::debug!(
-					target: LOG_TARGET,
-					peer_id = ?origin,
-					?relay_parent,
-					"Advertise collation message has been received from an unknown peer",
-				);
-				modify_reputation(ctx.sender(), origin, COST_UNEXPECTED_MESSAGE).await;
-				return
-			},
-			Some(p) => p,
-		};
 
+	// TODO: only fetch a collation if it's built on top of backed nodes in the fragment tree.
+	// https://github.com/paritytech/polkadot/issues/5923
+	let is_seconding_allowed = match (relay_parent_mode, prospective_candidate) {
+		(ProspectiveParachainsMode::Disabled, _) => true,
+		(ProspectiveParachainsMode::Enabled, Some((candidate_hash, parent_head_data_hash))) => {
+			let active_leaves = state.active_leaves.keys().copied();
+			is_seconding_allowed(
+				sender,
+				relay_parent,
+				candidate_hash,
+				parent_head_data_hash,
+				collator_para_id,
+				active_leaves,
+			)
+			.await
+			.ok_or(AdvertisementError::ProspectiveParachainsUnavailable)?
+ }, + _ => return Err(AdvertisementError::ProtocolMismatch), + }; - match peer_data.insert_advertisement(relay_parent, &state.view) { - Ok((id, para_id)) => { - gum::debug!( + if !is_seconding_allowed { + // TODO + return Ok(()) + } + + let candidate_hash = prospective_candidate.map(|(hash, ..)| hash); + let insert_result = peer_data.insert_advertisement( + relay_parent, + relay_parent_mode, + candidate_hash, + &state.implicit_view, + &state.active_leaves, + ); + + match insert_result { + Ok((id, para_id)) => { + gum::debug!( + target: LOG_TARGET, + peer_id = ?peer_id, + %para_id, + ?relay_parent, + "Received advertise collation", + ); + let prospective_candidate = + prospective_candidate.map(|(candidate_hash, parent_head_data_hash)| { + ProspectiveCandidate { candidate_hash, parent_head_data_hash } + }); + + let collations = &mut per_relay_parent.collations; + if !collations.is_seconded_limit_reached(relay_parent_mode, collator_para_id) { + return Err(AdvertisementError::SecondedLimitReached) + } + + let pending_collation = + PendingCollation::new(relay_parent, para_id, peer_id, prospective_candidate); + + match collations.status { + CollationStatus::Fetching | CollationStatus::WaitingOnValidation => { + gum::trace!( target: LOG_TARGET, - peer_id = ?origin, + peer_id = ?peer_id, %para_id, ?relay_parent, - "Received advertise collation", + "Added collation to the pending list" ); - - let pending_collation = PendingCollation::new(relay_parent, ¶_id, &origin); - - let collations = - state.collations_per_relay_parent.entry(relay_parent).or_default(); - - match collations.status { - CollationStatus::Fetching | CollationStatus::WaitingOnValidation => { - gum::trace!( - target: LOG_TARGET, - peer_id = ?origin, - %para_id, - ?relay_parent, - "Added collation to the pending list" - ); - collations.unfetched_collations.push((pending_collation, id)); - }, - CollationStatus::Waiting => { - collations.status = CollationStatus::Fetching; - collations.waiting_collation = Some(id.clone()); - - fetch_collation(ctx.sender(), state, pending_collation.clone(), id) - .await; - }, - CollationStatus::Seconded => { - gum::trace!( - target: LOG_TARGET, - peer_id = ?origin, - %para_id, - ?relay_parent, - "Valid seconded collation" - ); - }, - } + collations.waiting_queue.push_back((pending_collation, id)); }, - Err(error) => { - gum::debug!( + CollationStatus::Waiting => { + let _ = fetch_collation(sender, state, pending_collation, id).await; + }, + CollationStatus::Seconded if relay_parent_mode.is_enabled() => { + // Limit is not reached, it's allowed to second another + // collation. + let _ = fetch_collation(sender, state, pending_collation, id).await; + }, + CollationStatus::Seconded => { + gum::trace!( target: LOG_TARGET, - peer_id = ?origin, + peer_id = ?peer_id, + %para_id, ?relay_parent, - ?error, - "Invalid advertisement", + ?relay_parent_mode, + "A collation has already been seconded", ); - - modify_reputation(ctx.sender(), origin, COST_UNEXPECTED_MESSAGE).await; }, } }, - CollationSeconded(_, _) => { - gum::warn!( - target: LOG_TARGET, - peer_id = ?origin, - "Unexpected `CollationSeconded` message, decreasing reputation", - ); + Err(InsertAdvertisementError::ProtocolMismatch) => { + // Checked above. + return Err(AdvertisementError::ProtocolMismatch) }, + Err(error) => return Err(AdvertisementError::Invalid(error)), } -} -/// A leaf has become inactive so we want to -/// - Cancel all ongoing collation requests that are on top of that leaf. -/// - Remove all stored collations relevant to that leaf. 
-async fn remove_relay_parent(state: &mut State, relay_parent: Hash) -> Result<()> { - state.requested_collations.retain(|k, _| k.relay_parent != relay_parent); - - state.pending_candidates.retain(|k, _| k != &relay_parent); - - state.collations_per_relay_parent.remove(&relay_parent); Ok(()) } /// Our view has changed. -#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] -async fn handle_our_view_change( - ctx: &mut Context, +async fn handle_our_view_change( + sender: &mut Sender, state: &mut State, keystore: &SyncCryptoStorePtr, view: OurView, -) -> Result<()> { - let old_view = std::mem::replace(&mut state.view, view); - - let added: HashMap> = state - .view - .span_per_head() - .iter() - .filter(|v| !old_view.contains(&v.0)) - .map(|v| (v.0.clone(), v.1.clone())) - .collect(); - - added.into_iter().for_each(|(h, s)| { - state.span_per_relay_parent.insert(h, PerLeafSpan::new(s, "validator-side")); - }); - - let added = state.view.difference(&old_view).cloned().collect::>(); - let removed = old_view.difference(&state.view).cloned().collect::>(); - - for removed in removed.iter().cloned() { - remove_relay_parent(state, removed).await?; - state.span_per_relay_parent.remove(&removed); +) -> Result<()> +where + Sender: CollatorProtocolSenderTrait, +{ + let current_leaves = state.active_leaves.clone(); + + let removed = current_leaves.iter().filter(|(h, _)| !view.contains(*h)); + let added = view.iter().filter(|h| !current_leaves.contains_key(h)); + + for leaf in added { + let mode = prospective_parachains_mode(sender, *leaf).await?; + + if let Some(span) = view.span_per_head().get(leaf).cloned() { + let per_leaf_span = PerLeafSpan::new(span, "validator-side"); + state.span_per_relay_parent.insert(*leaf, per_leaf_span); + } + + let mut per_relay_parent = PerRelayParent::new(mode); + assign_incoming( + sender, + &mut per_relay_parent.assignment, + &mut state.current_assignments, + keystore, + *leaf, + ) + .await?; + + state.active_leaves.insert(*leaf, mode); + + let mut implicit_assignment = + Vec::from_iter(per_relay_parent.assignment.current.map(|(para, _)| para)); + state.per_relay_parent.insert(*leaf, per_relay_parent); + + if mode.is_enabled() { + state + .implicit_view + .activate_leaf(sender, *leaf) + .await + .map_err(Error::ImplicitViewFetchError)?; + + // Order is always descending. + let allowed_ancestry = state + .implicit_view + .known_allowed_relay_parents_under(leaf, None) + .unwrap_or_default(); + for block_hash in allowed_ancestry { + let entry = match state.per_relay_parent.entry(*block_hash) { + Entry::Vacant(entry) => { + let mut per_relay_parent = + PerRelayParent::new(ProspectiveParachainsMode::Enabled); + assign_incoming( + sender, + &mut per_relay_parent.assignment, + &mut state.current_assignments, + keystore, + *block_hash, + ) + .await?; + + entry.insert(per_relay_parent) + }, + Entry::Occupied(entry) => entry.into_mut(), + }; + + let current = entry.assignment.current.map(|(para, _)| para); + let implicit = &mut entry.assignment.implicit; + + // Extend implicitly assigned parachains. + for para in &implicit_assignment { + if !implicit.contains(para) { + implicit.push(*para); + } + } + // Current assignment propagates to parents, meaning that a parachain + // we're assigned to in fresh blocks can submit collations built + // on top of relay parents in the allowed ancestry, but not vice versa. 
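+				// As a sketch (hypothetical assignments): for a leaf `L`
+				// assigned to para `P` with allowed ancestry `[R1, R2]`,
+				// both `R1` and `R2` gain `P` in their `implicit` set,
+				// while an assignment found at `R1` propagates only to the
+				// deeper ancestor `R2`, never back up to `L`.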
+ implicit_assignment.extend(current); + } + } } - state.active_paras.assign_incoming(ctx.sender(), keystore, added).await; - state.active_paras.remove_outgoing(removed); + for (removed, mode) in removed { + state.active_leaves.remove(removed); + // If the leaf is deactivated it still may stay in the view as a part + // of implicit ancestry. Only update the state after the hash is actually + // pruned from the block info storage. + let pruned = if mode.is_enabled() { + state.implicit_view.deactivate_leaf(*removed) + } else { + vec![*removed] + }; + + for removed in pruned { + if let Some(per_relay_parent) = state.per_relay_parent.remove(&removed) { + remove_outgoing(&mut state.current_assignments, per_relay_parent); + } + + state.requested_collations.retain(|k, _| k.relay_parent != removed); + state.fetched_candidates.retain(|k, _| k.relay_parent != removed); + state.span_per_relay_parent.remove(&removed); + } + } for (peer_id, peer_data) in state.peer_data.iter_mut() { - peer_data.prune_old_advertisements(&state.view); + peer_data.prune_old_advertisements( + &state.implicit_view, + &state.active_leaves, + &state.per_relay_parent, + ); // Disconnect peers who are not relevant to our current or next para. // // If the peer hasn't declared yet, they will be disconnected if they do not // declare. if let Some(para_id) = peer_data.collating_para() { - if !state.active_paras.is_current(¶_id) { + if !state.current_assignments.contains_key(¶_id) { gum::trace!( target: LOG_TARGET, ?peer_id, ?para_id, "Disconnecting peer on view change (not current parachain id)" ); - disconnect_peer(ctx.sender(), peer_id.clone()).await; + disconnect_peer(sender, peer_id.clone()).await; } } } @@ -1068,8 +1258,26 @@ async fn handle_network_msg( use NetworkBridgeEvent::*; match bridge_message { - PeerConnected(peer_id, _role, _version, _) => { - state.peer_data.entry(peer_id).or_default(); + PeerConnected(peer_id, observed_role, protocol_version, _) => { + let version = match protocol_version.try_into() { + Ok(version) => version, + Err(err) => { + // Network bridge is expected to handle this. + gum::error!( + target: LOG_TARGET, + ?peer_id, + ?observed_role, + ?err, + "Unsupported protocol version" + ); + return Ok(()) + }, + }; + state.peer_data.entry(peer_id).or_insert_with(|| PeerData { + view: View::default(), + state: PeerState::Connected(Instant::now()), + version, + }); state.metrics.note_collator_peer_count(state.peer_data.len()); }, PeerDisconnected(peer_id) => { @@ -1080,15 +1288,14 @@ async fn handle_network_msg( // impossible! }, PeerViewChange(peer_id, view) => { - handle_peer_view_change(state, peer_id, view).await?; + handle_peer_view_change(state, peer_id, view); }, OurViewChange(view) => { - handle_our_view_change(ctx, state, keystore, view).await?; + handle_our_view_change(ctx.sender(), state, keystore, view).await?; }, - PeerMessage(remote, Versioned::V1(msg)) => { + PeerMessage(remote, msg) => { process_incoming_peer_message(ctx, state, remote, msg).await; }, - PeerMessage(_, Versioned::VStaging(_msg)) => {}, } Ok(()) @@ -1114,7 +1321,7 @@ async fn process_msg( "CollateOn message is not expected on the validator side of the protocol", ); }, - DistributeCollation(_, _, _) => { + DistributeCollation(..) 
=> { gum::warn!( target: LOG_TARGET, "DistributeCollation message is not expected on the validator side of the protocol", @@ -1133,15 +1340,50 @@ async fn process_msg( } }, Seconded(parent, stmt) => { - if let Some(collation_event) = state.pending_candidates.remove(&parent) { + let receipt = match stmt.payload() { + Statement::Seconded(receipt) => receipt, + Statement::Valid(_) => { + gum::warn!( + target: LOG_TARGET, + ?stmt, + relay_parent = %parent, + "Seconded message received with a `Valid` statement", + ); + return + }, + }; + let fetched_collation = FetchedCollation::from(&receipt.to_plain()); + if let Some(collation_event) = state.fetched_candidates.remove(&fetched_collation) { let (collator_id, pending_collation) = collation_event; - let PendingCollation { relay_parent, peer_id, .. } = pending_collation; - note_good_collation(ctx.sender(), &state.peer_data, collator_id).await; - notify_collation_seconded(ctx.sender(), peer_id, relay_parent, stmt).await; + let PendingCollation { + relay_parent, peer_id, para_id, prospective_candidate, .. + } = pending_collation; + note_good_collation(ctx.sender(), &state.peer_data, collator_id.clone()).await; + if let Some(peer_data) = state.peer_data.get(&peer_id) { + notify_collation_seconded( + ctx.sender(), + peer_id, + peer_data.version, + relay_parent, + stmt, + ) + .await; + } - if let Some(collations) = state.collations_per_relay_parent.get_mut(&parent) { - collations.status = CollationStatus::Seconded; + if let Some(state) = state.per_relay_parent.get_mut(&parent) { + state.collations.status = CollationStatus::Seconded; + state.collations.note_seconded(para_id); } + // If async backing is enabled, make an attempt to fetch next collation. + let maybe_candidate_hash = + prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); + dequeue_next_collation_and_fetch( + ctx, + state, + parent, + (collator_id, maybe_candidate_hash), + ) + .await; } else { gum::debug!( target: LOG_TARGET, @@ -1151,7 +1393,9 @@ async fn process_msg( } }, Invalid(parent, candidate_receipt) => { - let id = match state.pending_candidates.entry(parent) { + let fetched_collation = FetchedCollation::from(&candidate_receipt); + let candidate_hash = fetched_collation.candidate_hash; + let id = match state.fetched_candidates.entry(fetched_collation) { Entry::Occupied(entry) if entry.get().1.commitments_hash == Some(candidate_receipt.commitments_hash) => @@ -1170,7 +1414,7 @@ async fn process_msg( report_collator(ctx.sender(), &state.peer_data, id.clone()).await; - dequeue_next_collation_and_fetch(ctx, state, parent, id).await; + dequeue_next_collation_and_fetch(ctx, state, parent, (id, Some(candidate_hash))).await; }, } } @@ -1212,17 +1456,47 @@ pub(crate) async fn run( disconnect_inactive_peers(ctx.sender(), &eviction_policy, &state.peer_data).await; } res = state.collation_fetches.select_next_some() => { - handle_collation_fetched_result(&mut ctx, &mut state, res).await; + let (collator_id, pc) = res.0.clone(); + if let Err(err) = kick_off_seconding(&mut ctx, &mut state, res).await { + gum::warn!( + target: LOG_TARGET, + relay_parent = ?pc.relay_parent, + para_id = ?pc.para_id, + peer_id = ?pc.peer_id, + error = %err, + "Seconding aborted due to an error", + ); + + if err.is_malicious() { + // Report malicious peer. 
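+						// (What counts as malicious is defined by
+						// `SecondingError::is_malicious`; presumably mismatches
+						// caught by the fetched-collation sanity check, e.g. a
+						// wrong para id or candidate hash.)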
+						modify_reputation(ctx.sender(), pc.peer_id, COST_REPORT_BAD).await;
+					}
+					let maybe_candidate_hash =
+						pc.prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash);
+					dequeue_next_collation_and_fetch(
+						&mut ctx,
+						&mut state,
+						pc.relay_parent,
+						(collator_id, maybe_candidate_hash),
+					)
+					.await;
+				}
 			}
 			res = state.collation_fetch_timeouts.select_next_some() => {
-				let (collator_id, relay_parent) = res;
+				let (collator_id, maybe_candidate_hash, relay_parent) = res;
 				gum::debug!(
 					target: LOG_TARGET,
 					?relay_parent,
 					?collator_id,
 					"Timeout hit - already seconded?"
 				);
-				dequeue_next_collation_and_fetch(&mut ctx, &mut state, relay_parent, collator_id).await;
+				dequeue_next_collation_and_fetch(
+					&mut ctx,
+					&mut state,
+					relay_parent,
+					(collator_id, maybe_candidate_hash),
+				)
+				.await;
 			}
 			_ = check_collations_stream.next() => {
 				let reputation_changes = poll_requests(
@@ -1271,142 +1545,151 @@ async fn dequeue_next_collation_and_fetch(
 	ctx: &mut Context,
 	state: &mut State,
 	relay_parent: Hash,
-	// The collator we tried to fetch from last.
-	previous_fetch: CollatorId,
+	// The collator we tried to fetch from last and, optionally, the candidate.
+	previous_fetch: (CollatorId, Option<CandidateHash>),
 ) {
-	if let Some((next, id)) = state
-		.collations_per_relay_parent
-		.get_mut(&relay_parent)
-		.and_then(|c| c.get_next_collation_to_fetch(Some(&previous_fetch)))
-	{
+	while let Some((next, id)) = state.per_relay_parent.get_mut(&relay_parent).and_then(|state| {
+		state
+			.collations
+			.get_next_collation_to_fetch(&previous_fetch, state.prospective_parachains_mode)
+	}) {
 		gum::debug!(
 			target: LOG_TARGET,
 			?relay_parent,
 			?id,
 			"Successfully dequeued next advertisement - fetching ..."
 		);
-		fetch_collation(ctx.sender(), state, next, id).await;
-	} else {
-		gum::debug!(
-			target: LOG_TARGET,
-			?relay_parent,
-			previous_collator = ?previous_fetch,
-			"No collations are available to fetch"
-		);
+		if let Err(err) = fetch_collation(ctx.sender(), state, next, id).await {
+			gum::debug!(
+				target: LOG_TARGET,
+				relay_parent = ?next.relay_parent,
+				para_id = ?next.para_id,
+				peer_id = ?next.peer_id,
+				error = %err,
+				"Failed to request a collation, dequeueing next one",
+			);
+		} else {
+			break
+		}
 	}
 }
 
-#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)]
-async fn request_persisted_validation_data<Context>(
-	ctx: &mut Context,
+async fn request_persisted_validation_data<Sender>(
+	sender: &mut Sender,
 	relay_parent: Hash,
 	para_id: ParaId,
-) -> Option<PersistedValidationData> {
-	// TODO [https://github.com/paritytech/polkadot/issues/5054]
-	//
-	// As of https://github.com/paritytech/polkadot/pull/5557 the
-	// `Second` message requires the `PersistedValidationData` to be
-	// supplied.
-	//
-	// Without asynchronous backing, this can be easily fetched from the
-	// chain state.
-	//
-	// This assumes the core is _scheduled_, in keeping with the effective
-	// current behavior. If the core is occupied, we simply don't return
-	// anything. Likewise with runtime API errors, which are rare.
-	let res = polkadot_node_subsystem_util::request_persisted_validation_data(
+) -> std::result::Result<Option<PersistedValidationData>, SecondingError>
+where
+	Sender: CollatorProtocolSenderTrait,
+{
+	// The core is guaranteed to be scheduled since we accepted the advertisement.
+	polkadot_node_subsystem_util::request_persisted_validation_data(
 		relay_parent,
 		para_id,
 		OccupiedCoreAssumption::Free,
-		ctx.sender(),
+		sender,
 	)
 	.await
-	.await;
+	.await
+	.map_err(SecondingError::CancelledRuntimePersistedValidationData)?
+ .map_err(SecondingError::RuntimeApi) +} - match res { - Ok(Ok(Some(pvd))) => Some(pvd), - _ => None, - } +async fn request_prospective_validation_data( + sender: &mut Sender, + candidate_relay_parent: Hash, + parent_head_data_hash: Hash, + para_id: ParaId, +) -> std::result::Result, SecondingError> +where + Sender: CollatorProtocolSenderTrait, +{ + let (tx, rx) = oneshot::channel(); + + let request = + ProspectiveValidationDataRequest { para_id, candidate_relay_parent, parent_head_data_hash }; + + sender + .send_message(ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx)) + .await; + + rx.await.map_err(SecondingError::CancelledProspectiveValidationData) } /// Handle a fetched collation result. #[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] -async fn handle_collation_fetched_result( +async fn kick_off_seconding( ctx: &mut Context, state: &mut State, (mut collation_event, res): PendingCollationFetch, -) { - // If no prior collation for this relay parent has been seconded, then - // memorize the `collation_event` for that `relay_parent`, such that we may - // notify the collator of their successful second backing +) -> std::result::Result<(), SecondingError> { let relay_parent = collation_event.1.relay_parent; + let para_id = collation_event.1.para_id; - let (candidate_receipt, pov) = match res { - Ok(res) => res, - Err(e) => { - gum::debug!( + let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { + Some(state) => state, + None => { + // Relay parent went out of view, not an error. + gum::trace!( target: LOG_TARGET, - relay_parent = ?collation_event.1.relay_parent, - para_id = ?collation_event.1.para_id, - peer_id = ?collation_event.1.peer_id, - collator_id = ?collation_event.0, - error = ?e, - "Failed to fetch collation.", + relay_parent = ?relay_parent, + "Fetched collation for a parent out of view", ); - - dequeue_next_collation_and_fetch(ctx, state, relay_parent, collation_event.0).await; - return + return Ok(()) }, }; + let collations = &mut per_relay_parent.collations; + let relay_parent_mode = per_relay_parent.prospective_parachains_mode; - if let Some(collations) = state.collations_per_relay_parent.get_mut(&relay_parent) { - if let CollationStatus::Seconded = collations.status { - gum::debug!( - target: LOG_TARGET, - ?relay_parent, - "Already seconded - no longer interested in collation fetch result." - ); - return - } - collations.status = CollationStatus::WaitingOnValidation; - } + let (candidate_receipt, pov) = res?; - if let Entry::Vacant(entry) = state.pending_candidates.entry(relay_parent) { + let fetched_collation = FetchedCollation::from(&candidate_receipt); + if let Entry::Vacant(entry) = state.fetched_candidates.entry(fetched_collation) { collation_event.1.commitments_hash = Some(candidate_receipt.commitments_hash); - if let Some(pvd) = request_persisted_validation_data( - ctx, - candidate_receipt.descriptor().relay_parent, - candidate_receipt.descriptor().para_id, - ) - .await - { - // TODO [https://github.com/paritytech/polkadot/issues/5054] - // - // If PVD isn't available (core occupied) then we'll silently - // just not second this. But prior to asynchronous backing - // we wouldn't second anyway because the core is occupied. - // - // The proper refactoring would be to accept declares from collators - // but not even fetch from them if the core is occupied. Given 5054, - // there's no reason to do this right now. 
- ctx.send_message(CandidateBackingMessage::Second( - relay_parent.clone(), - candidate_receipt, - pvd, - pov, - )) - .await; + let pvd = match (relay_parent_mode, collation_event.1.prospective_candidate) { + ( + ProspectiveParachainsMode::Enabled, + Some(ProspectiveCandidate { parent_head_data_hash, .. }), + ) => + request_prospective_validation_data( + ctx.sender(), + relay_parent, + parent_head_data_hash, + para_id, + ) + .await?, + (ProspectiveParachainsMode::Disabled, _) => + request_persisted_validation_data( + ctx.sender(), + candidate_receipt.descriptor().relay_parent, + candidate_receipt.descriptor().para_id, + ) + .await?, + _ => { + // `handle_advertisement` checks for protocol mismatch. + return Ok(()) + }, } + .ok_or(SecondingError::PersistedValidationDataNotFound)?; + + fetched_collation_sanity_check(&collation_event.1, &candidate_receipt, &pvd)?; + + ctx.send_message(CandidateBackingMessage::Second( + relay_parent, + candidate_receipt, + pvd, + pov, + )) + .await; + // There's always a single collation being fetched at any moment of time. + // In case of a failure, we reset the status back to waiting. + collations.status = CollationStatus::WaitingOnValidation; entry.insert(collation_event); + Ok(()) } else { - gum::trace!( - target: LOG_TARGET, - ?relay_parent, - candidate = ?candidate_receipt.hash(), - "Trying to insert a pending candidate failed, because there is already one.", - ) + Err(SecondingError::Duplicate) } } @@ -1519,7 +1802,7 @@ async fn poll_collation_response( ); CollationFetchResult::Error(None) }, - Ok(CollationFetchingResponse::Collation(receipt, _)) + Ok(request_v1::CollationFetchingResponse::Collation(receipt, _)) if receipt.descriptor().para_id != pending_collation.para_id => { gum::debug!( @@ -1532,7 +1815,7 @@ async fn poll_collation_response( CollationFetchResult::Error(Some(COST_WRONG_PARA)) }, - Ok(CollationFetchingResponse::Collation(receipt, pov)) => { + Ok(request_v1::CollationFetchingResponse::Collation(receipt, pov)) => { gum::debug!( target: LOG_TARGET, para_id = %pending_collation.para_id, diff --git a/node/network/collator-protocol/src/validator_side/tests.rs b/node/network/collator-protocol/src/validator_side/tests/mod.rs similarity index 73% rename from node/network/collator-protocol/src/validator_side/tests.rs rename to node/network/collator-protocol/src/validator_side/tests/mod.rs index 578100663904..ffc8796d3450 100644 --- a/node/network/collator-protocol/src/validator_side/tests.rs +++ b/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -40,9 +40,22 @@ use polkadot_primitives_test_helpers::{ dummy_candidate_descriptor, dummy_candidate_receipt_bad_sig, dummy_hash, }; +mod prospective_parachains; + const ACTIVITY_TIMEOUT: Duration = Duration::from_millis(500); const DECLARE_TIMEOUT: Duration = Duration::from_millis(25); +const API_VERSION_PROSPECTIVE_DISABLED: u32 = 2; + +fn dummy_pvd() -> PersistedValidationData { + PersistedValidationData { + parent_head: HeadData(vec![7, 8, 9]), + relay_parent_number: 5, + max_pov_size: 1024, + relay_parent_storage_root: Default::default(), + } +} + #[derive(Clone)] struct TestState { chain_ids: Vec, @@ -117,6 +130,7 @@ type VirtualOverseer = test_helpers::TestSubsystemContextHandle>(test: impl FnOnce(TestHarness) -> T) { @@ -138,9 +152,10 @@ fn test_harness>(test: impl FnOnce(TestHarne ) .unwrap(); + let keystore: SyncCryptoStorePtr = Arc::new(keystore); let subsystem = run( context, - Arc::new(keystore), + keystore.clone(), crate::CollatorEvictionPolicy { inactive_collator: 
ACTIVITY_TIMEOUT, undeclared: DECLARE_TIMEOUT, @@ -148,7 +163,7 @@ fn test_harness>(test: impl FnOnce(TestHarne Metrics::default(), ); - let test_fut = test(TestHarness { virtual_overseer }); + let test_fut = test(TestHarness { virtual_overseer, keystore }); futures::pin_mut!(test_fut); futures::pin_mut!(subsystem); @@ -245,30 +260,37 @@ async fn assert_candidate_backing_second( expected_relay_parent: Hash, expected_para_id: ParaId, expected_pov: &PoV, + mode: ProspectiveParachainsMode, ) -> CandidateReceipt { - // TODO [https://github.com/paritytech/polkadot/issues/5054] - // - // While collator protocol isn't updated, it's expected to receive - // a Runtime API request for persisted validation data. - let pvd = PersistedValidationData { - parent_head: HeadData(vec![7, 8, 9]), - relay_parent_number: 5, - max_pov_size: 1024, - relay_parent_storage_root: Default::default(), - }; - - assert_matches!( - overseer_recv(virtual_overseer).await, - AllMessages::RuntimeApi(RuntimeApiMessage::Request( - hash, - RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx), - )) => { - assert_eq!(expected_relay_parent, hash); - assert_eq!(expected_para_id, para_id); - assert_eq!(OccupiedCoreAssumption::Free, assumption); - tx.send(Ok(Some(pvd.clone()))).unwrap(); - } - ); + let pvd = dummy_pvd(); + + // Depending on relay parent mode pvd will be either requested + // from the Runtime API or Prospective Parachains. + let msg = overseer_recv(virtual_overseer).await; + match mode { + ProspectiveParachainsMode::Disabled => assert_matches!( + msg, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::PersistedValidationData(para_id, assumption, tx), + )) => { + assert_eq!(expected_relay_parent, hash); + assert_eq!(expected_para_id, para_id); + assert_eq!(OccupiedCoreAssumption::Free, assumption); + tx.send(Ok(Some(pvd.clone()))).unwrap(); + } + ), + ProspectiveParachainsMode::Enabled => assert_matches!( + msg, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx), + ) => { + assert_eq!(expected_relay_parent, request.candidate_relay_parent); + assert_eq!(expected_para_id, request.para_id); + tx.send(Some(pvd.clone())).unwrap(); + } + ), + } assert_matches!( overseer_recv(virtual_overseer).await, @@ -306,6 +328,7 @@ async fn assert_fetch_collation_request( virtual_overseer: &mut VirtualOverseer, relay_parent: Hash, para_id: ParaId, + candidate_hash: Option, ) -> ResponseSender { assert_matches!( overseer_recv(virtual_overseer).await, @@ -313,14 +336,26 @@ async fn assert_fetch_collation_request( ) => { let req = reqs.into_iter().next() .expect("There should be exactly one request"); - match req { - Requests::CollationFetchingV1(req) => { - let payload = req.payload; - assert_eq!(payload.relay_parent, relay_parent); - assert_eq!(payload.para_id, para_id); - req.pending_response - } - _ => panic!("Unexpected request"), + match candidate_hash { + None => assert_matches!( + req, + Requests::CollationFetchingV1(req) => { + let payload = req.payload; + assert_eq!(payload.relay_parent, relay_parent); + assert_eq!(payload.para_id, para_id); + req.pending_response + } + ), + Some(candidate_hash) => assert_matches!( + req, + Requests::CollationFetchingVStaging(req) => { + let payload = req.payload; + assert_eq!(payload.relay_parent, relay_parent); + assert_eq!(payload.para_id, para_id); + assert_eq!(payload.candidate_hash, candidate_hash); + req.pending_response + } + ), } }) } @@ -331,27 +366,38 @@ async fn 
connect_and_declare_collator( peer: PeerId, collator: CollatorPair, para_id: ParaId, + version: CollationVersion, ) { overseer_send( virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerConnected( peer.clone(), ObservedRole::Full, - CollationVersion::V1.into(), + version.into(), None, )), ) .await; - overseer_send( - virtual_overseer, - CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( - peer.clone(), - Versioned::V1(protocol_v1::CollatorProtocolMessage::Declare( + let wire_message = match version { + CollationVersion::V1 => Versioned::V1(protocol_v1::CollatorProtocolMessage::Declare( + collator.public(), + para_id, + collator.sign(&protocol_v1::declare_signature_payload(&peer)), + )), + CollationVersion::VStaging => + Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::Declare( collator.public(), para_id, collator.sign(&protocol_v1::declare_signature_payload(&peer)), )), + }; + + overseer_send( + virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( + peer, + wire_message, )), ) .await; @@ -362,24 +408,48 @@ async fn advertise_collation( virtual_overseer: &mut VirtualOverseer, peer: PeerId, relay_parent: Hash, + candidate: Option<(CandidateHash, Hash)>, // Candidate hash + parent head data hash. ) { + let wire_message = match candidate { + Some((candidate_hash, parent_head_data_hash)) => + Versioned::VStaging(protocol_vstaging::CollatorProtocolMessage::AdvertiseCollation { + relay_parent, + candidate_hash, + parent_head_data_hash, + }), + None => + Versioned::V1(protocol_v1::CollatorProtocolMessage::AdvertiseCollation(relay_parent)), + }; overseer_send( virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::PeerMessage( peer, - Versioned::V1(protocol_v1::CollatorProtocolMessage::AdvertiseCollation(relay_parent)), + wire_message, )), ) .await; } +async fn assert_runtime_version_request(virtual_overseer: &mut VirtualOverseer, hash: Hash) { + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + relay_parent, + RuntimeApiRequest::Version(tx) + )) => { + assert_eq!(relay_parent, hash); + tx.send(Ok(API_VERSION_PROSPECTIVE_DISABLED)).unwrap(); + } + ); +} + // As we receive a relevant advertisement act on it and issue a collation request. #[test] fn act_on_advertisement() { let test_state = TestState::default(); test_harness(|test_harness| async move { - let TestHarness { mut virtual_overseer } = test_harness; + let TestHarness { mut virtual_overseer, .. } = test_harness; let pair = CollatorPair::generate().0; gum::trace!("activating"); @@ -392,6 +462,7 @@ fn act_on_advertisement() { ) .await; + assert_runtime_version_request(&mut virtual_overseer, test_state.relay_parent).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -401,15 +472,75 @@ fn act_on_advertisement() { peer_b.clone(), pair.clone(), test_state.chain_ids[0], + CollationVersion::V1, ) .await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), test_state.relay_parent).await; + advertise_collation(&mut virtual_overseer, peer_b.clone(), test_state.relay_parent, None) + .await; assert_fetch_collation_request( &mut virtual_overseer, test_state.relay_parent, test_state.chain_ids[0], + None, + ) + .await; + + virtual_overseer + }); +} + +/// Tests that validator side works with vstaging network protocol +/// before async backing is enabled. 
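+/// The advertisement and the resulting fetch request are both expected to use
+/// the vstaging protocol even though prospective parachains are disabled at
+/// this relay parent.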
+#[test] +fn act_on_advertisement_vstaging() { + let test_state = TestState::default(); + + test_harness(|test_harness| async move { + let TestHarness { mut virtual_overseer, .. } = test_harness; + + let pair = CollatorPair::generate().0; + gum::trace!("activating"); + + overseer_send( + &mut virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( + our_view![test_state.relay_parent], + )), + ) + .await; + + assert_runtime_version_request(&mut virtual_overseer, test_state.relay_parent).await; + respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; + + let peer_b = PeerId::random(); + + connect_and_declare_collator( + &mut virtual_overseer, + peer_b.clone(), + pair.clone(), + test_state.chain_ids[0], + CollationVersion::VStaging, + ) + .await; + + let candidate_hash = CandidateHash::default(); + let parent_head_data_hash = Hash::zero(); + // vstaging advertisement. + advertise_collation( + &mut virtual_overseer, + peer_b.clone(), + test_state.relay_parent, + Some((candidate_hash, parent_head_data_hash)), + ) + .await; + + assert_fetch_collation_request( + &mut virtual_overseer, + test_state.relay_parent, + test_state.chain_ids[0], + Some(candidate_hash), ) .await; @@ -423,7 +554,7 @@ fn collator_reporting_works() { let test_state = TestState::default(); test_harness(|test_harness| async move { - let TestHarness { mut virtual_overseer } = test_harness; + let TestHarness { mut virtual_overseer, .. } = test_harness; overseer_send( &mut virtual_overseer, @@ -433,6 +564,8 @@ fn collator_reporting_works() { ) .await; + assert_runtime_version_request(&mut virtual_overseer, test_state.relay_parent).await; + respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -443,6 +576,7 @@ fn collator_reporting_works() { peer_b.clone(), test_state.collators[0].clone(), test_state.chain_ids[0].clone(), + CollationVersion::V1, ) .await; @@ -451,6 +585,7 @@ fn collator_reporting_works() { peer_c.clone(), test_state.collators[1].clone(), test_state.chain_ids[0].clone(), + CollationVersion::V1, ) .await; @@ -480,7 +615,7 @@ fn collator_authentication_verification_works() { let test_state = TestState::default(); test_harness(|test_harness| async move { - let TestHarness { mut virtual_overseer } = test_harness; + let TestHarness { mut virtual_overseer, .. } = test_harness; let peer_b = PeerId::random(); @@ -531,20 +666,25 @@ fn fetch_one_collation_at_a_time() { let test_state = TestState::default(); test_harness(|test_harness| async move { - let TestHarness { mut virtual_overseer } = test_harness; + let TestHarness { mut virtual_overseer, .. } = test_harness; let second = Hash::random(); + let our_view = our_view![test_state.relay_parent, second]; + overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent, second], + our_view.clone(), )), ) .await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; + // Iter over view since the order may change due to sorted invariant. 
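+		// (`OurView` is assumed to keep its heads sorted, so the iteration
+		// order here may differ from the order the heads were listed in the
+		// `our_view![..]` macro.)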
+ for hash in our_view.iter() { + assert_runtime_version_request(&mut virtual_overseer, *hash).await; + respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; + } let peer_b = PeerId::random(); let peer_c = PeerId::random(); @@ -554,6 +694,7 @@ fn fetch_one_collation_at_a_time() { peer_b.clone(), test_state.collators[0].clone(), test_state.chain_ids[0].clone(), + CollationVersion::V1, ) .await; @@ -562,16 +703,20 @@ fn fetch_one_collation_at_a_time() { peer_c.clone(), test_state.collators[1].clone(), test_state.chain_ids[0].clone(), + CollationVersion::V1, ) .await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), test_state.relay_parent).await; - advertise_collation(&mut virtual_overseer, peer_c.clone(), test_state.relay_parent).await; + advertise_collation(&mut virtual_overseer, peer_b.clone(), test_state.relay_parent, None) + .await; + advertise_collation(&mut virtual_overseer, peer_c.clone(), test_state.relay_parent, None) + .await; let response_channel = assert_fetch_collation_request( &mut virtual_overseer, test_state.relay_parent, test_state.chain_ids[0], + None, ) .await; @@ -585,10 +730,13 @@ fn fetch_one_collation_at_a_time() { dummy_candidate_receipt_bad_sig(dummy_hash(), Some(Default::default())); candidate_a.descriptor.para_id = test_state.chain_ids[0]; candidate_a.descriptor.relay_parent = test_state.relay_parent; + candidate_a.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); response_channel - .send(Ok( - CollationFetchingResponse::Collation(candidate_a.clone(), pov.clone()).encode() - )) + .send(Ok(request_v1::CollationFetchingResponse::Collation( + candidate_a.clone(), + pov.clone(), + ) + .encode())) .expect("Sending response should succeed"); assert_candidate_backing_second( @@ -596,6 +744,7 @@ fn fetch_one_collation_at_a_time() { test_state.relay_parent, test_state.chain_ids[0], &pov, + ProspectiveParachainsMode::Disabled, ) .await; @@ -616,20 +765,24 @@ fn fetches_next_collation() { let test_state = TestState::default(); test_harness(|test_harness| async move { - let TestHarness { mut virtual_overseer } = test_harness; + let TestHarness { mut virtual_overseer, .. 
} = test_harness; let second = Hash::random(); + let our_view = our_view![test_state.relay_parent, second]; + overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent, second], + our_view.clone(), )), ) .await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; + for hash in our_view.iter() { + assert_runtime_version_request(&mut virtual_overseer, *hash).await; + respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; + } let peer_b = PeerId::random(); let peer_c = PeerId::random(); @@ -640,6 +793,7 @@ fn fetches_next_collation() { peer_b.clone(), test_state.collators[2].clone(), test_state.chain_ids[0].clone(), + CollationVersion::V1, ) .await; @@ -648,6 +802,7 @@ fn fetches_next_collation() { peer_c.clone(), test_state.collators[3].clone(), test_state.chain_ids[0].clone(), + CollationVersion::V1, ) .await; @@ -656,45 +811,64 @@ fn fetches_next_collation() { peer_d.clone(), test_state.collators[4].clone(), test_state.chain_ids[0].clone(), + CollationVersion::V1, ) .await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), second).await; - advertise_collation(&mut virtual_overseer, peer_c.clone(), second).await; - advertise_collation(&mut virtual_overseer, peer_d.clone(), second).await; + advertise_collation(&mut virtual_overseer, peer_b.clone(), second, None).await; + advertise_collation(&mut virtual_overseer, peer_c.clone(), second, None).await; + advertise_collation(&mut virtual_overseer, peer_d.clone(), second, None).await; // Dropping the response channel should lead to fetching the second collation. - assert_fetch_collation_request(&mut virtual_overseer, second, test_state.chain_ids[0]) - .await; + assert_fetch_collation_request( + &mut virtual_overseer, + second, + test_state.chain_ids[0], + None, + ) + .await; - let response_channel_non_exclusive = - assert_fetch_collation_request(&mut virtual_overseer, second, test_state.chain_ids[0]) - .await; + let response_channel_non_exclusive = assert_fetch_collation_request( + &mut virtual_overseer, + second, + test_state.chain_ids[0], + None, + ) + .await; // Third collator should receive response after that timeout: Delay::new(MAX_UNSHARED_DOWNLOAD_TIME + Duration::from_millis(50)).await; - let response_channel = - assert_fetch_collation_request(&mut virtual_overseer, second, test_state.chain_ids[0]) - .await; + let response_channel = assert_fetch_collation_request( + &mut virtual_overseer, + second, + test_state.chain_ids[0], + None, + ) + .await; let pov = PoV { block_data: BlockData(vec![1]) }; let mut candidate_a = dummy_candidate_receipt_bad_sig(dummy_hash(), Some(Default::default())); candidate_a.descriptor.para_id = test_state.chain_ids[0]; candidate_a.descriptor.relay_parent = second; + candidate_a.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); // First request finishes now: response_channel_non_exclusive - .send(Ok( - CollationFetchingResponse::Collation(candidate_a.clone(), pov.clone()).encode() - )) + .send(Ok(request_v1::CollationFetchingResponse::Collation( + candidate_a.clone(), + pov.clone(), + ) + .encode())) .expect("Sending response should succeed"); response_channel - .send(Ok( - CollationFetchingResponse::Collation(candidate_a.clone(), pov.clone()).encode() - )) + .send(Ok(request_v1::CollationFetchingResponse::Collation( + candidate_a.clone(), + pov.clone(), + ) + .encode())) 
.expect("Sending response should succeed"); assert_candidate_backing_second( @@ -702,6 +876,7 @@ fn fetches_next_collation() { second, test_state.chain_ids[0], &pov, + ProspectiveParachainsMode::Disabled, ) .await; @@ -714,7 +889,7 @@ fn reject_connection_to_next_group() { let test_state = TestState::default(); test_harness(|test_harness| async move { - let TestHarness { mut virtual_overseer } = test_harness; + let TestHarness { mut virtual_overseer, .. } = test_harness; overseer_send( &mut virtual_overseer, @@ -724,6 +899,7 @@ fn reject_connection_to_next_group() { ) .await; + assert_runtime_version_request(&mut virtual_overseer, test_state.relay_parent).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -733,6 +909,7 @@ fn reject_connection_to_next_group() { peer_b.clone(), test_state.collators[0].clone(), test_state.chain_ids[1].clone(), // next, not current `para_id` + CollationVersion::V1, ) .await; @@ -759,20 +936,24 @@ fn fetch_next_collation_on_invalid_collation() { let test_state = TestState::default(); test_harness(|test_harness| async move { - let TestHarness { mut virtual_overseer } = test_harness; + let TestHarness { mut virtual_overseer, .. } = test_harness; let second = Hash::random(); + let our_view = our_view![test_state.relay_parent, second]; + overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![test_state.relay_parent, second], + our_view.clone(), )), ) .await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; + for hash in our_view.iter() { + assert_runtime_version_request(&mut virtual_overseer, *hash).await; + respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; + } let peer_b = PeerId::random(); let peer_c = PeerId::random(); @@ -782,6 +963,7 @@ fn fetch_next_collation_on_invalid_collation() { peer_b.clone(), test_state.collators[0].clone(), test_state.chain_ids[0].clone(), + CollationVersion::V1, ) .await; @@ -790,16 +972,20 @@ fn fetch_next_collation_on_invalid_collation() { peer_c.clone(), test_state.collators[1].clone(), test_state.chain_ids[0].clone(), + CollationVersion::V1, ) .await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), test_state.relay_parent).await; - advertise_collation(&mut virtual_overseer, peer_c.clone(), test_state.relay_parent).await; + advertise_collation(&mut virtual_overseer, peer_b.clone(), test_state.relay_parent, None) + .await; + advertise_collation(&mut virtual_overseer, peer_c.clone(), test_state.relay_parent, None) + .await; let response_channel = assert_fetch_collation_request( &mut virtual_overseer, test_state.relay_parent, test_state.chain_ids[0], + None, ) .await; @@ -808,10 +994,13 @@ fn fetch_next_collation_on_invalid_collation() { dummy_candidate_receipt_bad_sig(dummy_hash(), Some(Default::default())); candidate_a.descriptor.para_id = test_state.chain_ids[0]; candidate_a.descriptor.relay_parent = test_state.relay_parent; + candidate_a.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); response_channel - .send(Ok( - CollationFetchingResponse::Collation(candidate_a.clone(), pov.clone()).encode() - )) + .send(Ok(request_v1::CollationFetchingResponse::Collation( + candidate_a.clone(), + pov.clone(), + ) + .encode())) .expect("Sending response should succeed"); let receipt = assert_candidate_backing_second( @@ -819,6 +1008,7 @@ fn 
fetch_next_collation_on_invalid_collation() { test_state.relay_parent, test_state.chain_ids[0], &pov, + ProspectiveParachainsMode::Disabled, ) .await; @@ -845,6 +1035,7 @@ fn fetch_next_collation_on_invalid_collation() { &mut virtual_overseer, test_state.relay_parent, test_state.chain_ids[0], + None, ) .await; @@ -857,7 +1048,7 @@ fn inactive_disconnected() { let test_state = TestState::default(); test_harness(|test_harness| async move { - let TestHarness { mut virtual_overseer } = test_harness; + let TestHarness { mut virtual_overseer, .. } = test_harness; let pair = CollatorPair::generate().0; @@ -871,6 +1062,7 @@ fn inactive_disconnected() { ) .await; + assert_runtime_version_request(&mut virtual_overseer, hash_a).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -880,14 +1072,17 @@ fn inactive_disconnected() { peer_b.clone(), pair.clone(), test_state.chain_ids[0], + CollationVersion::V1, ) .await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), test_state.relay_parent).await; + advertise_collation(&mut virtual_overseer, peer_b.clone(), test_state.relay_parent, None) + .await; assert_fetch_collation_request( &mut virtual_overseer, test_state.relay_parent, test_state.chain_ids[0], + None, ) .await; @@ -903,7 +1098,7 @@ fn activity_extends_life() { let test_state = TestState::default(); test_harness(|test_harness| async move { - let TestHarness { mut virtual_overseer } = test_harness; + let TestHarness { mut virtual_overseer, .. } = test_harness; let pair = CollatorPair::generate().0; @@ -911,18 +1106,20 @@ fn activity_extends_life() { let hash_b = Hash::repeat_byte(1); let hash_c = Hash::repeat_byte(2); + let our_view = our_view![hash_a, hash_b, hash_c]; + overseer_send( &mut virtual_overseer, CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange( - our_view![hash_a, hash_b, hash_c], + our_view.clone(), )), ) .await; - // 3 heads, 3 times. 
- respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; - respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; + for hash in our_view.iter() { + assert_runtime_version_request(&mut virtual_overseer, *hash).await; + respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; + } let peer_b = PeerId::random(); @@ -931,29 +1128,45 @@ fn activity_extends_life() { peer_b.clone(), pair.clone(), test_state.chain_ids[0], + CollationVersion::V1, ) .await; Delay::new(ACTIVITY_TIMEOUT * 2 / 3).await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), hash_a).await; + advertise_collation(&mut virtual_overseer, peer_b.clone(), hash_a, None).await; - assert_fetch_collation_request(&mut virtual_overseer, hash_a, test_state.chain_ids[0]) - .await; + assert_fetch_collation_request( + &mut virtual_overseer, + hash_a, + test_state.chain_ids[0], + None, + ) + .await; Delay::new(ACTIVITY_TIMEOUT * 2 / 3).await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), hash_b).await; + advertise_collation(&mut virtual_overseer, peer_b.clone(), hash_b, None).await; - assert_fetch_collation_request(&mut virtual_overseer, hash_b, test_state.chain_ids[0]) - .await; + assert_fetch_collation_request( + &mut virtual_overseer, + hash_b, + test_state.chain_ids[0], + None, + ) + .await; Delay::new(ACTIVITY_TIMEOUT * 2 / 3).await; - advertise_collation(&mut virtual_overseer, peer_b.clone(), hash_c).await; + advertise_collation(&mut virtual_overseer, peer_b.clone(), hash_c, None).await; - assert_fetch_collation_request(&mut virtual_overseer, hash_c, test_state.chain_ids[0]) - .await; + assert_fetch_collation_request( + &mut virtual_overseer, + hash_c, + test_state.chain_ids[0], + None, + ) + .await; Delay::new(ACTIVITY_TIMEOUT * 3 / 2).await; @@ -968,7 +1181,7 @@ fn disconnect_if_no_declare() { let test_state = TestState::default(); test_harness(|test_harness| async move { - let TestHarness { mut virtual_overseer } = test_harness; + let TestHarness { mut virtual_overseer, .. } = test_harness; overseer_send( &mut virtual_overseer, @@ -978,6 +1191,7 @@ fn disconnect_if_no_declare() { ) .await; + assert_runtime_version_request(&mut virtual_overseer, test_state.relay_parent).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -1004,7 +1218,7 @@ fn disconnect_if_wrong_declare() { let test_state = TestState::default(); test_harness(|test_harness| async move { - let TestHarness { mut virtual_overseer } = test_harness; + let TestHarness { mut virtual_overseer, .. } = test_harness; let pair = CollatorPair::generate().0; @@ -1016,6 +1230,7 @@ fn disconnect_if_wrong_declare() { ) .await; + assert_runtime_version_request(&mut virtual_overseer, test_state.relay_parent).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -1066,7 +1281,7 @@ fn view_change_clears_old_collators() { let mut test_state = TestState::default(); test_harness(|test_harness| async move { - let TestHarness { mut virtual_overseer } = test_harness; + let TestHarness { mut virtual_overseer, .. 
} = test_harness; let pair = CollatorPair::generate().0; @@ -1078,6 +1293,7 @@ fn view_change_clears_old_collators() { ) .await; + assert_runtime_version_request(&mut virtual_overseer, test_state.relay_parent).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -1087,6 +1303,7 @@ fn view_change_clears_old_collators() { peer_b.clone(), pair.clone(), test_state.chain_ids[0], + CollationVersion::V1, ) .await; @@ -1101,6 +1318,7 @@ fn view_change_clears_old_collators() { .await; test_state.group_rotation_info = test_state.group_rotation_info.bump_rotation(); + assert_runtime_version_request(&mut virtual_overseer, hash_b).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; assert_collator_disconnect(&mut virtual_overseer, peer_b.clone()).await; diff --git a/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs new file mode 100644 index 000000000000..1eccb97cbd67 --- /dev/null +++ b/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -0,0 +1,616 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tests for the validator side with enabled prospective parachains. + +use super::*; + +use polkadot_node_subsystem::messages::ChainApiMessage; +use polkadot_primitives::v2::{ + BlockNumber, CandidateCommitments, CommittedCandidateReceipt, Header, SigningContext, + ValidatorId, +}; + +const ALLOWED_ANCESTRY: u32 = 3; + +fn get_parent_hash(hash: Hash) -> Hash { + Hash::from_low_u64_be(hash.to_low_u64_be() + 1) +} + +async fn assert_assign_incoming( + virtual_overseer: &mut VirtualOverseer, + test_state: &TestState, + hash: Hash, + number: BlockNumber, + next_msg: &mut Option, +) { + let msg = match next_msg.take() { + Some(msg) => msg, + None => overseer_recv(virtual_overseer).await, + }; + assert_matches!( + msg, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::Validators(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.validator_public.clone())).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx)) + ) if parent == hash => { + let validator_groups = test_state.validator_groups.clone(); + let mut group_rotation_info = test_state.group_rotation_info.clone(); + group_rotation_info.now = number; + tx.send(Ok((validator_groups, group_rotation_info))).unwrap(); + } + ); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) if parent == hash => { + tx.send(Ok(test_state.cores.clone())).unwrap(); + } + ); +} + +/// Handle a view update. 
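+/// For each newly activated leaf this drives: the runtime API version request,
+/// the assignment queries handled by `assert_assign_incoming`, a
+/// `GetMinimumRelayParents` request to the prospective parachains subsystem,
+/// `BlockHeader` requests for the allowed ancestry and, finally, the
+/// assignment queries for each fetched ancestor.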
+async fn update_view( + virtual_overseer: &mut VirtualOverseer, + test_state: &TestState, + new_view: Vec<(Hash, u32)>, // Hash and block number. + activated: u8, // How many new heads does this update contain? +) { + let new_view: HashMap = HashMap::from_iter(new_view); + + let our_view = + OurView::new(new_view.keys().map(|hash| (*hash, Arc::new(jaeger::Span::Disabled))), 0); + + overseer_send( + virtual_overseer, + CollatorProtocolMessage::NetworkBridgeUpdate(NetworkBridgeEvent::OurViewChange(our_view)), + ) + .await; + + let mut next_overseer_message = None; + for _ in 0..activated { + let (leaf_hash, leaf_number) = assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + parent, + RuntimeApiRequest::Version(tx), + )) => { + tx.send(Ok(RuntimeApiRequest::VALIDITY_CONSTRAINTS)).unwrap(); + (parent, new_view.get(&parent).copied().expect("Unknown parent requested")) + } + ); + + assert_assign_incoming( + virtual_overseer, + test_state, + leaf_hash, + leaf_number, + &mut next_overseer_message, + ) + .await; + + let min_number = leaf_number.saturating_sub(ALLOWED_ANCESTRY); + + assert_matches!( + overseer_recv(virtual_overseer).await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx), + ) if parent == leaf_hash => { + tx.send(test_state.chain_ids.iter().map(|para_id| (*para_id, min_number)).collect()).unwrap(); + } + ); + + let ancestry_len = leaf_number + 1 - min_number; + let ancestry_hashes = std::iter::successors(Some(leaf_hash), |h| Some(get_parent_hash(*h))) + .take(ancestry_len as usize); + let ancestry_numbers = (min_number..=leaf_number).rev(); + let ancestry_iter = ancestry_hashes.clone().zip(ancestry_numbers).peekable(); + + // How many blocks were actually requested. + let mut requested_len: usize = 0; + { + let mut ancestry_iter = ancestry_iter.clone(); + loop { + let (hash, number) = match ancestry_iter.next() { + Some((hash, number)) => (hash, number), + None => break, + }; + + // May be `None` for the last element. + let parent_hash = + ancestry_iter.peek().map(|(h, _)| *h).unwrap_or_else(|| get_parent_hash(hash)); + + let msg = match next_overseer_message.take() { + Some(msg) => msg, + None => overseer_recv(virtual_overseer).await, + }; + + if !matches!(&msg, AllMessages::ChainApi(ChainApiMessage::BlockHeader(..))) { + // Ancestry has already been cached for this leaf. + next_overseer_message.replace(msg); + break + } + + assert_matches!( + msg, + AllMessages::ChainApi(ChainApiMessage::BlockHeader(.., tx)) => { + let header = Header { + parent_hash, + number, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + }; + + tx.send(Ok(Some(header))).unwrap(); + } + ); + + requested_len += 1; + } + } + + // Skip the leaf. 
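+		// (it was already initialized through `assert_assign_incoming` right
+		// after the version request above).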
+		for (hash, number) in ancestry_iter.skip(1).take(requested_len.saturating_sub(1)) {
+			assert_assign_incoming(
+				virtual_overseer,
+				test_state,
+				hash,
+				number,
+				&mut next_overseer_message,
+			)
+			.await;
+		}
+	}
+}
+
+async fn send_seconded_statement(
+	virtual_overseer: &mut VirtualOverseer,
+	keystore: SyncCryptoStorePtr,
+	candidate: &CommittedCandidateReceipt,
+) {
+	let signing_context = SigningContext { session_index: 0, parent_hash: Hash::zero() };
+	let stmt = SignedFullStatement::sign(
+		&keystore,
+		Statement::Seconded(candidate.clone()),
+		&signing_context,
+		ValidatorIndex(0),
+		&ValidatorId::from(Sr25519Keyring::Alice.public()),
+	)
+	.await
+	.ok()
+	.flatten()
+	.expect("should be signed");
+
+	overseer_send(
+		virtual_overseer,
+		CollatorProtocolMessage::Seconded(candidate.descriptor.relay_parent, stmt),
+	)
+	.await;
+}
+
+async fn assert_collation_seconded(
+	virtual_overseer: &mut VirtualOverseer,
+	relay_parent: Hash,
+	peer_id: PeerId,
+) {
+	assert_matches!(
+		overseer_recv(virtual_overseer).await,
+		AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(
+			peer,
+			rep,
+		)) => {
+			assert_eq!(peer_id, peer);
+			assert_eq!(rep, BENEFIT_NOTIFY_GOOD);
+		}
+	);
+	assert_matches!(
+		overseer_recv(virtual_overseer).await,
+		AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendCollationMessage(
+			peers,
+			Versioned::VStaging(protocol_vstaging::CollationProtocol::CollatorProtocol(
+				protocol_vstaging::CollatorProtocolMessage::CollationSeconded(
+					_relay_parent,
+					..,
+				),
+			)),
+		)) => {
+			assert_eq!(peers, vec![peer_id]);
+			assert_eq!(relay_parent, _relay_parent);
+		}
+	);
+}
+
+#[test]
+fn v1_advertisement_rejected() {
+	let test_state = TestState::default();
+
+	test_harness(|test_harness| async move {
+		let TestHarness { mut virtual_overseer, .. } = test_harness;
+
+		let pair_a = CollatorPair::generate().0;
+
+		let head_b = Hash::from_low_u64_be(128);
+		let head_b_num: u32 = 0;
+
+		update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await;
+
+		let peer_a = PeerId::random();
+
+		// Connect a single collator over the V1 protocol version.
+		connect_and_declare_collator(
+			&mut virtual_overseer,
+			peer_a,
+			pair_a.clone(),
+			test_state.chain_ids[0],
+			CollationVersion::V1,
+		)
+		.await;
+
+		advertise_collation(&mut virtual_overseer, peer_a, head_b, None).await;
+
+		// Not reported.
+		assert!(overseer_recv_with_timeout(&mut virtual_overseer, Duration::from_millis(50))
+			.await
+			.is_none());
+
+		virtual_overseer
+	});
+}
+
+#[test]
+fn accept_advertisements_from_implicit_view() {
+	let test_state = TestState::default();
+
+	test_harness(|test_harness| async move {
+		let TestHarness { mut virtual_overseer, .. } = test_harness;
+
+		let pair_a = CollatorPair::generate().0;
+		let pair_b = CollatorPair::generate().0;
+
+		let head_b = Hash::from_low_u64_be(128);
+		let head_b_num: u32 = 2;
+
+		// Grandparent of head `b`.
+		// Group rotation frequency is 1 by default, at `c` we're assigned
+		// to the first para.
+		let head_c = Hash::from_low_u64_be(130);
+
+		// Activated leaf is `b`, but the collation will be based on `c`.
+		update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await;
+
+		let peer_a = PeerId::random();
+		let peer_b = PeerId::random();
+
+		// Accept both collators from the implicit view.
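+		// (`peer_a` declares for the first para and `peer_b` for the second,
+		// both over the vstaging protocol).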
+ connect_and_declare_collator( + &mut virtual_overseer, + peer_a, + pair_a.clone(), + test_state.chain_ids[0], + CollationVersion::VStaging, + ) + .await; + connect_and_declare_collator( + &mut virtual_overseer, + peer_b, + pair_b.clone(), + test_state.chain_ids[1], + CollationVersion::VStaging, + ) + .await; + + let candidate_hash = CandidateHash::default(); + let parent_head_data_hash = Hash::zero(); + advertise_collation( + &mut virtual_overseer, + peer_b, + head_c, + Some((candidate_hash, parent_head_data_hash)), + ) + .await; + // Advertise with different para. + advertise_collation( + &mut virtual_overseer, + peer_a, + head_c, + Some((candidate_hash, parent_head_data_hash)), + ) + .await; + + let response_channel = assert_fetch_collation_request( + &mut virtual_overseer, + head_c, + test_state.chain_ids[1], + Some(candidate_hash), + ) + .await; + + // Respond with an error to abort seconding. + response_channel + .send(Err(sc_network::RequestFailure::NotConnected)) + .expect("Sending response should succeed"); + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(..),) + ); + + assert_fetch_collation_request( + &mut virtual_overseer, + head_c, + test_state.chain_ids[0], + Some(candidate_hash), + ) + .await; + + virtual_overseer + }); +} + +#[test] +fn second_multiple_candidates_per_relay_parent() { + let test_state = TestState::default(); + + test_harness(|test_harness| async move { + let TestHarness { mut virtual_overseer, keystore } = test_harness; + + let pair = CollatorPair::generate().0; + + // Grandparent of head `a`. + let head_b = Hash::from_low_u64_be(128); + let head_b_num: u32 = 2; + + // Grandparent of head `b`. + // Group rotation frequency is 1 by default, at `c` we're assigned + // to the first para. + let head_c = Hash::from_low_u64_be(130); + + // Activated leaf is `b`, but the collation will be based on `c`. 
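+		// `head_c` is two blocks deep in the ancestry of `head_b`, within the
+		// `ALLOWED_ANCESTRY` window, so collations anchored to it are accepted.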
+ update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + + let peer_a = PeerId::random(); + + connect_and_declare_collator( + &mut virtual_overseer, + peer_a, + pair.clone(), + test_state.chain_ids[0], + CollationVersion::VStaging, + ) + .await; + + for i in 0..(MAX_CANDIDATE_DEPTH + 1) { + let mut candidate = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default())); + candidate.descriptor.para_id = test_state.chain_ids[0]; + candidate.descriptor.relay_parent = head_c; + candidate.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); + let commitments = CandidateCommitments { + head_data: HeadData(vec![i as u8]), + horizontal_messages: Vec::new(), + upward_messages: Vec::new(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }; + candidate.commitments_hash = commitments.hash(); + + let candidate_hash = candidate.hash(); + let parent_head_data_hash = Hash::zero(); + + advertise_collation( + &mut virtual_overseer, + peer_a, + head_c, + Some((candidate_hash, parent_head_data_hash)), + ) + .await; + + let response_channel = assert_fetch_collation_request( + &mut virtual_overseer, + head_c, + test_state.chain_ids[0], + Some(candidate_hash), + ) + .await; + + let pov = PoV { block_data: BlockData(vec![1]) }; + + response_channel + .send(Ok(request_vstaging::CollationFetchingResponse::Collation( + candidate.clone(), + pov.clone(), + ) + .encode())) + .expect("Sending response should succeed"); + + assert_candidate_backing_second( + &mut virtual_overseer, + head_c, + test_state.chain_ids[0], + &pov, + ProspectiveParachainsMode::Enabled, + ) + .await; + + let candidate = + CommittedCandidateReceipt { descriptor: candidate.descriptor, commitments }; + + send_seconded_statement(&mut virtual_overseer, keystore.clone(), &candidate).await; + + assert_collation_seconded(&mut virtual_overseer, head_c, peer_a).await; + } + + // No more advertisements can be made for this relay parent. + let candidate_hash = CandidateHash(Hash::repeat_byte(0xAA)); + advertise_collation( + &mut virtual_overseer, + peer_a, + head_c, + Some((candidate_hash, Hash::zero())), + ) + .await; + + // Reported because reached the limit of advertisements per relay parent. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::ReportPeer(peer_id, rep), + ) => { + assert_eq!(peer_a, peer_id); + assert_eq!(rep, COST_UNEXPECTED_MESSAGE); + } + ); + + // By different peer too (not reported). + let pair_b = CollatorPair::generate().0; + let peer_b = PeerId::random(); + + connect_and_declare_collator( + &mut virtual_overseer, + peer_b, + pair_b.clone(), + test_state.chain_ids[0], + CollationVersion::VStaging, + ) + .await; + + let candidate_hash = CandidateHash(Hash::repeat_byte(0xFF)); + advertise_collation( + &mut virtual_overseer, + peer_b, + head_c, + Some((candidate_hash, Hash::zero())), + ) + .await; + + assert!(overseer_recv_with_timeout(&mut virtual_overseer, Duration::from_millis(50)) + .await + .is_none()); + + virtual_overseer + }); +} + +#[test] +fn fetched_collation_sanity_check() { + let test_state = TestState::default(); + + test_harness(|test_harness| async move { + let TestHarness { mut virtual_overseer, .. } = test_harness; + + let pair = CollatorPair::generate().0; + + // Grandparent of head `a`. + let head_b = Hash::from_low_u64_be(128); + let head_b_num: u32 = 2; + + // Grandparent of head `b`. 
+ // Group rotation frequency is 1 by default, at `c` we're assigned + // to the first para. + let head_c = Hash::from_low_u64_be(130); + + // Activated leaf is `b`, but the collation will be based on `c`. + update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + + let peer_a = PeerId::random(); + + connect_and_declare_collator( + &mut virtual_overseer, + peer_a, + pair.clone(), + test_state.chain_ids[0], + CollationVersion::VStaging, + ) + .await; + + let mut candidate = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default())); + candidate.descriptor.para_id = test_state.chain_ids[0]; + candidate.descriptor.relay_parent = head_c; + let commitments = CandidateCommitments { + head_data: HeadData(vec![1, 2, 3]), + horizontal_messages: Vec::new(), + upward_messages: Vec::new(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: 0, + }; + candidate.commitments_hash = commitments.hash(); + + let candidate_hash = CandidateHash(Hash::zero()); + let parent_head_data_hash = Hash::zero(); + + advertise_collation( + &mut virtual_overseer, + peer_a, + head_c, + Some((candidate_hash, parent_head_data_hash)), + ) + .await; + + let response_channel = assert_fetch_collation_request( + &mut virtual_overseer, + head_c, + test_state.chain_ids[0], + Some(candidate_hash), + ) + .await; + + let pov = PoV { block_data: BlockData(vec![1]) }; + + response_channel + .send(Ok(request_vstaging::CollationFetchingResponse::Collation( + candidate.clone(), + pov.clone(), + ) + .encode())) + .expect("Sending response should succeed"); + + // PVD request. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx), + ) => { + assert_eq!(head_c, request.candidate_relay_parent); + assert_eq!(test_state.chain_ids[0], request.para_id); + tx.send(Some(dummy_pvd())).unwrap(); + } + ); + + // Reported malicious. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::ReportPeer(peer_id, rep), + ) => { + assert_eq!(peer_a, peer_id); + assert_eq!(rep, COST_REPORT_BAD); + } + ); + + virtual_overseer + }); +} diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index a45bca82df49..7e08d22697f5 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -592,7 +592,7 @@ pub mod vstaging { use parity_scale_codec::{Decode, Encode}; use polkadot_primitives::vstaging::{ - CandidateIndex, CollatorId, CollatorSignature, Hash, Id as ParaId, + CandidateHash, CandidateIndex, CollatorId, CollatorSignature, Hash, Id as ParaId, UncheckedSignedAvailabilityBitfield, }; @@ -651,7 +651,14 @@ pub mod vstaging { /// Advertise a collation to a validator. Can only be sent once the peer has /// declared that they are a collator with given ID. #[codec(index = 1)] - AdvertiseCollation(Hash), + AdvertiseCollation { + /// Hash of the relay parent advertised collation is based on. + relay_parent: Hash, + /// Candidate hash. + candidate_hash: CandidateHash, + /// Parachain head data hash before candidate execution. + parent_head_data_hash: Hash, + }, /// A collation sent to a validator was seconded. 
#[codec(index = 4)] CollationSeconded(Hash, UncheckedSignedFullStatement), diff --git a/node/network/protocol/src/request_response/mod.rs b/node/network/protocol/src/request_response/mod.rs index d24537e219c7..13541f810f91 100644 --- a/node/network/protocol/src/request_response/mod.rs +++ b/node/network/protocol/src/request_response/mod.rs @@ -55,6 +55,9 @@ pub use outgoing::{OutgoingRequest, OutgoingResult, Recipient, Requests, Respons /// Actual versioned requests and responses, that are sent over the wire. pub mod v1; +/// Staging requests to be sent over the wire. +pub mod vstaging; + /// A protocol per subsystem seems to make the most sense, this way we don't need any dispatching /// within protocols. #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, EnumIter)] @@ -63,6 +66,8 @@ pub enum Protocol { ChunkFetchingV1, /// Protocol for fetching collations from collators. CollationFetchingV1, + /// Protocol for fetching collations from collators when async backing is enabled. + CollationFetchingVStaging, /// Protocol for fetching seconded PoVs from validators of the same group. PoVFetchingV1, /// Protocol for fetching available data. @@ -147,15 +152,16 @@ impl Protocol { request_timeout: CHUNK_REQUEST_TIMEOUT, inbound_queue: Some(tx), }, - Protocol::CollationFetchingV1 => RequestResponseConfig { - name, - fallback_names, - max_request_size: 1_000, - max_response_size: POV_RESPONSE_SIZE, - // Taken from initial implementation in collator protocol: - request_timeout: POV_REQUEST_TIMEOUT_CONNECTED, - inbound_queue: Some(tx), - }, + Protocol::CollationFetchingV1 | Protocol::CollationFetchingVStaging => + RequestResponseConfig { + name, + fallback_names, + max_request_size: 1_000, + max_response_size: POV_RESPONSE_SIZE, + // Taken from initial implementation in collator protocol: + request_timeout: POV_REQUEST_TIMEOUT_CONNECTED, + inbound_queue: Some(tx), + }, Protocol::PoVFetchingV1 => RequestResponseConfig { name, fallback_names, @@ -215,7 +221,7 @@ impl Protocol { // as well. Protocol::ChunkFetchingV1 => 100, // 10 seems reasonable, considering group sizes of max 10 validators. - Protocol::CollationFetchingV1 => 10, + Protocol::CollationFetchingV1 | Protocol::CollationFetchingVStaging => 10, // 10 seems reasonable, considering group sizes of max 10 validators. 
Protocol::PoVFetchingV1 => 10, // Validators are constantly self-selecting to request available data which may lead @@ -259,6 +265,7 @@ impl Protocol { match self { Protocol::ChunkFetchingV1 => "/polkadot/req_chunk/1", Protocol::CollationFetchingV1 => "/polkadot/req_collation/1", + Protocol::CollationFetchingVStaging => "/polkadot/req_collation/2", Protocol::PoVFetchingV1 => "/polkadot/req_pov/1", Protocol::AvailableDataFetchingV1 => "/polkadot/req_available_data/1", Protocol::StatementFetchingV1 => "/polkadot/req_statement/1", @@ -314,6 +321,7 @@ impl ReqProtocolNames { let short_name = match protocol { Protocol::ChunkFetchingV1 => "/req_chunk/1", Protocol::CollationFetchingV1 => "/req_collation/1", + Protocol::CollationFetchingVStaging => "/req_collation/2", Protocol::PoVFetchingV1 => "/req_pov/1", Protocol::AvailableDataFetchingV1 => "/req_available_data/1", Protocol::StatementFetchingV1 => "/req_statement/1", diff --git a/node/network/protocol/src/request_response/outgoing.rs b/node/network/protocol/src/request_response/outgoing.rs index b93c4e93cd31..e01111d1cf12 100644 --- a/node/network/protocol/src/request_response/outgoing.rs +++ b/node/network/protocol/src/request_response/outgoing.rs @@ -23,7 +23,7 @@ use sc_network::PeerId; use polkadot_primitives::v2::AuthorityDiscoveryId; -use super::{v1, IsRequest, Protocol}; +use super::{v1, vstaging, IsRequest, Protocol}; /// All requests that can be sent to the network bridge via `NetworkBridgeTxMessage::SendRequest`. #[derive(Debug)] @@ -40,6 +40,10 @@ pub enum Requests { StatementFetchingV1(OutgoingRequest), /// Requests for notifying about an ongoing dispute. DisputeSendingV1(OutgoingRequest), + + /// Fetch a collation from a collator which previously announced it. + /// Compared to V1 it requires specifying which candidate is requested by its hash. + CollationFetchingVStaging(OutgoingRequest), } impl Requests { @@ -48,6 +52,7 @@ impl Requests { match self { Self::ChunkFetchingV1(_) => Protocol::ChunkFetchingV1, Self::CollationFetchingV1(_) => Protocol::CollationFetchingV1, + Self::CollationFetchingVStaging(_) => Protocol::CollationFetchingVStaging, Self::PoVFetchingV1(_) => Protocol::PoVFetchingV1, Self::AvailableDataFetchingV1(_) => Protocol::AvailableDataFetchingV1, Self::StatementFetchingV1(_) => Protocol::StatementFetchingV1, @@ -66,6 +71,7 @@ impl Requests { match self { Self::ChunkFetchingV1(r) => r.encode_request(), Self::CollationFetchingV1(r) => r.encode_request(), + Self::CollationFetchingVStaging(r) => r.encode_request(), Self::PoVFetchingV1(r) => r.encode_request(), Self::AvailableDataFetchingV1(r) => r.encode_request(), Self::StatementFetchingV1(r) => r.encode_request(), diff --git a/node/network/protocol/src/request_response/vstaging.rs b/node/network/protocol/src/request_response/vstaging.rs new file mode 100644 index 000000000000..0b8d223e3aee --- /dev/null +++ b/node/network/protocol/src/request_response/vstaging.rs @@ -0,0 +1,40 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use parity_scale_codec::{Decode, Encode}; +use polkadot_primitives::v2::{CandidateHash, Hash, Id as ParaId}; + +use super::{IsRequest, Protocol}; + +/// Responses as sent by collators. +pub type CollationFetchingResponse = super::v1::CollationFetchingResponse; + +/// Request the advertised collation at that relay-parent. +#[derive(Debug, Clone, Encode, Decode)] +pub struct CollationFetchingRequest { + /// Relay parent collation is built on top of. + pub relay_parent: Hash, + /// The `ParaId` of the collation. + pub para_id: ParaId, + /// Candidate hash. + pub candidate_hash: CandidateHash, +} + +impl IsRequest for CollationFetchingRequest { + // The response is the same as for V1. + type Response = CollationFetchingResponse; + const PROTOCOL: Protocol = Protocol::CollationFetchingVStaging; +} diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 464e47d60111..3546161c0706 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -558,6 +558,8 @@ pub struct Overseer { NetworkBridgeTxMessage, RuntimeApiMessage, CandidateBackingMessage, + ChainApiMessage, + ProspectiveParachainsMessage, ])] collator_protocol: CollatorProtocol, diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 3619d05c7592..2552f9c44330 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -887,7 +887,11 @@ where config.network.request_response_protocols.push(cfg); let (chunk_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); config.network.request_response_protocols.push(cfg); - let (collation_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); + let (collation_req_v1_receiver, cfg) = + IncomingRequest::get_config_receiver(&req_protocol_names); + config.network.request_response_protocols.push(cfg); + let (collation_req_vstaging_receiver, cfg) = + IncomingRequest::get_config_receiver(&req_protocol_names); config.network.request_response_protocols.push(cfg); let (available_data_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); @@ -1067,7 +1071,8 @@ where authority_discovery_service, pov_req_receiver, chunk_req_receiver, - collation_req_receiver, + collation_req_v1_receiver, + collation_req_vstaging_receiver, available_data_req_receiver, statement_req_receiver, dispute_req_receiver, diff --git a/node/service/src/overseer.rs b/node/service/src/overseer.rs index af8ca6aea54b..ef7c8de74e9c 100644 --- a/node/service/src/overseer.rs +++ b/node/service/src/overseer.rs @@ -26,7 +26,9 @@ use polkadot_node_core_chain_selection::Config as ChainSelectionConfig; use polkadot_node_core_dispute_coordinator::Config as DisputeCoordinatorConfig; use polkadot_node_network_protocol::{ peer_set::PeerSetProtocolNames, - request_response::{v1 as request_v1, IncomingRequestReceiver, ReqProtocolNames}, + request_response::{ + v1 as request_v1, vstaging as request_vstaging, IncomingRequestReceiver, ReqProtocolNames, + }, }; #[cfg(any(feature = "malus", test))] pub use polkadot_overseer::{ @@ -93,13 +95,21 @@ where pub network_service: Arc>, /// Underlying authority discovery service. pub authority_discovery_service: AuthorityDiscoveryService, - /// POV request receiver + /// POV request receiver. pub pov_req_receiver: IncomingRequestReceiver, + /// Erasure chunks request receiver. 
pub chunk_req_receiver: IncomingRequestReceiver, - pub collation_req_receiver: IncomingRequestReceiver, + /// Collations request receiver for network protocol v1. + pub collation_req_v1_receiver: IncomingRequestReceiver, + /// Collations request receiver for network protocol vstaging. + pub collation_req_vstaging_receiver: + IncomingRequestReceiver, + /// Receiver for available data requests. pub available_data_req_receiver: IncomingRequestReceiver, + /// Receiver for incoming large statement requests. pub statement_req_receiver: IncomingRequestReceiver, + /// Receiver for incoming disputes. pub dispute_req_receiver: IncomingRequestReceiver, /// Prometheus registry, commonly used for production systems, less so for test. pub registry: Option<&'a Registry>, @@ -139,7 +149,8 @@ pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>( authority_discovery_service, pov_req_receiver, chunk_req_receiver, - collation_req_receiver, + collation_req_v1_receiver, + collation_req_vstaging_receiver, available_data_req_receiver, statement_req_receiver, dispute_req_receiver, @@ -257,12 +268,13 @@ where .collation_generation(CollationGenerationSubsystem::new(Metrics::register(registry)?)) .collator_protocol({ let side = match is_collator { - IsCollator::Yes(collator_pair) => ProtocolSide::Collator( - network_service.local_peer_id().clone(), + IsCollator::Yes(collator_pair) => ProtocolSide::Collator { + peer_id: network_service.local_peer_id().clone(), collator_pair, - collation_req_receiver, - Metrics::register(registry)?, - ), + request_receiver_v1: collation_req_v1_receiver, + request_receiver_vstaging: collation_req_vstaging_receiver, + metrics: Metrics::register(registry)?, + }, IsCollator::No => ProtocolSide::Validator { keystore: keystore.clone(), eviction_policy: Default::default(), diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 84a38d4a7be6..e3c03ee11c05 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -195,10 +195,16 @@ pub enum CollatorProtocolMessage { /// This should be sent before any `DistributeCollation` message. CollateOn(ParaId), /// Provide a collation to distribute to validators with an optional result sender. + /// The second argument is the parent head-data hash. /// /// The result sender should be informed when at least one parachain validator seconded the collation. It is also /// completely okay to just drop the sender. - DistributeCollation(CandidateReceipt, PoV, Option>), + DistributeCollation( + CandidateReceipt, + Hash, + PoV, + Option>, + ), /// Report a collator as having provided an invalid collation. This should lead to disconnect /// and blacklist of the collator. ReportCollator(CollatorId), @@ -719,7 +725,7 @@ impl RuntimeApiRequest { /// `Disputes` pub const DISPUTES_RUNTIME_REQUIREMENT: u32 = 3; - /// Minimum version for valididty constraints, required for async backing. + /// Minimum version for validity constraints, required for async backing. /// /// 99 for now, should be adjusted to VSTAGING/actual runtime version once released. pub const VALIDITY_CONSTRAINTS: u32 = 99; diff --git a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs index dc10efe519fe..d3734e73c14f 100644 --- a/node/subsystem-util/src/backing_implicit_view.rs +++ b/node/subsystem-util/src/backing_implicit_view.rs @@ -100,6 +100,11 @@ struct BlockInfo { } impl View { + /// Get an iterator over active leaves in the view. 
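+	/// Together with the pruned hashes returned by `deactivate_leaf` below,
+	/// this lets a caller keep external per-relay-parent state in sync with
+	/// the implicit view, e.g. (a sketch, assuming some `per_relay_parent`
+	/// map is kept alongside the view):
+	///
+	/// ```ignore
+	/// for removed in view.deactivate_leaf(leaf_hash) {
+	///     per_relay_parent.remove(&removed);
+	/// }
+	/// ```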
+ pub fn leaves<'a>(&'a self) -> impl Iterator + 'a { + self.leaves.keys() + } + /// Activate a leaf in the view. /// This will request the minimum relay parents from the /// Prospective Parachains subsystem for each leaf and will load headers in the ancestry of each @@ -152,9 +157,13 @@ impl View { } /// Deactivate a leaf in the view. This prunes any outdated implicit ancestors as well. - pub fn deactivate_leaf(&mut self, leaf_hash: Hash) { + /// + /// Returns hashes of blocks pruned from storage. + pub fn deactivate_leaf(&mut self, leaf_hash: Hash) -> Vec { + let mut removed = Vec::new(); + if self.leaves.remove(&leaf_hash).is_none() { - return + return removed } // Prune everything before the minimum out of all leaves, @@ -165,8 +174,15 @@ impl View { { let minimum = self.leaves.values().map(|l| l.retain_minimum).min(); - self.block_info_storage - .retain(|_, i| minimum.map_or(false, |m| i.block_number >= m)); + self.block_info_storage.retain(|hash, i| { + let keep = minimum.map_or(false, |m| i.block_number >= m); + if !keep { + removed.push(*hash); + } + keep + }); + + removed } } @@ -212,17 +228,26 @@ impl View { } /// Errors when fetching a leaf and associated ancestry. -#[derive(Debug)] +#[fatality::fatality] pub enum FetchError { - /// Leaf was already known. + /// Activated leaf is already present in view. + #[error("Leaf was already known")] AlreadyKnown, - /// The prospective parachains subsystem was unavailable. + + /// Request to the prospective parachains subsystem failed. + #[error("The prospective parachains subsystem was unavailable")] ProspectiveParachainsUnavailable, - /// A block header was unavailable. + + /// Failed to fetch the block header. + #[error("A block header was unavailable")] BlockHeaderUnavailable(Hash, BlockHeaderUnavailableReason), + /// A block header was unavailable due to a chain API error. + #[error("A block header was unavailable due to a chain API error")] ChainApiError(Hash, ChainApiError), - /// The chain API subsystem was unavailable. + + /// Request to the Chain API subsystem failed. 
+ #[error("The chain API subsystem was unavailable")] ChainApiUnavailable, } diff --git a/scripts/ci/gitlab/lingua.dic b/scripts/ci/gitlab/lingua.dic index 3add6a276cf0..b68a8c20600a 100644 --- a/scripts/ci/gitlab/lingua.dic +++ b/scripts/ci/gitlab/lingua.dic @@ -298,6 +298,7 @@ unreserve unreserving unroutable unservable/B +unshare/D untrusted untyped unvested @@ -314,10 +315,11 @@ verify/R versa Versi version/DMSG -versioned VMP/SM VPS VRF/SM +vstaging +VStaging w3f/MS wakeup wakeups From 604cfc42b8640a483fc4c63b75b29bc7f46f161d Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Wed, 19 Oct 2022 14:18:23 +0400 Subject: [PATCH 16/76] validator assignment fixes for backing and collator protocol (#6158) * Rename depth->ancestry len in tests * Refactor group assignments * Remove implicit assignments * backing: consider occupied core assignments * Track a single para on validator side --- node/core/backing/src/lib.rs | 40 +++-- .../src/tests/prospective_parachains.rs | 166 ++++++++++++++++-- .../src/validator_side/collation.rs | 30 ++-- .../src/validator_side/mod.rs | 117 ++++-------- .../tests/prospective_parachains.rs | 34 ++-- .../src/legacy_v1/mod.rs | 4 +- 6 files changed, 226 insertions(+), 165 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index a743c529165a..b319a9cbcfac 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -651,7 +651,7 @@ async fn validate_and_make_available( let pov = match pov { PoVData::Ready(pov) => pov, - PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => { + PoVData::FetchFromValidator { from_validator, candidate_hash, pov_hash } => match request_pov( &mut sender, relay_parent, @@ -674,8 +674,7 @@ async fn validate_and_make_available( }, Err(err) => return Err(err), Ok(pov) => pov, - } - } + }, }; let v = { @@ -1077,16 +1076,26 @@ async fn construct_per_relay_parent_state( let mut assignment = None; for (idx, core) in cores.into_iter().enumerate() { - // Ignore prospective assignments on occupied cores for the time being. - if let CoreState::Scheduled(scheduled) = core { - let core_index = CoreIndex(idx as _); - let group_index = group_rotation_info.group_for_core(core_index, n_cores); - if let Some(g) = validator_groups.get(group_index.0 as usize) { - if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { - assignment = Some((scheduled.para_id, scheduled.collator)); - } - groups.insert(scheduled.para_id, g.clone()); + let core_para_id = match core { + CoreState::Scheduled(scheduled) => scheduled.para_id, + CoreState::Occupied(occupied) => + if mode.is_enabled() { + // Async backing makes it legal to build on top of + // occupied core. + occupied.candidate_descriptor.para_id + } else { + continue + }, + CoreState::Free => continue, + }; + + let core_index = CoreIndex(idx as _); + let group_index = group_rotation_info.group_for_core(core_index, n_cores); + if let Some(g) = validator_groups.get(group_index.0 as usize) { + if validator.as_ref().map_or(false, |v| g.contains(&v.index())) { + assignment = Some(core_para_id); } + groups.insert(core_para_id, g.clone()); } } @@ -1098,13 +1107,6 @@ async fn construct_per_relay_parent_state( }, }; - // TODO [now]: I've removed the `required_collator` more broadly, - // because it's not used in practice and was intended for parathreads. - // - // We should attempt parathreads another way, I think, so it makes sense - // to remove. 
- let assignment = assignment.map(|(a, _required_collator)| a); - Ok(Some(PerRelayParentState { prospective_parachains_mode: mode, parent, diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 59db7f62b722..59ed1027e624 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -17,7 +17,7 @@ //! Tests for the backing subsystem with enabled prospective parachains. use polkadot_node_subsystem::{messages::ChainApiMessage, TimeoutExt}; -use polkadot_primitives::v2::{BlockNumber, Header}; +use polkadot_primitives::v2::{BlockNumber, Header, OccupiedCore}; use super::*; @@ -299,7 +299,7 @@ fn seconding_sanity_check_allowed() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { // Candidate is seconded in a parent of the activated `leaf_a`. const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_A_DEPTH: BlockNumber = 3; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; let para_id = test_state.chain_ids[0]; let leaf_b_hash = Hash::from_low_u64_be(128); @@ -312,11 +312,11 @@ fn seconding_sanity_check_allowed() { status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), }; - let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_DEPTH)]; + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; - const LEAF_B_DEPTH: BlockNumber = 4; + const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; let activated = ActivatedLeaf { hash: leaf_b_hash, @@ -324,7 +324,7 @@ fn seconding_sanity_check_allowed() { status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), }; - let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_DEPTH)]; + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; let test_leaf_b = TestLeaf { activated, min_relay_parents }; activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; @@ -436,7 +436,7 @@ fn seconding_sanity_check_disallowed() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { // Candidate is seconded in a parent of the activated `leaf_a`. 
const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_A_DEPTH: BlockNumber = 3; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; let para_id = test_state.chain_ids[0]; let leaf_b_hash = Hash::from_low_u64_be(128); @@ -449,11 +449,11 @@ fn seconding_sanity_check_disallowed() { status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), }; - let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_DEPTH)]; + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; - const LEAF_B_DEPTH: BlockNumber = 4; + const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; let activated = ActivatedLeaf { hash: leaf_b_hash, @@ -461,7 +461,7 @@ fn seconding_sanity_check_disallowed() { status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), }; - let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_DEPTH)]; + let min_relay_parents = vec![(para_id, LEAF_B_BLOCK_NUMBER - LEAF_B_ANCESTRY_LEN)]; let test_leaf_b = TestLeaf { activated, min_relay_parents }; activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; @@ -633,7 +633,7 @@ fn prospective_parachains_reject_candidate() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { // Candidate is seconded in a parent of the activated `leaf_a`. const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_A_DEPTH: BlockNumber = 3; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; let para_id = test_state.chain_ids[0]; let leaf_a_hash = Hash::from_low_u64_be(130); @@ -644,7 +644,7 @@ fn prospective_parachains_reject_candidate() { status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), }; - let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_DEPTH)]; + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; @@ -798,7 +798,7 @@ fn second_multiple_candidates_per_relay_parent() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { // Candidate `a` is seconded in a parent of the activated `leaf`. const LEAF_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_DEPTH: BlockNumber = 3; + const LEAF_ANCESTRY_LEN: BlockNumber = 3; let para_id = test_state.chain_ids[0]; let leaf_hash = Hash::from_low_u64_be(130); @@ -810,7 +810,7 @@ fn second_multiple_candidates_per_relay_parent() { status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), }; - let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_DEPTH)]; + let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; @@ -922,7 +922,7 @@ fn backing_works() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { // Candidate `a` is seconded in a parent of the activated `leaf`. 
const LEAF_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_DEPTH: BlockNumber = 3; + const LEAF_ANCESTRY_LEN: BlockNumber = 3; let para_id = test_state.chain_ids[0]; let leaf_hash = Hash::from_low_u64_be(130); @@ -933,7 +933,7 @@ fn backing_works() { status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), }; - let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_DEPTH)]; + let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; @@ -1081,7 +1081,7 @@ fn concurrent_dependent_candidates() { // Candidate `a` is seconded in a grandparent of the activated `leaf`, // candidate `b` -- in parent. const LEAF_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_DEPTH: BlockNumber = 3; + const LEAF_ANCESTRY_LEN: BlockNumber = 3; let para_id = test_state.chain_ids[0]; let leaf_hash = Hash::from_low_u64_be(130); @@ -1093,7 +1093,7 @@ fn concurrent_dependent_candidates() { status: LeafStatus::Fresh, span: Arc::new(jaeger::Span::Disabled), }; - let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_DEPTH)]; + let min_relay_parents = vec![(para_id, LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; @@ -1308,7 +1308,7 @@ fn seconding_sanity_check_occupy_same_depth() { test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { // Candidate `a` is seconded in a parent of the activated `leaf`. const LEAF_BLOCK_NUMBER: BlockNumber = 100; - const LEAF_DEPTH: BlockNumber = 3; + const LEAF_ANCESTRY_LEN: BlockNumber = 3; let para_id_a = test_state.chain_ids[0]; let para_id_b = test_state.chain_ids[1]; @@ -1323,7 +1323,7 @@ fn seconding_sanity_check_occupy_same_depth() { span: Arc::new(jaeger::Span::Disabled), }; - let min_block_number = LEAF_BLOCK_NUMBER - LEAF_DEPTH; + let min_block_number = LEAF_BLOCK_NUMBER - LEAF_ANCESTRY_LEN; let min_relay_parents = vec![(para_id_a, min_block_number), (para_id_b, min_block_number)]; let test_leaf_a = TestLeaf { activated, min_relay_parents }; @@ -1433,3 +1433,131 @@ fn seconding_sanity_check_occupy_same_depth() { virtual_overseer }); } + +// Test that the subsystem doesn't skip occupied cores assignments. +#[test] +fn occupied_core_assignment() { + let mut test_state = TestState::default(); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + // Candidate is seconded in a parent of the activated `leaf_a`. + const LEAF_A_BLOCK_NUMBER: BlockNumber = 100; + const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; + let para_id = test_state.chain_ids[0]; + + // Set the core state to occupied. 
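+		// With prospective parachains enabled it is legal to build on top of
+		// an occupied core, so the subsystem is expected to take this
+		// assignment instead of skipping the core.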
+ let mut candidate_descriptor = ::test_helpers::dummy_candidate_descriptor(Hash::zero()); + candidate_descriptor.para_id = para_id; + test_state.availability_cores[0] = CoreState::Occupied(OccupiedCore { + group_responsible: Default::default(), + next_up_on_available: None, + occupied_since: 100_u32, + time_out_at: 200_u32, + next_up_on_time_out: None, + availability: Default::default(), + candidate_descriptor, + candidate_hash: Default::default(), + }); + + let leaf_a_hash = Hash::from_low_u64_be(130); + let leaf_a_parent = get_parent_hash(leaf_a_hash); + let activated = ActivatedLeaf { + hash: leaf_a_hash, + number: LEAF_A_BLOCK_NUMBER, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + let min_relay_parents = vec![(para_id, LEAF_A_BLOCK_NUMBER - LEAF_A_ANCESTRY_LEN)]; + let test_leaf_a = TestLeaf { activated, min_relay_parents }; + + activate_leaf(&mut virtual_overseer, test_leaf_a, &test_state, 0).await; + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(¶_id).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id, + relay_parent: leaf_a_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + } + .build(); + + let second = CandidateBackingMessage::Second( + leaf_a_hash, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + assert_validate_seconded_candidate( + &mut virtual_overseer, + leaf_a_parent, + &candidate, + &pov, + &pvd, + &validation_code, + expected_head_data, + false, + ) + .await; + + // `seconding_sanity_check` + let expected_request = HypotheticalDepthRequest { + candidate_hash: candidate.hash(), + candidate_para: para_id, + parent_head_data_hash: pvd.parent_head.hash(), + candidate_relay_parent: leaf_a_parent, + fragment_tree_relay_parent: leaf_a_hash, + }; + assert_hypothetical_depth_requests( + &mut virtual_overseer, + vec![(expected_request, vec![0, 1, 2, 3])], + ) + .await; + // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded( + candidate_para, + candidate_receipt, + _pvd, + tx, + ), + ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + // Any non-empty response will do. 
+ tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + parent_hash, + _signed_statement, + ) + ) if parent_hash == leaf_a_parent => {} + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Seconded(hash, statement)) => { + assert_eq!(leaf_a_parent, hash); + assert_matches!(statement.payload(), Statement::Seconded(_)); + } + ); + + virtual_overseer + }); +} diff --git a/node/network/collator-protocol/src/validator_side/collation.rs b/node/network/collator-protocol/src/validator_side/collation.rs index c5cb848d2815..48353cb266ad 100644 --- a/node/network/collator-protocol/src/validator_side/collation.rs +++ b/node/network/collator-protocol/src/validator_side/collation.rs @@ -28,7 +28,7 @@ //! └─▶Advertised ─▶ Pending ─▶ Fetched ─▶ Validated use futures::channel::oneshot; -use std::collections::{HashMap, VecDeque}; +use std::collections::VecDeque; use polkadot_node_network_protocol::PeerId; use polkadot_node_primitives::PoV; @@ -183,14 +183,14 @@ pub struct Collations { pub fetching_from: Option<(CollatorId, Option)>, /// Collation that were advertised to us, but we did not yet fetch. pub waiting_queue: VecDeque<(PendingCollation, CollatorId)>, - /// How many collations have been seconded per parachain. - pub seconded_count: HashMap, + /// How many collations have been seconded. + pub seconded_count: usize, } impl Collations { /// Note a seconded collation for a given para. - pub(super) fn note_seconded(&mut self, para_id: ParaId) { - *self.seconded_count.entry(para_id).or_insert(0) += 1 + pub(super) fn note_seconded(&mut self) { + self.seconded_count += 1 } /// Returns the next collation to fetch from the `waiting_queue`. @@ -225,17 +225,12 @@ impl Collations { match self.status { // We don't need to fetch any other collation when we already have seconded one. CollationStatus::Seconded => None, - CollationStatus::Waiting => { - while let Some(next) = self.waiting_queue.pop_front() { - let para_id = next.0.para_id; - if !self.is_seconded_limit_reached(relay_parent_mode, para_id) { - continue - } - - return Some(next) - } - None - }, + CollationStatus::Waiting => + if !self.is_seconded_limit_reached(relay_parent_mode) { + None + } else { + self.waiting_queue.pop_front() + }, CollationStatus::WaitingOnValidation | CollationStatus::Fetching => unreachable!("We have reset the status above!"), } @@ -245,10 +240,9 @@ impl Collations { pub(super) fn is_seconded_limit_reached( &self, relay_parent_mode: ProspectiveParachainsMode, - para_id: ParaId, ) -> bool { let seconded_limit = if relay_parent_mode.is_enabled() { MAX_CANDIDATE_DEPTH + 1 } else { 1 }; - self.seconded_count.get(¶_id).map_or(true, |&num| num < seconded_limit) + self.seconded_count < seconded_limit } } diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index 2454a23e3581..2b93b295418d 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -364,31 +364,10 @@ impl PeerData { } } -#[derive(Debug, Copy, Clone)] -enum AssignedCoreState { - Scheduled, - Occupied, -} - -impl AssignedCoreState { - fn is_occupied(&self) -> bool { - matches!(self, AssignedCoreState::Occupied) - } -} - -#[derive(Debug, Clone)] +#[derive(Debug)] struct GroupAssignments { /// Current assignment. 
- current: Option<(ParaId, AssignedCoreState)>, - /// Paras we're implicitly assigned to with respect to ancestry. - /// This only includes paras from children relay chain blocks assignments. - /// - /// Implicit assignments are not reference-counted since they're accumulated - /// from the most recent leaf. - /// - /// Should be relatively small depending on the group rotation frequency and - /// allowed ancestry length. - implicit: Vec, + current: Option, } struct PerRelayParent { @@ -401,7 +380,7 @@ impl PerRelayParent { fn new(mode: ProspectiveParachainsMode) -> Self { Self { prospective_parachains_mode: mode, - assignment: GroupAssignments { current: None, implicit: Vec::new() }, + assignment: GroupAssignments { current: None }, collations: Collations::default(), } } @@ -490,6 +469,7 @@ async fn assign_incoming( current_assignments: &mut HashMap, keystore: &SyncCryptoStorePtr, relay_parent: Hash, + relay_parent_mode: ProspectiveParachainsMode, ) -> Result<()> where Sender: CollatorProtocolSenderTrait, @@ -518,9 +498,9 @@ where let core_now = rotation_info.core_for_group(group, cores.len()); cores.get(core_now.0 as usize).and_then(|c| match c { - CoreState::Occupied(core) => Some((core.para_id(), AssignedCoreState::Occupied)), - CoreState::Scheduled(core) => Some((core.para_id, AssignedCoreState::Scheduled)), - CoreState::Free => None, + CoreState::Occupied(core) if relay_parent_mode.is_enabled() => Some(core.para_id()), + CoreState::Scheduled(core) => Some(core.para_id), + CoreState::Occupied(_) | CoreState::Free => None, }) }, None => { @@ -538,7 +518,7 @@ where // // However, this'll work fine for parachains, as each parachain gets a dedicated // core. - if let Some((para_id, _)) = para_now.as_ref() { + if let Some(para_id) = para_now.as_ref() { let entry = current_assignments.entry(*para_id).or_default(); *entry += 1; if *entry == 1 { @@ -551,7 +531,7 @@ where } } - *group_assignment = GroupAssignments { current: para_now, implicit: Vec::new() }; + *group_assignment = GroupAssignments { current: para_now }; Ok(()) } @@ -562,7 +542,7 @@ fn remove_outgoing( ) { let GroupAssignments { current, .. } = per_relay_parent.assignment; - if let Some((cur, _)) = current { + if let Some(cur) = current { if let Entry::Occupied(mut occupied) = current_assignments.entry(cur) { *occupied.get_mut() -= 1; if *occupied.get() == 0 { @@ -944,9 +924,6 @@ enum AdvertisementError { UndeclaredCollator, /// We're assigned to a different para at the given relay parent. InvalidAssignment, - /// Collator is trying to build on top of occupied core - /// when async backing is disabled. - CoreOccupied, /// An advertisement format doesn't match the relay parent. ProtocolMismatch, /// Para reached a limit of seconded candidates for this relay parent. @@ -962,8 +939,7 @@ impl AdvertisementError { use AdvertisementError::*; match self { InvalidAssignment => Some(COST_WRONG_PARA), - RelayParentUnknown | UndeclaredCollator | CoreOccupied | Invalid(_) => - Some(COST_UNEXPECTED_MESSAGE), + RelayParentUnknown | UndeclaredCollator | Invalid(_) => Some(COST_UNEXPECTED_MESSAGE), UnknownPeer | ProtocolMismatch | SecondedLimitReached | @@ -1000,16 +976,8 @@ where peer_data.collating_para().ok_or(AdvertisementError::UndeclaredCollator)?; match assignment.current { - Some((id, core_state)) if id == collator_para_id => { - // Disallow building on top occupied core if async - // backing is disabled. 
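The deleted `AssignedCoreState` collapses into a guard on the match above: an occupied core yields an assignment only under asynchronous backing, because only then may a collator legitimately build on top of the pending candidate. A condensed sketch of what `assign_incoming` now derives for the group's core, using the types from this hunk (the patch inlines this rather than factoring it out):

```rust
// Occupied + async backing => assignment; occupied without it, or free => none.
fn assigned_para(core: &CoreState, relay_parent_mode: ProspectiveParachainsMode) -> Option<ParaId> {
    match core {
        CoreState::Occupied(core) if relay_parent_mode.is_enabled() => Some(core.para_id()),
        CoreState::Scheduled(core) => Some(core.para_id),
        CoreState::Occupied(_) | CoreState::Free => None,
    }
}
```
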
- if !relay_parent_mode.is_enabled() && core_state.is_occupied() { - return Err(AdvertisementError::CoreOccupied) - } - }, - _ if assignment.implicit.contains(&collator_para_id) => { - // This relay parent is a part of implicit ancestry, - // thus async backing is enabled. + Some(id) if id == collator_para_id => { + // Our assignment. }, _ => return Err(AdvertisementError::InvalidAssignment), }; @@ -1063,7 +1031,7 @@ where }); let collations = &mut per_relay_parent.collations; - if !collations.is_seconded_limit_reached(relay_parent_mode, collator_para_id) { + if !collations.is_seconded_limit_reached(relay_parent_mode) { return Err(AdvertisementError::SecondedLimitReached) } @@ -1141,13 +1109,11 @@ where &mut state.current_assignments, keystore, *leaf, + mode, ) .await?; state.active_leaves.insert(*leaf, mode); - - let mut implicit_assignment = - Vec::from_iter(per_relay_parent.assignment.current.map(|(para, _)| para)); state.per_relay_parent.insert(*leaf, per_relay_parent); if mode.is_enabled() { @@ -1163,37 +1129,21 @@ where .known_allowed_relay_parents_under(leaf, None) .unwrap_or_default(); for block_hash in allowed_ancestry { - let entry = match state.per_relay_parent.entry(*block_hash) { - Entry::Vacant(entry) => { - let mut per_relay_parent = - PerRelayParent::new(ProspectiveParachainsMode::Enabled); - assign_incoming( - sender, - &mut per_relay_parent.assignment, - &mut state.current_assignments, - keystore, - *block_hash, - ) - .await?; - - entry.insert(per_relay_parent) - }, - Entry::Occupied(entry) => entry.into_mut(), - }; - - let current = entry.assignment.current.map(|(para, _)| para); - let implicit = &mut entry.assignment.implicit; + if let Entry::Vacant(entry) = state.per_relay_parent.entry(*block_hash) { + let mut per_relay_parent = + PerRelayParent::new(ProspectiveParachainsMode::Enabled); + assign_incoming( + sender, + &mut per_relay_parent.assignment, + &mut state.current_assignments, + keystore, + *block_hash, + mode, + ) + .await?; - // Extend implicitly assigned parachains. - for para in &implicit_assignment { - if !implicit.contains(para) { - implicit.push(*para); - } + entry.insert(per_relay_parent); } - // Current assignment propagates to parents, meaning that a parachain - // we're assigned to in fresh blocks can submit collations built - // on top of relay parents in the allowed ancestry, but not vice versa. - implicit_assignment.extend(current); } } } @@ -1355,9 +1305,8 @@ async fn process_msg( let fetched_collation = FetchedCollation::from(&receipt.to_plain()); if let Some(collation_event) = state.fetched_candidates.remove(&fetched_collation) { let (collator_id, pending_collation) = collation_event; - let PendingCollation { - relay_parent, peer_id, para_id, prospective_candidate, .. - } = pending_collation; + let PendingCollation { relay_parent, peer_id, prospective_candidate, .. } = + pending_collation; note_good_collation(ctx.sender(), &state.peer_data, collator_id.clone()).await; if let Some(peer_data) = state.peer_data.get(&peer_id) { notify_collation_seconded( @@ -1370,9 +1319,9 @@ async fn process_msg( .await; } - if let Some(state) = state.per_relay_parent.get_mut(&parent) { - state.collations.status = CollationStatus::Seconded; - state.collations.note_seconded(para_id); + if let Some(rp_state) = state.per_relay_parent.get_mut(&parent) { + rp_state.collations.status = CollationStatus::Seconded; + rp_state.collations.note_seconded(); } // If async backing is enabled, make an attempt to fetch next collation. 
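Worth noting while reading these call sites: despite its name, `is_seconded_limit_reached` returns `true` while the limit has *not* yet been reached (`seconded_count < seconded_limit`), which is why every caller negates it. The limit itself is a one-liner; a sketch, assuming `MAX_CANDIDATE_DEPTH` as defined by this crate:

```rust
// With prospective parachains, one candidate may be seconded per depth
// (0..=MAX_CANDIDATE_DEPTH), hence the `+ 1`; without them, exactly one
// candidate per relay parent.
fn seconded_limit(relay_parent_mode: ProspectiveParachainsMode) -> usize {
    if relay_parent_mode.is_enabled() {
        MAX_CANDIDATE_DEPTH + 1
    } else {
        1
    }
}
```
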
let maybe_candidate_hash = diff --git a/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index 1eccb97cbd67..b854ae202014 100644 --- a/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -294,10 +294,11 @@ fn accept_advertisements_from_implicit_view() { let head_b = Hash::from_low_u64_be(128); let head_b_num: u32 = 2; + let head_c = get_parent_hash(head_b); // Grandparent of head `b`. - // Group rotation frequency is 1 by default, at `c` we're assigned + // Group rotation frequency is 1 by default, at `d` we're assigned // to the first para. - let head_c = Hash::from_low_u64_be(130); + let head_d = get_parent_hash(head_c); // Activated leaf is `b`, but the collation will be based on `c`. update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; @@ -332,35 +333,24 @@ fn accept_advertisements_from_implicit_view() { Some((candidate_hash, parent_head_data_hash)), ) .await; - // Advertise with different para. - advertise_collation( + assert_fetch_collation_request( &mut virtual_overseer, - peer_a, head_c, - Some((candidate_hash, parent_head_data_hash)), + test_state.chain_ids[1], + Some(candidate_hash), ) .await; - - let response_channel = assert_fetch_collation_request( + // Advertise with different para. + advertise_collation( &mut virtual_overseer, - head_c, - test_state.chain_ids[1], - Some(candidate_hash), + peer_a, + head_d, // Note different relay parent. + Some((candidate_hash, parent_head_data_hash)), ) .await; - - // Respond with an error to abort seconding. - response_channel - .send(Err(sc_network::RequestFailure::NotConnected)) - .expect("Sending response should succeed"); - assert_matches!( - overseer_recv(&mut virtual_overseer).await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(..),) - ); - assert_fetch_collation_request( &mut virtual_overseer, - head_c, + head_d, test_state.chain_ids[0], Some(candidate_hash), ) diff --git a/node/network/statement-distribution/src/legacy_v1/mod.rs b/node/network/statement-distribution/src/legacy_v1/mod.rs index ae58d643d19e..776c1b95f5c4 100644 --- a/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -18,9 +18,7 @@ use parity_scale_codec::Encode; use polkadot_node_network_protocol::{ self as net_protocol, - grid_topology::{ - GridNeighbors, RequiredRouting, SessionBoundGridTopologyStorage, - }, + grid_topology::{GridNeighbors, RequiredRouting, SessionBoundGridTopologyStorage}, peer_set::{IsAuthority, PeerSet, ValidationVersion}, v1::{self as protocol_v1, StatementMetadata}, vstaging as protocol_vstaging, IfDisconnected, PeerId, UnifiedReputationChange as Rep, From 41e2f4d4f657b1a8eca4fe6c2a7ac906449961a1 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Mon, 24 Oct 2022 12:13:00 +0400 Subject: [PATCH 17/76] Refactor prospective parachains mode request (#6179) * Extract prospective parachains mode into util * Skip activations depending on the mode --- node/core/backing/src/error.rs | 22 +++--- node/core/backing/src/lib.rs | 51 ++------------ node/core/prospective-parachains/src/error.rs | 4 ++ node/core/prospective-parachains/src/lib.rs | 23 ++++-- node/core/provisioner/src/error.rs | 9 +-- node/core/provisioner/src/lib.rs | 70 +++++-------------- 
node/core/provisioner/src/tests.rs | 41 +++++------ .../src/collator_side/mod.rs | 9 +-- node/network/collator-protocol/src/error.rs | 3 - node/network/collator-protocol/src/lib.rs | 51 +------------- .../src/validator_side/collation.rs | 3 +- .../src/validator_side/mod.rs | 9 ++- node/subsystem-util/src/lib.rs | 1 + node/subsystem-util/src/runtime/mod.rs | 56 ++++++++++++++- 14 files changed, 148 insertions(+), 204 deletions(-) diff --git a/node/core/backing/src/error.rs b/node/core/backing/src/error.rs index 13d33d852f60..4078a3317949 100644 --- a/node/core/backing/src/error.rs +++ b/node/core/backing/src/error.rs @@ -18,7 +18,7 @@ use fatality::Nested; use futures::channel::{mpsc, oneshot}; use polkadot_node_subsystem::{messages::ValidationFailed, RuntimeApiError, SubsystemError}; -use polkadot_node_subsystem_util::Error as UtilError; +use polkadot_node_subsystem_util::{runtime, Error as UtilError}; use polkadot_primitives::v2::{BackedCandidate, ValidationCodeHash}; use crate::LOG_TARGET; @@ -30,6 +30,18 @@ pub type FatalResult = std::result::Result; #[allow(missing_docs)] #[fatality::fatality(splitable)] pub enum Error { + #[fatal] + #[error("Failed to spawn background task")] + FailedToSpawnBackgroundTask, + + #[fatal(forward)] + #[error("Error while accessing runtime information")] + Runtime(#[from] runtime::Error), + + #[fatal] + #[error(transparent)] + BackgroundValidationMpsc(#[from] mpsc::SendError), + #[error("Candidate is not found")] CandidateNotFound, @@ -54,10 +66,6 @@ pub enum Error { #[error("Candidate rejected by prospective parachains subsystem")] RejectedByProspectiveParachains, - #[fatal] - #[error("Failed to spawn background task")] - FailedToSpawnBackgroundTask, - #[error("ValidateFromExhaustive channel closed before receipt")] ValidateFromExhaustive(#[source] oneshot::Canceled), @@ -76,10 +84,6 @@ pub enum Error { #[error(transparent)] ValidationFailed(#[from] ValidationFailed), - #[fatal] - #[error(transparent)] - BackgroundValidationMpsc(#[from] mpsc::SendError), - #[error(transparent)] UtilError(#[from] UtilError), diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index b319a9cbcfac..2a8fd59bd6e1 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -96,7 +96,9 @@ use polkadot_node_subsystem_util::{ self as util, backing_implicit_view::{FetchError as ImplicitViewFetchError, View as ImplicitView}, request_from_runtime, request_session_index_for_child, request_validator_groups, - request_validators, Validator, + request_validators, + runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, + Validator, }; use polkadot_primitives::v2::{ BackedCandidate, CandidateCommitments, CandidateHash, CandidateReceipt, @@ -225,20 +227,6 @@ struct PerCandidateState { relay_parent: Hash, } -#[derive(Debug, Clone, Copy, PartialEq)] -enum ProspectiveParachainsMode { - // v2 runtime API: no prospective parachains. - Disabled, - // vstaging runtime API: prospective parachains. 
- Enabled, -} - -impl ProspectiveParachainsMode { - fn is_enabled(&self) -> bool { - self == &ProspectiveParachainsMode::Enabled - } -} - struct ActiveLeafState { prospective_parachains_mode: ProspectiveParachainsMode, /// The candidates seconded at various depths under this active @@ -775,37 +763,6 @@ async fn handle_communication( Ok(()) } -#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] -async fn prospective_parachains_mode( - ctx: &mut Context, - leaf_hash: Hash, -) -> Result { - // TODO: call a Runtime API once staging version is available - // https://github.com/paritytech/substrate/discussions/11338 - - let (tx, rx) = oneshot::channel(); - ctx.send_message(RuntimeApiMessage::Request(leaf_hash, RuntimeApiRequest::Version(tx))) - .await; - - let version = rx - .await - .map_err(Error::RuntimeApiUnavailable)? - .map_err(Error::FetchRuntimeApiVersion)?; - - if version >= RuntimeApiRequest::VALIDITY_CONSTRAINTS { - Ok(ProspectiveParachainsMode::Enabled) - } else { - if version < 2 { - gum::warn!( - target: LOG_TARGET, - "Runtime API version is {}, it is expected to be at least 2. Prospective parachains are disabled", - version - ); - } - Ok(ProspectiveParachainsMode::Disabled) - } -} - #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn handle_active_leaves_update( ctx: &mut Context, @@ -822,7 +779,7 @@ async fn handle_active_leaves_update( let res = if let Some(leaf) = update.activated { // Only activate in implicit view if prospective // parachains are enabled. - let mode = prospective_parachains_mode(ctx, leaf.hash).await?; + let mode = prospective_parachains_mode(ctx.sender(), leaf.hash).await?; let leaf_hash = leaf.hash; Some(( diff --git a/node/core/prospective-parachains/src/error.rs b/node/core/prospective-parachains/src/error.rs index e7fa2f0e9641..0ad98d1ff908 100644 --- a/node/core/prospective-parachains/src/error.rs +++ b/node/core/prospective-parachains/src/error.rs @@ -22,6 +22,7 @@ use polkadot_node_subsystem::{ errors::{ChainApiError, RuntimeApiError}, SubsystemError, }; +use polkadot_node_subsystem_util::runtime; use crate::LOG_TARGET; use fatality::Nested; @@ -45,6 +46,9 @@ pub enum Error { #[error("Receiving message from overseer failed: {0}")] SubsystemReceive(#[source] SubsystemError), + #[error("Error while accessing runtime information")] + Runtime(#[from] runtime::Error), + #[error(transparent)] RuntimeApi(#[from] RuntimeApiError), diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 6d203e902cdd..effa87c3e032 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -39,7 +39,10 @@ use polkadot_node_subsystem::{ }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; -use polkadot_node_subsystem_util::inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}; +use polkadot_node_subsystem_util::{ + inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}, + runtime::prospective_parachains_mode, +}; use polkadot_primitives::vstaging::{ BlockNumber, CandidateHash, CommittedCandidateReceipt, CoreState, Hash, Id as ParaId, PersistedValidationData, @@ -163,10 +166,22 @@ async fn handle_active_leaves_update( } for activated in update.activated.into_iter() { - // TODO [now]: skip leaves which don't have prospective parachains - // enabled. This should be a runtime API version check. 
- let hash = activated.hash; + + let mode = prospective_parachains_mode(ctx.sender(), hash) + .await + .map_err(JfyiError::Runtime)?; + if !mode.is_enabled() { + gum::trace!( + target: LOG_TARGET, + block_hash = ?hash, + "Skipping leaf activation since async backing is disabled" + ); + + // Not a part of any allowed ancestry. + return Ok(()) + } + let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?; let block_info: RelayChainBlockInfo = match fetch_block_info(&mut *ctx, hash).await? { diff --git a/node/core/provisioner/src/error.rs b/node/core/provisioner/src/error.rs index db7be5ea7e31..dfd0f6831532 100644 --- a/node/core/provisioner/src/error.rs +++ b/node/core/provisioner/src/error.rs @@ -28,6 +28,10 @@ pub type Result = std::result::Result; #[allow(missing_docs)] #[fatality::fatality(splitable)] pub enum Error { + #[fatal(forward)] + #[error("Error while accessing runtime information")] + Runtime(#[from] util::runtime::Error), + #[error(transparent)] Util(#[from] util::Error), @@ -49,14 +53,11 @@ pub enum Error { #[error("failed to get backable candidate from prospective parachains")] CanceledBackableCandidate(#[source] oneshot::Canceled), - #[error("failed to get Runtime API version")] - CanceledRuntimeApiVersion(#[source] oneshot::Canceled), - #[error(transparent)] ChainApi(#[from] ChainApiError), #[error(transparent)] - Runtime(#[from] RuntimeApiError), + RuntimeApi(#[from] RuntimeApiError), #[error("failed to send message to ChainAPI")] ChainApiMessageSend(#[source] mpsc::SendError), diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index ea3e2108424b..bd343ded1964 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -35,7 +35,9 @@ use polkadot_node_subsystem::{ PerLeafSpan, RuntimeApiError, SpawnedSubsystem, SubsystemError, }; use polkadot_node_subsystem_util::{ - request_availability_cores, request_persisted_validation_data, TimeoutExt, + request_availability_cores, request_persisted_validation_data, + runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, + TimeoutExt, }; use polkadot_primitives::v2::{ BackedCandidate, BlockNumber, CandidateHash, CandidateReceipt, CoreState, Hash, Id as ParaId, @@ -75,20 +77,10 @@ impl ProvisionerSubsystem { } } -#[derive(Debug, Clone)] -enum ProspectiveParachainsMode { - Enabled, - Disabled { - // Without prospective parachains it's necessary - // to track backed candidates to choose from when assembling - // a relay chain block. - backed_candidates: Vec, - }, -} - /// A per-relay-parent state for the provisioning subsystem. pub struct PerRelayParent { leaf: ActivatedLeaf, + backed_candidates: Vec, prospective_parachains_mode: ProspectiveParachainsMode, signed_bitfields: Vec, is_inherent_ready: bool, @@ -102,6 +94,7 @@ impl PerRelayParent { Self { leaf, + backed_candidates: Vec::new(), prospective_parachains_mode, signed_bitfields: Vec::new(), is_inherent_ready: false, @@ -185,36 +178,6 @@ async fn run_iteration( } } -async fn prospective_parachains_mode( - sender: &mut impl overseer::ProvisionerSenderTrait, - leaf_hash: Hash, -) -> Result { - // TODO: call a Runtime API once staging version is available - // https://github.com/paritytech/substrate/discussions/11338 - // - // Implementation should probably be shared with backing. 
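The deleted comment above ("Implementation should probably be shared with backing") is exactly what this patch resolves: the version probe moves into `polkadot-node-subsystem-util`'s `runtime` module (see the final hunk of this patch), and every subsystem now calls the shared helper. A usage sketch mirroring the prospective-parachains hunk above:

```rust
// Per activated leaf: ask the shared helper for the mode and skip leaves
// whose runtime predates asynchronous backing.
let mode = prospective_parachains_mode(ctx.sender(), hash)
    .await
    .map_err(JfyiError::Runtime)?;
if !mode.is_enabled() {
    // v2 runtime API: nothing for this subsystem to do at this leaf.
    return Ok(())
}
```
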
- - let (tx, rx) = oneshot::channel(); - sender - .send_message(RuntimeApiMessage::Request(leaf_hash, RuntimeApiRequest::Version(tx))) - .await; - - let version = rx.await.map_err(Error::CanceledRuntimeApiVersion)?.map_err(Error::Runtime)?; - - if version >= RuntimeApiRequest::VALIDITY_CONSTRAINTS { - Ok(ProspectiveParachainsMode::Enabled) - } else { - if version < 2 { - gum::warn!( - target: LOG_TARGET, - "Runtime API version is {}, it is expected to be at least 2. Prospective parachains are disabled", - version - ); - } - Ok(ProspectiveParachainsMode::Disabled { backed_candidates: Vec::new() }) - } -} - async fn handle_active_leaves_update( sender: &mut impl overseer::ProvisionerSenderTrait, update: ActiveLeavesUpdate, @@ -286,7 +249,8 @@ async fn send_inherent_data_bg( ) -> Result<(), Error> { let leaf = per_relay_parent.leaf.clone(); let signed_bitfields = per_relay_parent.signed_bitfields.clone(); - let prospective_parachains_mode = per_relay_parent.prospective_parachains_mode.clone(); + let backed_candidates = per_relay_parent.backed_candidates.clone(); + let mode = per_relay_parent.prospective_parachains_mode; let span = per_relay_parent.span.child("req-inherent-data"); let mut sender = ctx.sender().clone(); @@ -304,7 +268,8 @@ async fn send_inherent_data_bg( let send_result = send_inherent_data( &leaf, &signed_bitfields, - &prospective_parachains_mode, + &backed_candidates, + mode, return_senders, &mut sender, &metrics, @@ -367,11 +332,7 @@ fn note_provisionable_data( .child("provisionable-backed") .with_candidate(candidate_hash) .with_para_id(backed_candidate.descriptor().para_id); - if let ProspectiveParachainsMode::Disabled { backed_candidates } = - &mut per_relay_parent.prospective_parachains_mode - { - backed_candidates.push(backed_candidate) - } + per_relay_parent.backed_candidates.push(backed_candidate); }, _ => {}, } @@ -399,7 +360,8 @@ type CoreAvailability = BitVec; async fn send_inherent_data( leaf: &ActivatedLeaf, bitfields: &[SignedAvailabilityBitfield], - prospective_parachains_mode: &ProspectiveParachainsMode, + candidates: &[CandidateReceipt], + prospective_parachains_mode: ProspectiveParachainsMode, return_senders: Vec>, from_job: &mut impl overseer::ProvisionerSenderTrait, metrics: &Metrics, @@ -454,6 +416,7 @@ async fn send_inherent_data( let candidates = select_candidates( &availability_cores, &bitfields, + candidates, prospective_parachains_mode, leaf.hash, from_job, @@ -724,18 +687,19 @@ async fn request_backable_candidates( async fn select_candidates( availability_cores: &[CoreState], bitfields: &[SignedAvailabilityBitfield], - prospective_parachains_mode: &ProspectiveParachainsMode, + candidates: &[CandidateReceipt], + prospective_parachains_mode: ProspectiveParachainsMode, relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, ) -> Result, Error> { let selected_candidates = match prospective_parachains_mode { ProspectiveParachainsMode::Enabled => request_backable_candidates(availability_cores, bitfields, relay_parent, sender).await?, - ProspectiveParachainsMode::Disabled { backed_candidates } => + ProspectiveParachainsMode::Disabled => select_candidate_hashes_from_tracked( availability_cores, bitfields, - &backed_candidates, + &candidates, relay_parent, sender, ) diff --git a/node/core/provisioner/src/tests.rs b/node/core/provisioner/src/tests.rs index 14110d8bf666..4b34905b77ca 100644 --- a/node/core/provisioner/src/tests.rs +++ b/node/core/provisioner/src/tests.rs @@ -234,6 +234,7 @@ mod select_candidates { }, }; use 
polkadot_node_subsystem_test_helpers::TestSubsystemSender; + use polkadot_node_subsystem_util::runtime::ProspectiveParachainsMode; use polkadot_primitives::v2::{ BlockNumber, CandidateCommitments, CommittedCandidateReceipt, PersistedValidationData, }; @@ -316,15 +317,10 @@ mod select_candidates { ] } - enum TestProspectiveParachainsMode { - Enabled, - Disabled, - } - async fn mock_overseer( mut receiver: mpsc::UnboundedReceiver, expected: Vec, - prospective_parachains_mode: TestProspectiveParachainsMode, + prospective_parachains_mode: ProspectiveParachainsMode, ) { use ChainApiMessage::BlockNumber; use RuntimeApiMessage::Request; @@ -354,10 +350,10 @@ mod select_candidates { AllMessages::ProspectiveParachains( ProspectiveParachainsMessage::GetBackableCandidate(.., tx), ) => match prospective_parachains_mode { - TestProspectiveParachainsMode::Enabled => { + ProspectiveParachainsMode::Enabled => { let _ = tx.send(candidates.next()); }, - TestProspectiveParachainsMode::Disabled => + ProspectiveParachainsMode::Disabled => panic!("unexpected prospective parachains request"), }, _ => panic!("Unexpected message: {:?}", from_job), @@ -368,14 +364,14 @@ mod select_candidates { #[test] fn can_succeed() { test_harness( - |r| mock_overseer(r, Vec::new(), TestProspectiveParachainsMode::Disabled), + |r| mock_overseer(r, Vec::new(), ProspectiveParachainsMode::Disabled), |mut tx: TestSubsystemSender| async move { - let prospective_parachains_mode = - ProspectiveParachainsMode::Disabled { backed_candidates: Vec::new() }; + let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; select_candidates( &[], &[], - &prospective_parachains_mode, + &[], + prospective_parachains_mode, Default::default(), &mut tx, ) @@ -431,8 +427,7 @@ mod select_candidates { // why those particular indices? 
see the comments on mock_availability_cores() let expected_candidates: Vec<_> = [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); - let prospective_parachains_mode = - ProspectiveParachainsMode::Disabled { backed_candidates: candidates }; + let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; let expected_backed = expected_candidates .iter() @@ -447,12 +442,13 @@ mod select_candidates { .collect(); test_harness( - |r| mock_overseer(r, expected_backed, TestProspectiveParachainsMode::Disabled), + |r| mock_overseer(r, expected_backed, ProspectiveParachainsMode::Disabled), |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, &[], - &prospective_parachains_mode, + &candidates, + prospective_parachains_mode, Default::default(), &mut tx, ) @@ -520,16 +516,16 @@ mod select_candidates { let expected_backed_filtered: Vec<_> = expected_cores.iter().map(|&idx| candidates[idx].clone()).collect(); - let prospective_parachains_mode = - ProspectiveParachainsMode::Disabled { backed_candidates: candidates }; + let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; test_harness( - |r| mock_overseer(r, expected_backed, TestProspectiveParachainsMode::Disabled), + |r| mock_overseer(r, expected_backed, ProspectiveParachainsMode::Disabled), |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, &[], - &prospective_parachains_mode, + &candidates, + prospective_parachains_mode, Default::default(), &mut tx, ) @@ -591,12 +587,13 @@ mod select_candidates { .collect(); test_harness( - |r| mock_overseer(r, expected_backed, TestProspectiveParachainsMode::Enabled), + |r| mock_overseer(r, expected_backed, ProspectiveParachainsMode::Enabled), |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, &[], - &prospective_parachains_mode, + &[], + prospective_parachains_mode, Default::default(), &mut tx, ) diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index 79222fde117c..4e05db3f247c 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -46,7 +46,10 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, - runtime::{get_availability_cores, get_group_rotation_info, RuntimeInfo}, + runtime::{ + get_availability_cores, get_group_rotation_info, prospective_parachains_mode, + ProspectiveParachainsMode, RuntimeInfo, + }, TimeoutExt, }; use polkadot_primitives::v2::{ @@ -54,9 +57,7 @@ use polkadot_primitives::v2::{ GroupIndex, Hash, Id as ParaId, SessionIndex, }; -use super::{ - prospective_parachains_mode, ProspectiveParachainsMode, LOG_TARGET, MAX_CANDIDATE_DEPTH, -}; +use super::{LOG_TARGET, MAX_CANDIDATE_DEPTH}; use crate::{ error::{log_error, Error, FatalError, Result}, modify_reputation, diff --git a/node/network/collator-protocol/src/error.rs b/node/network/collator-protocol/src/error.rs index 4003ac438c92..c9dc4ac3207f 100644 --- a/node/network/collator-protocol/src/error.rs +++ b/node/network/collator-protocol/src/error.rs @@ -52,9 +52,6 @@ pub enum Error { #[error(transparent)] ImplicitViewFetchError(backing_implicit_view::FetchError), - #[error("Response receiver for Runtime API version request cancelled")] - CancelledRuntimeApiVersion(oneshot::Canceled), - #[error("Response receiver for active validators request cancelled")] 
CancelledActiveValidators(oneshot::Canceled), diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index 85f560ceaa6e..ca5bf5e297c6 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -34,12 +34,10 @@ use polkadot_node_network_protocol::{ request_response::{v1 as request_v1, vstaging as protocol_vstaging, IncomingRequestReceiver}, PeerId, UnifiedReputationChange as Rep, }; -use polkadot_primitives::v2::{CollatorPair, Hash}; +use polkadot_primitives::v2::CollatorPair; use polkadot_node_subsystem::{ - errors::SubsystemError, - messages::{NetworkBridgeTxMessage, RuntimeApiMessage, RuntimeApiRequest}, - overseer, SpawnedSubsystem, + errors::SubsystemError, messages::NetworkBridgeTxMessage, overseer, SpawnedSubsystem, }; mod error; @@ -189,48 +187,3 @@ fn tick_stream(period: Duration) -> impl FusedStream { }) .fuse() } - -#[derive(Debug, Clone, Copy, PartialEq)] -enum ProspectiveParachainsMode { - // v2 runtime API: no prospective parachains. - Disabled, - // vstaging runtime API: prospective parachains. - Enabled, -} - -impl ProspectiveParachainsMode { - fn is_enabled(&self) -> bool { - matches!(self, Self::Enabled) - } -} - -async fn prospective_parachains_mode( - sender: &mut Sender, - leaf_hash: Hash, -) -> Result -where - Sender: polkadot_node_subsystem::CollatorProtocolSenderTrait, -{ - let (tx, rx) = futures::channel::oneshot::channel(); - sender - .send_message(RuntimeApiMessage::Request(leaf_hash, RuntimeApiRequest::Version(tx))) - .await; - - let version = rx - .await - .map_err(error::Error::CancelledRuntimeApiVersion)? - .map_err(error::Error::RuntimeApi)?; - - if version >= RuntimeApiRequest::VALIDITY_CONSTRAINTS { - Ok(ProspectiveParachainsMode::Enabled) - } else { - if version < 2 { - gum::warn!( - target: LOG_TARGET, - "Runtime API version is {}, it is expected to be at least 2. Prospective parachains are disabled", - version - ); - } - Ok(ProspectiveParachainsMode::Disabled) - } -} diff --git a/node/network/collator-protocol/src/validator_side/collation.rs b/node/network/collator-protocol/src/validator_side/collation.rs index 48353cb266ad..e00e05c00a01 100644 --- a/node/network/collator-protocol/src/validator_side/collation.rs +++ b/node/network/collator-protocol/src/validator_side/collation.rs @@ -32,11 +32,12 @@ use std::collections::VecDeque; use polkadot_node_network_protocol::PeerId; use polkadot_node_primitives::PoV; +use polkadot_node_subsystem_util::runtime::ProspectiveParachainsMode; use polkadot_primitives::v2::{ CandidateHash, CandidateReceipt, CollatorId, Hash, Id as ParaId, PersistedValidationData, }; -use crate::{error::SecondingError, ProspectiveParachainsMode, LOG_TARGET, MAX_CANDIDATE_DEPTH}; +use crate::{error::SecondingError, LOG_TARGET, MAX_CANDIDATE_DEPTH}; /// Candidate supplied with a para head it's built on top of. 
#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index 2b93b295418d..6de0dd6b8542 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -54,7 +54,9 @@ use polkadot_node_subsystem::{ overseer, CollatorProtocolSenderTrait, FromOrchestra, OverseerSignal, PerLeafSpan, }; use polkadot_node_subsystem_util::{ - backing_implicit_view::View as ImplicitView, metrics::prometheus::prometheus::HistogramTimer, + backing_implicit_view::View as ImplicitView, + metrics::prometheus::prometheus::HistogramTimer, + runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, }; use polkadot_primitives::v2::{ CandidateHash, CandidateReceipt, CollatorId, CoreState, Hash, Id as ParaId, @@ -63,10 +65,7 @@ use polkadot_primitives::v2::{ use crate::error::{Error, FetchError, Result, SecondingError}; -use super::{ - modify_reputation, prospective_parachains_mode, tick_stream, ProspectiveParachainsMode, - LOG_TARGET, MAX_CANDIDATE_DEPTH, -}; +use super::{modify_reputation, tick_stream, LOG_TARGET, MAX_CANDIDATE_DEPTH}; mod collation; mod metrics; diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index b30742e78aba..68fdad77895d 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -195,6 +195,7 @@ macro_rules! specialize_requests { } specialize_requests! { + fn request_runtime_api_version() -> u32; Version; fn request_authorities() -> Vec; Authorities; fn request_validators() -> Vec; Validators; fn request_validator_groups() -> (Vec>, GroupRotationInfo); ValidatorGroups; diff --git a/node/subsystem-util/src/runtime/mod.rs b/node/subsystem-util/src/runtime/mod.rs index 801a8c7d0e4a..410f46e78e21 100644 --- a/node/subsystem-util/src/runtime/mod.rs +++ b/node/subsystem-util/src/runtime/mod.rs @@ -25,7 +25,10 @@ use sp_application_crypto::AppKey; use sp_core::crypto::ByteArray; use sp_keystore::{CryptoStore, SyncCryptoStorePtr}; -use polkadot_node_subsystem::{messages::RuntimeApiMessage, overseer, SubsystemSender}; +use polkadot_node_subsystem::{ + messages::{RuntimeApiMessage, RuntimeApiRequest}, + overseer, SubsystemSender, +}; use polkadot_primitives::v2::{ CandidateEvent, CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash, OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, UncheckedSigned, @@ -34,8 +37,8 @@ use polkadot_primitives::v2::{ use crate::{ request_availability_cores, request_candidate_events, request_on_chain_votes, - request_session_index_for_child, request_session_info, request_validation_code_by_hash, - request_validator_groups, + request_runtime_api_version, request_session_index_for_child, request_session_info, + request_validation_code_by_hash, request_validator_groups, }; /// Errors that can happen on runtime fetches. @@ -44,6 +47,8 @@ mod error; use error::{recv_runtime, Result}; pub use error::{Error, FatalError, JfyiError}; +const LOG_TARGET: &'static str = "parachain::runtime-info"; + /// Configuration for construction a `RuntimeInfo`. pub struct Config { /// Needed for retrieval of `ValidatorInfo` @@ -341,4 +346,49 @@ where .await } +/// Prospective parachains mode of a relay parent. Defined by +/// the Runtime API version. +/// +/// Needed for the period of transition to asynchronous backing. 
+#[derive(Debug, Copy, Clone)] +pub enum ProspectiveParachainsMode { + /// v2 runtime API: no prospective parachains. + Disabled, + /// vstaging runtime API: prospective parachains. + Enabled, +} + +impl ProspectiveParachainsMode { + /// Returns `true` if mode is enabled, `false` otherwise. + pub fn is_enabled(&self) -> bool { + matches!(self, ProspectiveParachainsMode::Enabled) + } +} + +/// Requests prospective parachains mode for a given relay parent based on +/// the Runtime API version. +pub async fn prospective_parachains_mode( + sender: &mut Sender, + relay_parent: Hash, +) -> Result +where + Sender: SubsystemSender, +{ + let version = recv_runtime(request_runtime_api_version(relay_parent, sender).await).await?; + + if version >= RuntimeApiRequest::VALIDITY_CONSTRAINTS { + Ok(ProspectiveParachainsMode::Enabled) + } else { + if version < 2 { + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + "Runtime API version is {}, it is expected to be at least 2. Prospective parachains are disabled", + version + ); + } + Ok(ProspectiveParachainsMode::Disabled) + } +} + // TODO [now] : a way of getting all [`ContextLimitations`] from runtime. From fd1569a86435ae024298f1b3656e90d3d07b3388 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Tue, 25 Oct 2022 08:14:28 +0400 Subject: [PATCH 18/76] backing: don't send backed candidate to provisioner (#6185) --- node/core/backing/src/lib.rs | 22 +++++++++---------- .../src/tests/prospective_parachains.rs | 13 ----------- 2 files changed, 11 insertions(+), 24 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 2a8fd59bd6e1..9c897b4848af 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -1525,18 +1525,18 @@ async fn import_statement( candidate_hash, )) .await; + } else { + // The provisioner waits on candidate-backing, which means + // that we need to send unbounded messages to avoid cycles. + // + // Backed candidates are bounded by the number of validators, + // parachains, and the block production rate of the relay chain. + let message = ProvisionerMessage::ProvisionableData( + rp_state.parent, + ProvisionableData::BackedCandidate(backed.receipt()), + ); + ctx.send_unbounded_message(message); } - - // The provisioner waits on candidate-backing, which means - // that we need to send unbounded messages to avoid cycles. - // - // Backed candidates are bounded by the number of validators, - // parachains, and the block production rate of the relay chain. 
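The effect of this hunk is that the provisioner notification becomes the fallback path rather than the unconditional one. The resulting control flow, condensed (a sketch; names as in `import_statement`):

```rust
if rp_state.prospective_parachains_mode.is_enabled() {
    // Async backing: prospective-parachains tracks backed candidates itself,
    // so the provisioner must not also receive them.
    ctx.send_message(ProspectiveParachainsMessage::CandidateBacked(
        para_id,
        candidate_hash,
    ))
    .await;
} else {
    // Legacy path: hand the backed candidate to the provisioner, unbounded
    // to avoid a message cycle between the two subsystems.
    ctx.send_unbounded_message(ProvisionerMessage::ProvisionableData(
        rp_state.parent,
        ProvisionableData::BackedCandidate(backed.receipt()),
    ));
}
```
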
- let message = ProvisionerMessage::ProvisionableData( - rp_state.parent, - ProvisionableData::BackedCandidate(backed.receipt()), - ); - ctx.send_unbounded_message(message); } } } diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 59ed1027e624..d9aed831565b 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -1044,18 +1044,6 @@ fn backing_works() { ) if candidate_a_hash == candidate_hash && candidate_para_id == para_id ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::Provisioner( - ProvisionerMessage::ProvisionableData( - _, - ProvisionableData::BackedCandidate(candidate_receipt) - ) - ) => { - assert_eq!(candidate_receipt, candidate_a.to_plain()); - } - ); - assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -1267,7 +1255,6 @@ fn concurrent_dependent_candidates() { AllMessages::ProspectiveParachains( ProspectiveParachainsMessage::CandidateBacked(..), ) => {}, - AllMessages::Provisioner(ProvisionerMessage::ProvisionableData(..)) => {}, AllMessages::StatementDistribution(StatementDistributionMessage::Share( _, statement, From 0f3ef99e0277832aa8967d6ac552fd28c2cb7535 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Thu, 3 Nov 2022 23:11:09 +0400 Subject: [PATCH 19/76] backing: introduce `CanSecond` request for advertisements filtering (#6225) * Drop BoundToRelayParent * draft changes * fix backing tests * Fix genesis ancestry * Fix validator side tests * more tests --- node/core/backing/src/lib.rs | 89 +++- node/core/backing/src/tests/mod.rs | 2 +- .../src/tests/prospective_parachains.rs | 11 +- .../src/collator_side/mod.rs | 14 +- .../src/validator_side/collation.rs | 15 + .../src/validator_side/mod.rs | 368 +++++++++++----- .../src/validator_side/tests/mod.rs | 4 +- .../tests/prospective_parachains.rs | 401 +++++++++++++++++- node/subsystem-types/src/messages.rs | 61 ++- .../src/backing_implicit_view.rs | 29 +- 10 files changed, 818 insertions(+), 176 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 9c897b4848af..4a70555d08a2 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -85,10 +85,10 @@ use polkadot_node_primitives::{ }; use polkadot_node_subsystem::{ messages::{ - AvailabilityDistributionMessage, AvailabilityStoreMessage, CandidateBackingMessage, - CandidateValidationMessage, CollatorProtocolMessage, HypotheticalDepthRequest, - ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiMessage, - RuntimeApiRequest, StatementDistributionMessage, + AvailabilityDistributionMessage, AvailabilityStoreMessage, CanSecondRequest, + CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, + HypotheticalDepthRequest, ProspectiveParachainsMessage, ProvisionableData, + ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, StatementDistributionMessage, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; @@ -758,6 +758,8 @@ async fn handle_communication( "Received `GetBackedCandidates` for an unknown relay parent." 
); }, + CandidateBackingMessage::CanSecond(request, tx) => + handle_can_second_request(ctx, state, request, tx).await, } Ok(()) @@ -1093,22 +1095,8 @@ async fn seconding_sanity_check( candidate_hash: CandidateHash, candidate_para: ParaId, parent_head_data_hash: Hash, - head_data_hash: Hash, candidate_relay_parent: Hash, ) -> SecondingAllowed { - // Note that `GetHypotheticalDepths` doesn't account for recursion, - // i.e. candidates can appear at multiple depths in the tree and in fact - // at all depths, and we don't know what depths a candidate will ultimately occupy - // because that's dependent on other candidates we haven't yet received. - // - // The only way to effectively rule this out is to have candidate receipts - // directly commit to the parachain block number or some other incrementing - // counter. That requires a major primitives format upgrade, so for now - // we just rule out trivial cycles. - if parent_head_data_hash == head_data_hash { - return SecondingAllowed::No - } - let mut membership = Vec::new(); let mut responses = FuturesOrdered::>>::new(); @@ -1193,6 +1181,46 @@ async fn seconding_sanity_check( SecondingAllowed::Yes(membership) } +/// Performs seconding sanity check for an advertisement. +#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn handle_can_second_request( + ctx: &mut Context, + state: &State, + request: CanSecondRequest, + tx: oneshot::Sender, +) { + let relay_parent = request.candidate_relay_parent; + let response = if state + .per_relay_parent + .get(&relay_parent) + .map_or(false, |pr_state| pr_state.prospective_parachains_mode.is_enabled()) + { + let result = seconding_sanity_check( + ctx, + &state.per_leaf, + &state.implicit_view, + request.candidate_hash, + request.candidate_para_id, + request.parent_head_data_hash, + relay_parent, + ) + .await; + + match result { + SecondingAllowed::No => false, + SecondingAllowed::Yes(membership) => { + // Candidate should be recognized by at least some fragment tree. + membership.iter().any(|(_, m)| !m.is_empty()) + }, + } + } else { + // Relay parent is unknown or async backing is disabled. + false + }; + + let _ = tx.send(response); +} + #[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn handle_validated_candidate_command( ctx: &mut Context, @@ -1219,6 +1247,19 @@ async fn handle_validated_candidate_command( return Ok(()) } + let parent_head_data_hash = persisted_validation_data.parent_head.hash(); + // Note that `GetHypotheticalDepths` doesn't account for recursion, + // i.e. candidates can appear at multiple depths in the tree and in fact + // at all depths, and we don't know what depths a candidate will ultimately occupy + // because that's dependent on other candidates we haven't yet received. + // + // The only way to effectively rule this out is to have candidate receipts + // directly commit to the parachain block number or some other incrementing + // counter. That requires a major primitives format upgrade, so for now + // we just rule out trivial cycles. + if parent_head_data_hash == commitments.head_data.hash() { + return Ok(()) + } // sanity check that we're allowed to second the candidate // and that it doesn't conflict with other candidates we've // seconded. 
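`handle_can_second_request` is the backing half of the new advertisement filter: the collator protocol asks before fetching, and backing answers by probing fragment-tree membership. The core of the decision, condensed from this hunk (the surrounding code also answers `false` outright when the relay parent is unknown or async backing is disabled):

```rust
let response = match seconding_sanity_check(
    ctx,
    &state.per_leaf,
    &state.implicit_view,
    request.candidate_hash,
    request.candidate_para_id,
    request.parent_head_data_hash,
    request.candidate_relay_parent,
)
.await
{
    SecondingAllowed::No => false,
    // At least one fragment tree must recognize the hypothetical candidate.
    SecondingAllowed::Yes(membership) => membership.iter().any(|(_, m)| !m.is_empty()),
};
let _ = tx.send(response);
```
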
@@ -1229,7 +1270,6 @@ async fn handle_validated_candidate_command( candidate_hash, candidate.descriptor().para_id, persisted_validation_data.parent_head.hash(), - commitments.head_data.hash(), candidate.descriptor().relay_parent, ) .await @@ -1517,14 +1557,21 @@ async fn import_statement( "Candidate backed", ); - // Inform the prospective parachains subsystem - // that the candidate is now backed. if rp_state.prospective_parachains_mode.is_enabled() { + // Inform the prospective parachains subsystem + // that the candidate is now backed. ctx.send_message(ProspectiveParachainsMessage::CandidateBacked( para_id, candidate_hash, )) .await; + // Backed candidate potentially unblocks new advertisements, + // notify collator protocol. + ctx.send_message(CollatorProtocolMessage::Backed { + para_id, + para_head: backed.candidate.descriptor.para_head, + }) + .await; } else { // The provisioner waits on candidate-backing, which means // that we need to send unbounded messages to avoid cycles. diff --git a/node/core/backing/src/tests/mod.rs b/node/core/backing/src/tests/mod.rs index 2e2a5878a888..dff05c7b76f0 100644 --- a/node/core/backing/src/tests/mod.rs +++ b/node/core/backing/src/tests/mod.rs @@ -212,7 +212,7 @@ impl TestCandidateBuilder { erasure_root: self.erasure_root, collator: dummy_collator(), signature: dummy_collator_signature(), - para_head: dummy_hash(), + para_head: self.head_data.hash(), validation_code_hash: ValidationCode(self.validation_code).hash(), persisted_validation_data_hash: self.persisted_validation_data_hash, }, diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index d9aed831565b..86ef1e069977 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -959,6 +959,7 @@ fn backing_works() { .build(); let candidate_a_hash = candidate_a.hash(); + let candidate_a_para_head = candidate_a.descriptor().para_head; let public1 = CryptoStore::sr25519_generate_new( &*test_state.keystore, @@ -1034,7 +1035,7 @@ fn backing_works() { ) .await; - // Prospective parachains are notified about candidate backed. + // Prospective parachains and collator protocol are notified about candidate backed. assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( @@ -1043,6 +1044,13 @@ fn backing_works() { ), ) if candidate_a_hash == candidate_hash && candidate_para_id == para_id ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Backed { + para_id: _para_id, + para_head, + }) if para_id == _para_id && candidate_a_para_head == para_head + ); assert_matches!( virtual_overseer.recv().await, @@ -1255,6 +1263,7 @@ fn concurrent_dependent_candidates() { AllMessages::ProspectiveParachains( ProspectiveParachainsMessage::CandidateBacked(..), ) => {}, + AllMessages::CollatorProtocol(CollatorProtocolMessage::Backed { .. 
}) => {}, AllMessages::StatementDistribution(StatementDistributionMessage::Share( _, statement, diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index 4e05db3f247c..846fd888fd0e 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -789,12 +789,6 @@ async fn process_msg( }, } }, - ReportCollator(_) => { - gum::warn!( - target: LOG_TARGET, - "ReportCollator message is not expected on the collator side of the protocol", - ); - }, NetworkBridgeUpdate(event) => { // We should count only this shoulder in the histogram, as other shoulders are just introducing noise let _ = state.metrics.time_process_msg(); @@ -807,7 +801,13 @@ async fn process_msg( ); } }, - _ => {}, + msg @ (ReportCollator(..) | Invalid(..) | Seconded(..) | Backed { .. }) => { + gum::warn!( + target: LOG_TARGET, + "{:?} message is not expected on the collator side of the protocol", + msg, + ); + }, } Ok(()) diff --git a/node/network/collator-protocol/src/validator_side/collation.rs b/node/network/collator-protocol/src/validator_side/collation.rs index e00e05c00a01..a18397e09051 100644 --- a/node/network/collator-protocol/src/validator_side/collation.rs +++ b/node/network/collator-protocol/src/validator_side/collation.rs @@ -112,6 +112,21 @@ impl PendingCollation { } } +/// vstaging advertisement that was rejected by the backing +/// subsystem. Validator may fetch it later if its fragment +/// membership gets recognized before relay parent goes out of view. +#[derive(Debug, Clone)] +pub struct BlockedAdvertisement { + /// Peer that advertised the collation. + pub peer_id: PeerId, + /// Collator id. + pub collator_id: CollatorId, + /// The relay-parent of the candidate. + pub candidate_relay_parent: Hash, + /// Hash of the candidate. + pub candidate_hash: CandidateHash, +} + /// Performs a sanity check between advertised and fetched collations. /// /// Since the persisted validation data is constructed using the advertised diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index 6de0dd6b8542..a6ef8d45bffc 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -48,8 +48,9 @@ use polkadot_node_primitives::{PoV, SignedFullStatement, Statement}; use polkadot_node_subsystem::{ jaeger, messages::{ - CandidateBackingMessage, CollatorProtocolMessage, IfDisconnected, NetworkBridgeEvent, - NetworkBridgeTxMessage, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, + CanSecondRequest, CandidateBackingMessage, CollatorProtocolMessage, IfDisconnected, + NetworkBridgeEvent, NetworkBridgeTxMessage, ProspectiveParachainsMessage, + ProspectiveValidationDataRequest, }, overseer, CollatorProtocolSenderTrait, FromOrchestra, OverseerSignal, PerLeafSpan, }; @@ -71,8 +72,8 @@ mod collation; mod metrics; use collation::{ - fetched_collation_sanity_check, CollationEvent, CollationStatus, Collations, FetchedCollation, - PendingCollation, PendingCollationFetch, ProspectiveCandidate, + fetched_collation_sanity_check, BlockedAdvertisement, CollationEvent, CollationStatus, + Collations, FetchedCollation, PendingCollation, PendingCollationFetch, ProspectiveCandidate, }; #[cfg(test)] @@ -427,6 +428,14 @@ struct State { /// Span per relay parent. 
 	span_per_relay_parent: HashMap<Hash, PerLeafSpan>,
 
+	/// Advertisements that were accepted as valid by the collator protocol but rejected by backing.
+	///
+	/// It's only legal to fetch collations that are either built on top of the root
+	/// of some fragment tree or have a parent node which represents a backed candidate.
+	/// Otherwise, a validator will keep such advertisements in memory and re-trigger
+	/// requests to backing on new backed candidates and leaf activations.
+	blocked_advertisements: HashMap<(ParaId, Hash), Vec<BlockedAdvertisement>>,
+
 	/// Keep track of all fetch collation requests
 	collation_fetches: FuturesUnordered<BoxFuture<'static, PendingCollationFetch>>,
 
@@ -844,7 +853,7 @@ async fn process_incoming_peer_message(
 		},
 		Versioned::V1(V1::AdvertiseCollation(relay_parent)) =>
 			if let Err(err) =
-				handle_advertisement(ctx.sender(), state, relay_parent, &origin, None).await
+				handle_advertisement(ctx.sender(), state, relay_parent, origin, None).await
 			{
 				gum::debug!(
 					target: LOG_TARGET,
@@ -867,7 +876,7 @@ async fn process_incoming_peer_message(
 					ctx.sender(),
 					state,
 					relay_parent,
-					&origin,
+					origin,
 					Some((candidate_hash, parent_head_data_hash)),
 				)
 				.await
@@ -898,21 +907,6 @@ async fn process_incoming_peer_message(
 	}
 }
 
-async fn is_seconding_allowed<Sender>(
-	_sender: &mut Sender,
-	_relay_parent: Hash,
-	_candidate_hash: CandidateHash,
-	_parent_head_data_hash: Hash,
-	_para_id: ParaId,
-	_active_leaves: impl IntoIterator<Item = Hash>,
-) -> Option<bool>
-where
-	Sender: CollatorProtocolSenderTrait,
-{
-	// TODO https://github.com/paritytech/polkadot/issues/5923
-	Some(true)
-}
-
 #[derive(Debug)]
 enum AdvertisementError {
 	/// Relay parent is unknown.
@@ -929,8 +923,6 @@ enum AdvertisementError {
 	SecondedLimitReached,
 	/// Advertisement is invalid.
 	Invalid(InsertAdvertisementError),
-	/// Failed to query prospective parachains subsystem.
-	ProspectiveParachainsUnavailable,
 }
 
 impl AdvertisementError {
@@ -939,10 +931,93 @@ impl AdvertisementError {
 		match self {
 			InvalidAssignment => Some(COST_WRONG_PARA),
 			RelayParentUnknown | UndeclaredCollator | Invalid(_) => Some(COST_UNEXPECTED_MESSAGE),
-			UnknownPeer |
-			ProtocolMismatch |
-			SecondedLimitReached |
-			ProspectiveParachainsUnavailable => None,
+			UnknownPeer | ProtocolMismatch | SecondedLimitReached => None,
 		}
 	}
 }
 
+// Requests backing to sanity-check the advertisement.
+async fn can_second<Sender>(
+	sender: &mut Sender,
+	candidate_para_id: ParaId,
+	candidate_relay_parent: Hash,
+	candidate_hash: CandidateHash,
+	parent_head_data_hash: Hash,
+) -> bool
+where
+	Sender: CollatorProtocolSenderTrait,
+{
+	let request = CanSecondRequest {
+		candidate_para_id,
+		candidate_relay_parent,
+		candidate_hash,
+		parent_head_data_hash,
+	};
+	let (tx, rx) = oneshot::channel();
+	sender.send_message(CandidateBackingMessage::CanSecond(request, tx)).await;
+
+	rx.await.unwrap_or_else(|err| {
+		gum::warn!(
+			target: LOG_TARGET,
+			?err,
+			?candidate_relay_parent,
+			?candidate_para_id,
+			?candidate_hash,
+			"CanSecond-request responder was dropped",
+		);
+
+		false
+	})
+}
+
+/// Checks whether any of the advertisements are unblocked and attempts to fetch them.
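+///
+/// A rough sketch of the logic below (illustrative only; the real types and
+/// calls are the ones defined in this module):
+///
+/// ```ignore
+/// for ((para_id, para_head), ads) in blocked {
+/// 	for ad in ads {
+/// 		let allowed = can_second(
+/// 			sender, para_id, ad.candidate_relay_parent, ad.candidate_hash, para_head,
+/// 		).await;
+/// 		if allowed {
+/// 			let _ = enqueue_collation(/* advertisement data */).await; // fetch it now
+/// 		} // otherwise the advertisement is kept for the next trigger
+/// 	}
+/// }
+/// ```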
+async fn request_unblocked_collations<Sender, I>(sender: &mut Sender, state: &mut State, blocked: I)
+where
+	Sender: CollatorProtocolSenderTrait,
+	I: IntoIterator<Item = ((ParaId, Hash), Vec<BlockedAdvertisement>)>,
+{
+	for (key, mut value) in blocked {
+		let (para_id, para_head) = key;
+		let blocked = std::mem::take(&mut value);
+		for blocked in blocked {
+			let is_seconding_allowed = can_second(
+				sender,
+				para_id,
+				blocked.candidate_relay_parent,
+				blocked.candidate_hash,
+				para_head,
+			)
+			.await;
+
+			if is_seconding_allowed {
+				let result = enqueue_collation(
+					sender,
+					state,
+					blocked.candidate_relay_parent,
+					para_id,
+					blocked.peer_id,
+					blocked.collator_id,
+					Some((blocked.candidate_hash, para_head)),
+				)
+				.await;
+				if let Err(fetch_error) = result {
+					gum::debug!(
+						target: LOG_TARGET,
+						relay_parent = ?blocked.candidate_relay_parent,
+						para_id = ?para_id,
+						peer_id = ?blocked.peer_id,
+						error = %fetch_error,
+						"Failed to request unblocked collation",
+					);
+				}
+			} else {
+				// Keep the advertisement.
+				value.push(blocked);
+			}
+		}
+
+		if !value.is_empty() {
+			state.blocked_advertisements.insert(key, value);
+		}
+	}
+}
+
@@ -951,7 +1026,7 @@ async fn handle_advertisement(
 	sender: &mut Sender,
 	state: &mut State,
 	relay_parent: Hash,
-	peer_id: &PeerId,
+	peer_id: PeerId,
 	prospective_candidate: Option<(CandidateHash, Hash)>,
 ) -> std::result::Result<(), AdvertisementError>
 where
@@ -964,7 +1039,7 @@ where
 
 	let per_relay_parent = state
 		.per_relay_parent
-		.get_mut(&relay_parent)
+		.get(&relay_parent)
 		.ok_or(AdvertisementError::RelayParentUnknown)?;
 
 	let relay_parent_mode = per_relay_parent.prospective_parachains_mode;
@@ -981,98 +1056,171 @@ where
 		_ => return Err(AdvertisementError::InvalidAssignment),
 	};
 
-	// TODO: only fetch a collation if it's built on top of backed nodes in fragment tree.
-	// https://github.com/paritytech/polkadot/issues/5923
-	let is_seconding_allowed = match (relay_parent_mode, prospective_candidate) {
-		(ProspectiveParachainsMode::Disabled, _) => true,
-		(ProspectiveParachainsMode::Enabled, Some((candidate_hash, parent_head_data_hash))) => {
-			let active_leaves = state.active_leaves.keys().copied();
-			is_seconding_allowed(
+	if relay_parent_mode.is_enabled() && prospective_candidate.is_none() {
+		// Expected vstaging advertisement.
+		return Err(AdvertisementError::ProtocolMismatch)
+	}
+
+	// Always insert advertisements that pass all the checks for spam protection.
+	let candidate_hash = prospective_candidate.map(|(hash, ..)| hash);
+	let (collator_id, para_id) = peer_data
+		.insert_advertisement(
+			relay_parent,
+			relay_parent_mode,
+			candidate_hash,
+			&state.implicit_view,
+			&state.active_leaves,
+		)
+		.map_err(AdvertisementError::Invalid)?;
+	if !per_relay_parent.collations.is_seconded_limit_reached(relay_parent_mode) {
+		return Err(AdvertisementError::SecondedLimitReached)
+	}
+
+	if let Some((candidate_hash, parent_head_data_hash)) = prospective_candidate {
+		let is_seconding_allowed = !relay_parent_mode.is_enabled() ||
+			can_second(
 				sender,
+				collator_para_id,
 				relay_parent,
 				candidate_hash,
 				parent_head_data_hash,
-				collator_para_id,
-				active_leaves,
 			)
-			.await
-			.ok_or(AdvertisementError::ProspectiveParachainsUnavailable)?
-		},
-		_ => return Err(AdvertisementError::ProtocolMismatch),
-	};
+			.await;
 
-	if !is_seconding_allowed {
-		// TODO
-		return Ok(())
+		if !is_seconding_allowed {
+			gum::debug!(
+				target: LOG_TARGET,
+				relay_parent = ?relay_parent,
+				para_id = ?para_id,
+				?candidate_hash,
+				"Seconding is not allowed by backing, queueing advertisement",
+			);
+			state
+				.blocked_advertisements
+				.entry((collator_para_id, parent_head_data_hash))
+				.or_default()
+				.push(BlockedAdvertisement {
+					peer_id,
+					collator_id: collator_id.clone(),
+					candidate_relay_parent: relay_parent,
+					candidate_hash,
+				});
+
+			return Ok(())
+		}
 	}
 
-	let candidate_hash = prospective_candidate.map(|(hash, ..)| hash);
-	let insert_result = peer_data.insert_advertisement(
+	let result = enqueue_collation(
+		sender,
+		state,
 		relay_parent,
-		relay_parent_mode,
-		candidate_hash,
-		&state.implicit_view,
-		&state.active_leaves,
-	);
+		para_id,
+		peer_id,
+		collator_id,
+		prospective_candidate,
+	)
+	.await;
+	if let Err(fetch_error) = result {
+		gum::debug!(
+			target: LOG_TARGET,
+			relay_parent = ?relay_parent,
+			para_id = ?para_id,
+			peer_id = ?peer_id,
+			error = %fetch_error,
+			"Failed to request advertised collation",
+		);
+	}
 
-	match insert_result {
-		Ok((id, para_id)) => {
-			gum::debug!(
+	Ok(())
+}
+
+/// Enqueue collation for fetching. The advertisement is expected to have been
+/// validated.
+async fn enqueue_collation<Sender>(
+	sender: &mut Sender,
+	state: &mut State,
+	relay_parent: Hash,
+	para_id: ParaId,
+	peer_id: PeerId,
+	collator_id: CollatorId,
+	prospective_candidate: Option<(CandidateHash, Hash)>,
+) -> std::result::Result<(), FetchError>
+where
+	Sender: CollatorProtocolSenderTrait,
+{
+	gum::debug!(
+		target: LOG_TARGET,
+		peer_id = ?peer_id,
+		%para_id,
+		?relay_parent,
+		"Received advertise collation",
+	);
+	let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) {
+		Some(rp_state) => rp_state,
+		None => {
+			// Race happened, not an error.
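+			// The relay parent may have been pruned from the view after the
+			// advertisement was validated, e.g. while a `CanSecond` round trip
+			// to the backing subsystem was in flight.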
+ gum::trace!( target: LOG_TARGET, peer_id = ?peer_id, %para_id, ?relay_parent, - "Received advertise collation", + ?prospective_candidate, + "Candidate relay parent went out of view for valid advertisement", ); - let prospective_candidate = - prospective_candidate.map(|(candidate_hash, parent_head_data_hash)| { - ProspectiveCandidate { candidate_hash, parent_head_data_hash } - }); + return Ok(()) + }, + }; + let relay_parent_mode = per_relay_parent.prospective_parachains_mode; + let prospective_candidate = + prospective_candidate.map(|(candidate_hash, parent_head_data_hash)| ProspectiveCandidate { + candidate_hash, + parent_head_data_hash, + }); - let collations = &mut per_relay_parent.collations; - if !collations.is_seconded_limit_reached(relay_parent_mode) { - return Err(AdvertisementError::SecondedLimitReached) - } + let collations = &mut per_relay_parent.collations; + if !collations.is_seconded_limit_reached(relay_parent_mode) { + gum::trace!( + target: LOG_TARGET, + peer_id = ?peer_id, + %para_id, + ?relay_parent, + "Limit of seconded collations reached for valid advertisement", + ); + return Ok(()) + } - let pending_collation = - PendingCollation::new(relay_parent, para_id, peer_id, prospective_candidate); + let pending_collation = + PendingCollation::new(relay_parent, para_id, &peer_id, prospective_candidate); - match collations.status { - CollationStatus::Fetching | CollationStatus::WaitingOnValidation => { - gum::trace!( - target: LOG_TARGET, - peer_id = ?peer_id, - %para_id, - ?relay_parent, - "Added collation to the pending list" - ); - collations.waiting_queue.push_back((pending_collation, id)); - }, - CollationStatus::Waiting => { - let _ = fetch_collation(sender, state, pending_collation, id).await; - }, - CollationStatus::Seconded if relay_parent_mode.is_enabled() => { - // Limit is not reached, it's allowed to second another - // collation. - let _ = fetch_collation(sender, state, pending_collation, id).await; - }, - CollationStatus::Seconded => { - gum::trace!( - target: LOG_TARGET, - peer_id = ?peer_id, - %para_id, - ?relay_parent, - ?relay_parent_mode, - "A collation has already been seconded", - ); - }, - } + match collations.status { + CollationStatus::Fetching | CollationStatus::WaitingOnValidation => { + gum::trace!( + target: LOG_TARGET, + peer_id = ?peer_id, + %para_id, + ?relay_parent, + "Added collation to the pending list" + ); + collations.waiting_queue.push_back((pending_collation, collator_id)); + }, + CollationStatus::Waiting => { + fetch_collation(sender, state, pending_collation, collator_id).await?; + }, + CollationStatus::Seconded if relay_parent_mode.is_enabled() => { + // Limit is not reached, it's allowed to second another + // collation. + fetch_collation(sender, state, pending_collation, collator_id).await?; }, - Err(InsertAdvertisementError::ProtocolMismatch) => { - // Checked above. - return Err(AdvertisementError::ProtocolMismatch) + CollationStatus::Seconded => { + gum::trace!( + target: LOG_TARGET, + peer_id = ?peer_id, + %para_id, + ?relay_parent, + ?relay_parent_mode, + "A collation has already been seconded", + ); }, - Err(error) => return Err(AdvertisementError::Invalid(error)), } Ok(()) @@ -1168,6 +1316,20 @@ where state.span_per_relay_parent.remove(&removed); } } + // Remove blocked advertisements that left the view. + state.blocked_advertisements.retain(|_, ads| { + ads.retain(|ad| state.per_relay_parent.contains_key(&ad.candidate_relay_parent)); + + !ads.is_empty() + }); + // Re-trigger previously failed requests again. 
+	//
+	// This makes sense for several reasons. One simple example: the hypothetical
+	// depth of an advertised candidate may initially have exceeded the limit, but
+	// the advertisement becomes valid once the candidate is included in a new leaf.
+	let maybe_unblocked = std::mem::take(&mut state.blocked_advertisements);
+	// Could be optimized to only sanity-check new leaves.
+	request_unblocked_collations(sender, state, maybe_unblocked).await;
 
 	for (peer_id, peer_data) in state.peer_data.iter_mut() {
 		peer_data.prune_old_advertisements(
@@ -1340,6 +1502,10 @@ async fn process_msg(
 			);
 			}
 		},
+		Backed { para_id, para_head } => {
+			let maybe_unblocked = state.blocked_advertisements.remove_entry(&(para_id, para_head));
+			request_unblocked_collations(ctx.sender(), state, maybe_unblocked).await;
+		},
 		Invalid(parent, candidate_receipt) => {
 			let fetched_collation = FetchedCollation::from(&candidate_receipt);
 			let candidate_hash = fetched_collation.candidate_hash;
diff --git a/node/network/collator-protocol/src/validator_side/tests/mod.rs b/node/network/collator-protocol/src/validator_side/tests/mod.rs
index ffc8796d3450..3499c8639a6d 100644
--- a/node/network/collator-protocol/src/validator_side/tests/mod.rs
+++ b/node/network/collator-protocol/src/validator_side/tests/mod.rs
@@ -20,7 +20,7 @@ use futures::{executor, future, Future};
 use sp_core::{crypto::Pair, Encode};
 use sp_keyring::Sr25519Keyring;
 use sp_keystore::{testing::KeyStore as TestKeyStore, SyncCryptoStore};
-use std::{iter, sync::Arc, task::Poll, time::Duration};
+use std::{iter, sync::Arc, time::Duration};
 
 use polkadot_node_network_protocol::{
 	our_view,
@@ -752,7 +752,7 @@ fn fetch_one_collation_at_a_time() {
 		test_helpers::Yield::new().await;
 
 		// Second collation is not requested since there's already seconded one.
-		assert_matches!(futures::poll!(virtual_overseer.recv().boxed()), Poll::Pending);
+		assert_matches!(virtual_overseer.recv().now_or_never(), None);
 
 		virtual_overseer
 	})
diff --git a/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
index b854ae202014..3b25f9203fc9 100644
--- a/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
+++ b/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs
@@ -78,7 +78,7 @@ async fn update_view(
 	test_state: &TestState,
 	new_view: Vec<(Hash, u32)>, // Hash and block number.
 	activated: u8,              // How many new heads does this update contain?
-) {
+) -> Option<AllMessages> {
 	let new_view: HashMap<Hash, u32> = HashMap::from_iter(new_view);
 
 	let our_view =
@@ -185,6 +185,7 @@ async fn update_view(
 			.await;
 		}
 	}
+	next_overseer_message
}
 
 async fn send_seconded_statement(
@@ -273,9 +274,8 @@ fn v1_advertisement_rejected() {
 		advertise_collation(&mut virtual_overseer, peer_a, head_b, None).await;
 
 		// Not reported.
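+		// (With async backing enabled, a v1 advertisement is a `ProtocolMismatch`,
+		// which carries no reputation cost.)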
- assert!(overseer_recv_with_timeout(&mut virtual_overseer, Duration::from_millis(50)) - .await - .is_none()); + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); virtual_overseer }); @@ -333,6 +333,18 @@ fn accept_advertisements_from_implicit_view() { Some((candidate_hash, parent_head_data_hash)), ) .await; + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidate_hash); + assert_eq!(request.candidate_para_id, test_state.chain_ids[1]); + assert_eq!(request.parent_head_data_hash, parent_head_data_hash); + tx.send(true).expect("receiving side should be alive"); + } + ); + assert_fetch_collation_request( &mut virtual_overseer, head_c, @@ -348,6 +360,18 @@ fn accept_advertisements_from_implicit_view() { Some((candidate_hash, parent_head_data_hash)), ) .await; + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidate_hash); + assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); + assert_eq!(request.parent_head_data_hash, parent_head_data_hash); + tx.send(true).expect("receiving side should be alive"); + } + ); + assert_fetch_collation_request( &mut virtual_overseer, head_d, @@ -417,6 +441,17 @@ fn second_multiple_candidates_per_relay_parent() { Some((candidate_hash, parent_head_data_hash)), ) .await; + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidate_hash); + assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); + assert_eq!(request.parent_head_data_hash, parent_head_data_hash); + tx.send(true).expect("receiving side should be alive"); + } + ); let response_channel = assert_fetch_collation_request( &mut virtual_overseer, @@ -496,9 +531,8 @@ fn second_multiple_candidates_per_relay_parent() { ) .await; - assert!(overseer_recv_with_timeout(&mut virtual_overseer, Duration::from_millis(50)) - .await - .is_none()); + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); virtual_overseer }); @@ -559,6 +593,17 @@ fn fetched_collation_sanity_check() { Some((candidate_hash, parent_head_data_hash)), ) .await; + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidate_hash); + assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); + assert_eq!(request.parent_head_data_hash, parent_head_data_hash); + tx.send(true).expect("receiving side should be alive"); + } + ); let response_channel = assert_fetch_collation_request( &mut virtual_overseer, @@ -604,3 +649,345 @@ fn fetched_collation_sanity_check() { virtual_overseer }); } + +#[test] +fn advertisement_spam_protection() { + let test_state = TestState::default(); + + test_harness(|test_harness| async move { + let TestHarness { mut virtual_overseer, .. } = test_harness; + + let pair_a = CollatorPair::generate().0; + + let head_b = Hash::from_low_u64_be(128); + let head_b_num: u32 = 2; + + let head_c = get_parent_hash(head_b); + + // Activated leaf is `b`, but the collation will be based on `c`. 
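+		// `c` is an ancestor of `b` and hence part of the implicit view, so
+		// advertisements anchored to it are still accepted.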
+ update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + + let peer_a = PeerId::random(); + connect_and_declare_collator( + &mut virtual_overseer, + peer_a, + pair_a.clone(), + test_state.chain_ids[1], + CollationVersion::VStaging, + ) + .await; + + let candidate_hash = CandidateHash::default(); + let parent_head_data_hash = Hash::zero(); + advertise_collation( + &mut virtual_overseer, + peer_a, + head_c, + Some((candidate_hash, parent_head_data_hash)), + ) + .await; + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidate_hash); + assert_eq!(request.candidate_para_id, test_state.chain_ids[1]); + assert_eq!(request.parent_head_data_hash, parent_head_data_hash); + // Reject it. + tx.send(false).expect("receiving side should be alive"); + } + ); + + // Send the same advertisement again. + advertise_collation( + &mut virtual_overseer, + peer_a, + head_c, + Some((candidate_hash, parent_head_data_hash)), + ) + .await; + // Reported. + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::NetworkBridgeTx( + NetworkBridgeTxMessage::ReportPeer(peer_id, rep), + ) => { + assert_eq!(peer_a, peer_id); + assert_eq!(rep, COST_UNEXPECTED_MESSAGE); + } + ); + + virtual_overseer + }); +} + +#[test] +fn backed_candidate_unblocks_advertisements() { + let test_state = TestState::default(); + + test_harness(|test_harness| async move { + let TestHarness { mut virtual_overseer, .. } = test_harness; + + let pair_a = CollatorPair::generate().0; + let pair_b = CollatorPair::generate().0; + + let head_b = Hash::from_low_u64_be(128); + let head_b_num: u32 = 2; + + let head_c = get_parent_hash(head_b); + // Grandparent of head `b`. + // Group rotation frequency is 1 by default, at `d` we're assigned + // to the first para. + let head_d = get_parent_hash(head_c); + + // Activated leaf is `b`, but the collation will be based on `c`. + update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await; + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + + // Accept both collators from the implicit view. + connect_and_declare_collator( + &mut virtual_overseer, + peer_a, + pair_a.clone(), + test_state.chain_ids[0], + CollationVersion::VStaging, + ) + .await; + connect_and_declare_collator( + &mut virtual_overseer, + peer_b, + pair_b.clone(), + test_state.chain_ids[1], + CollationVersion::VStaging, + ) + .await; + + let candidate_hash = CandidateHash::default(); + let parent_head_data_hash = Hash::zero(); + advertise_collation( + &mut virtual_overseer, + peer_b, + head_c, + Some((candidate_hash, parent_head_data_hash)), + ) + .await; + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidate_hash); + assert_eq!(request.candidate_para_id, test_state.chain_ids[1]); + assert_eq!(request.parent_head_data_hash, parent_head_data_hash); + // Reject it. + tx.send(false).expect("receiving side should be alive"); + } + ); + + // Advertise with different para. + advertise_collation( + &mut virtual_overseer, + peer_a, + head_d, // Note different relay parent. 
+			Some((candidate_hash, parent_head_data_hash)),
+		)
+		.await;
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::CandidateBacking(
+				CandidateBackingMessage::CanSecond(request, tx),
+			) => {
+				assert_eq!(request.candidate_hash, candidate_hash);
+				assert_eq!(request.candidate_para_id, test_state.chain_ids[0]);
+				assert_eq!(request.parent_head_data_hash, parent_head_data_hash);
+				tx.send(false).expect("receiving side should be alive");
+			}
+		);
+
+		overseer_send(
+			&mut virtual_overseer,
+			CollatorProtocolMessage::Backed {
+				para_id: test_state.chain_ids[0],
+				para_head: parent_head_data_hash,
+			},
+		)
+		.await;
+		assert_matches!(
+			overseer_recv(&mut virtual_overseer).await,
+			AllMessages::CandidateBacking(
+				CandidateBackingMessage::CanSecond(request, tx),
+			) => {
+				assert_eq!(request.candidate_hash, candidate_hash);
+				assert_eq!(request.candidate_para_id, test_state.chain_ids[0]);
+				assert_eq!(request.parent_head_data_hash, parent_head_data_hash);
+				tx.send(true).expect("receiving side should be alive");
+			}
+		);
+		assert_fetch_collation_request(
+			&mut virtual_overseer,
+			head_d,
+			test_state.chain_ids[0],
+			Some(candidate_hash),
+		)
+		.await;
+		virtual_overseer
+	});
+}
+
+#[test]
+fn active_leave_unblocks_advertisements() {
+	let mut test_state = TestState::default();
+	test_state.group_rotation_info.group_rotation_frequency = 100;
+
+	test_harness(|test_harness| async move {
+		let TestHarness { mut virtual_overseer, .. } = test_harness;
+
+		let head_b = Hash::from_low_u64_be(128);
+		let head_b_num: u32 = 0;
+
+		update_view(&mut virtual_overseer, &test_state, vec![(head_b, head_b_num)], 1).await;
+
+		let peers: Vec<CollatorPair> = (0..3).map(|_| CollatorPair::generate().0).collect();
+		let peer_ids: Vec<PeerId> = (0..3).map(|_| PeerId::random()).collect();
+		let candidates: Vec<CandidateHash> =
+			(0u8..3).map(|i| CandidateHash(Hash::repeat_byte(i))).collect();
+
+		for (peer, peer_id) in peers.iter().zip(&peer_ids) {
+			connect_and_declare_collator(
+				&mut virtual_overseer,
+				*peer_id,
+				peer.clone(),
+				test_state.chain_ids[0],
+				CollationVersion::VStaging,
+			)
+			.await;
+		}
+
+		let parent_head_data_hash = Hash::zero();
+		for (peer, candidate) in peer_ids.iter().zip(&candidates).take(2) {
+			advertise_collation(
+				&mut virtual_overseer,
+				*peer,
+				head_b,
+				Some((*candidate, parent_head_data_hash)),
+			)
+			.await;
+
+			assert_matches!(
+				overseer_recv(&mut virtual_overseer).await,
+				AllMessages::CandidateBacking(
+					CandidateBackingMessage::CanSecond(request, tx),
+				) => {
+					assert_eq!(request.candidate_hash, *candidate);
+					assert_eq!(request.candidate_para_id, test_state.chain_ids[0]);
+					assert_eq!(request.parent_head_data_hash, parent_head_data_hash);
+					// Send false.
+					tx.send(false).expect("receiving side should be alive");
+				}
+			);
+		}
+
+		let head_c = Hash::from_low_u64_be(127);
+		let head_c_num: u32 = 1;
+
+		let next_overseer_message =
+			update_view(&mut virtual_overseer, &test_state, vec![(head_c, head_c_num)], 1)
+				.await
+				.expect("should've sent request to backing");
+
+		// Unblock first request.
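+		// Activating the new leaf re-sends `CanSecond` for each blocked
+		// advertisement; this time backing accepts the first candidate.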
+ assert_matches!( + next_overseer_message, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidates[0]); + assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); + assert_eq!(request.parent_head_data_hash, parent_head_data_hash); + tx.send(true).expect("receiving side should be alive"); + } + ); + + assert_fetch_collation_request( + &mut virtual_overseer, + head_b, + test_state.chain_ids[0], + Some(candidates[0]), + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidates[1]); + assert_eq!(request.candidate_para_id, test_state.chain_ids[0]); + assert_eq!(request.parent_head_data_hash, parent_head_data_hash); + tx.send(false).expect("receiving side should be alive"); + } + ); + + // Collation request was discarded. + test_helpers::Yield::new().await; + assert_matches!(virtual_overseer.recv().now_or_never(), None); + + advertise_collation( + &mut virtual_overseer, + peer_ids[2], + head_c, + Some((candidates[2], parent_head_data_hash)), + ) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidates[2]); + tx.send(false).expect("receiving side should be alive"); + } + ); + + let head_d = Hash::from_low_u64_be(126); + let head_d_num: u32 = 2; + + let next_overseer_message = + update_view(&mut virtual_overseer, &test_state, vec![(head_d, head_d_num)], 1) + .await + .expect("should've sent request to backing"); + + // Reject 2, accept 3. + assert_matches!( + next_overseer_message, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidates[1]); + tx.send(false).expect("receiving side should be alive"); + } + ); + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::CandidateBacking( + CandidateBackingMessage::CanSecond(request, tx), + ) => { + assert_eq!(request.candidate_hash, candidates[2]); + tx.send(true).expect("receiving side should be alive"); + } + ); + assert_fetch_collation_request( + &mut virtual_overseer, + head_c, + test_state.chain_ids[0], + Some(candidates[2]), + ) + .await; + + virtual_overseer + }); +} diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 84dfcbed8af2..db45fb4b6bdc 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -61,10 +61,18 @@ use std::{ pub mod network_bridge_event; pub use network_bridge_event::NetworkBridgeEvent; -/// Subsystem messages where each message is always bound to a relay parent. -pub trait BoundToRelayParent { - /// Returns the relay parent this message is bound to. - fn relay_parent(&self) -> Hash; +/// A request to the candidate backing subsystem to check whether +/// there exists vacant membership in some fragment tree. +#[derive(Debug, Copy, Clone)] +pub struct CanSecondRequest { + /// Para id of the candidate. + pub candidate_para_id: ParaId, + /// The relay-parent of the candidate. + pub candidate_relay_parent: Hash, + /// Hash of the candidate. + pub candidate_hash: CandidateHash, + /// Parent head data hash. + pub parent_head_data_hash: Hash, } /// Messages received by the Candidate Backing subsystem. 
@@ -73,6 +81,13 @@ pub enum CandidateBackingMessage {
 	/// Requests a set of backable candidates that could be backed in a child of the given
 	/// relay-parent, referenced by its hash.
 	GetBackedCandidates(Hash, Vec<CandidateHash>, oneshot::Sender<Vec<BackedCandidate>>),
+	/// Request the subsystem to check whether it's allowed to second the given candidate.
+	/// The rule is to only fetch collations that are either built on top of the root
+	/// of some fragment tree or have a parent node which represents a backed candidate.
+	///
+	/// Always responds with `false` if async backing is disabled for the candidate's relay
+	/// parent.
+	CanSecond(CanSecondRequest, oneshot::Sender<bool>),
 	/// Note that the Candidate Backing subsystem should second the given candidate in the context of the
 	/// given relay-parent (ref. by hash). This candidate must be validated.
 	Second(Hash, CandidateReceipt, PersistedValidationData, PoV),
@@ -81,16 +96,6 @@ pub enum CandidateBackingMessage {
 	Statement(Hash, SignedFullStatementWithPVD),
 }
 
-impl BoundToRelayParent for CandidateBackingMessage {
-	fn relay_parent(&self) -> Hash {
-		match self {
-			Self::GetBackedCandidates(hash, _, _) => *hash,
-			Self::Second(hash, _, _, _) => *hash,
-			Self::Statement(hash, _) => *hash,
-		}
-	}
-}
-
 /// Blanket error for validation failing for internal reasons.
 #[derive(Debug, Error)]
 #[error("Validation failed with {0:?}")]
@@ -219,6 +224,13 @@ pub enum CollatorProtocolMessage {
 	///
 	/// The hash is the relay parent.
 	Seconded(Hash, SignedFullStatement),
+	/// The candidate received enough validity votes from the backing group.
+	Backed {
+		/// Candidate's para id.
+		para_id: ParaId,
+		/// Hash of the para head generated by the candidate.
+		para_head: Hash,
+	},
 }
 
 impl Default for CollatorProtocolMessage {
@@ -227,12 +239,6 @@ impl Default for CollatorProtocolMessage {
 	}
 }
 
-impl BoundToRelayParent for CollatorProtocolMessage {
-	fn relay_parent(&self) -> Hash {
-		Default::default()
-	}
-}
-
 /// Messages received by the dispute coordinator subsystem.
 ///
 /// NOTE: Any response oneshots might get cancelled if the `DisputeCoordinator` was not yet
@@ -488,12 +494,6 @@ impl BitfieldDistributionMessage {
 #[derive(Debug)]
 pub enum BitfieldSigningMessage {}
 
-impl BoundToRelayParent for BitfieldSigningMessage {
-	fn relay_parent(&self) -> Hash {
-		match *self {}
-	}
-}
-
 /// Availability store subsystem message.
 #[derive(Debug)]
 pub enum AvailabilityStoreMessage {
@@ -793,15 +793,6 @@ pub enum ProvisionerMessage {
 	ProvisionableData(Hash, ProvisionableData),
 }
 
-impl BoundToRelayParent for ProvisionerMessage {
-	fn relay_parent(&self) -> Hash {
-		match self {
-			Self::RequestInherentData(hash, _) => *hash,
-			Self::ProvisionableData(hash, _) => *hash,
-		}
-	}
-}
-
 /// Message to the Collation Generation subsystem.
#[derive(Debug)] pub enum CollationGenerationMessage { diff --git a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs index d3734e73c14f..4d38657f5bbf 100644 --- a/node/subsystem-util/src/backing_implicit_view.rs +++ b/node/subsystem-util/src/backing_implicit_view.rs @@ -377,7 +377,7 @@ where ancestry } else { - Vec::new() + vec![leaf_hash] }; let fetched_ancestry = FetchSummary { @@ -709,4 +709,31 @@ mod tests { view.deactivate_leaf(*leaf_a); assert!(view.block_info_storage.is_empty()); } + + #[test] + fn genesis_ancestry() { + let pool = TaskExecutor::new(); + let (mut ctx, mut ctx_handle) = make_subsystem_context::(pool); + + let mut view = View::default(); + + const PARA_A_MIN_PARENT: u32 = 0; + + let prospective_response = vec![(PARA_A, PARA_A_MIN_PARENT)]; + let fut = view.activate_leaf(ctx.sender(), GENESIS_HASH).timeout(TIMEOUT).map(|res| { + let paras = res.expect("`activate_leaf` timed out").unwrap(); + assert_eq!(paras, vec![PARA_A]); + }); + let overseer_fut = async { + assert_min_relay_parents_request(&mut ctx_handle, &GENESIS_HASH, prospective_response) + .await; + assert_block_header_requests(&mut ctx_handle, &[GENESIS_HASH], &[GENESIS_HASH]).await; + }; + futures::executor::block_on(join(fut, overseer_fut)); + + assert_matches!( + view.known_allowed_relay_parents_under(&GENESIS_HASH, None), + Some(hashes) if !hashes.is_empty() + ); + } } From f062f3c86d943a38c25077ce8c15197cc78521cf Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Sun, 13 Nov 2022 22:29:52 +0400 Subject: [PATCH 20/76] cargo generate-lockfile --- Cargo.lock | 2374 +++++++++++++++++++++++++++------------------------- 1 file changed, 1213 insertions(+), 1161 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6a87bcc1b714..762220d287fe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -33,7 +33,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b613b8e1e3cf911a086f53f03bf286f52fd7a7258e4fa606f0ef220d39d8877" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", ] [[package]] @@ -68,16 +68,16 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.8", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" dependencies = [ "memchr", ] @@ -88,6 +88,15 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fbf688625d06217d5b1bb0ea9d9c44a1635fd0ee3534466388d18203174f4d11" +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + [[package]] name = "ansi_term" version = "0.12.1" @@ -105,18 +114,18 @@ checksum = "216261ddc8289130e551ddcd5ce8a064710c0d064a4d2895c67151c92b5443f6" [[package]] name = "approx" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "072df7202e63b127ab55acfe16ce97013d5b97bf160489336d3f1840fd78e99e" +checksum = 
"cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" dependencies = [ "num-traits", ] [[package]] name = "arbitrary" -version = "1.0.3" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "510c76ecefdceada737ea728f4f9a84bd2e1ef29f1ba555e560940fe279954de" +checksum = "29d47fbf90d5149a107494b15a7dc8d69b351be2db3bb9691740e88ec17fd880" [[package]] name = "array-bytes" @@ -150,11 +159,11 @@ checksum = "e22d1f4b888c298a027c99dc9048015fac177587de20fc30232a057dfbe24a21" [[package]] name = "assert_cmd" -version = "2.0.4" +version = "2.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93ae1ddd39efd67689deb1979d80bad3bf7f2b09c6e6117c8d1f2443b5e2f83e" +checksum = "ba45b8163c49ab5f972e59a8a5a03b6d2972619d486e19ec9fe744f7c2753d3c" dependencies = [ - "bstr", + "bstr 1.0.1", "doc-comment", "predicates", "predicates-core", @@ -180,56 +189,56 @@ dependencies = [ [[package]] name = "async-channel" -version = "1.6.1" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2114d64672151c0c5eaa5e131ec84a74f06e1e559830dabba01ca30605d66319" +checksum = "e14485364214912d3b19cc3435dde4df66065127f05fa0d75c712f36f12c2f28" dependencies = [ - "concurrent-queue", + "concurrent-queue 1.2.4", "event-listener", "futures-core", ] [[package]] name = "async-executor" -version = "1.4.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "871f9bb5e0a22eeb7e8cf16641feb87c9dc67032ccf8ff49e772eb9941d3a965" +checksum = "17adb73da160dfb475c183343c8cccd80721ea5a605d3eb57125f0a7b7a92d0b" dependencies = [ + "async-lock", "async-task", - "concurrent-queue", + "concurrent-queue 2.0.0", "fastrand", "futures-lite", - "once_cell", "slab", ] [[package]] name = "async-global-executor" -version = "2.0.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9586ec52317f36de58453159d48351bc244bc24ced3effc1fce22f3d48664af6" +checksum = "f1b6f5d7df27bd294849f8eec66ecfc63d11814df7a4f5d74168a2394467b776" dependencies = [ "async-channel", "async-executor", "async-io", - "async-mutex", + "async-lock", "blocking", "futures-lite", - "num_cpus", "once_cell", ] [[package]] name = "async-io" -version = "1.6.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a811e6a479f2439f0c04038796b5cfb3d2ad56c230e0f2d3f7b04d68cfee607b" +checksum = "e8121296a9f05be7f34aa4196b1747243b3b62e048bb7906f644f3fbfc490cf7" dependencies = [ - "concurrent-queue", + "async-lock", + "autocfg", + "concurrent-queue 1.2.4", "futures-lite", "libc", "log", - "once_cell", "parking", "polling", "slab", @@ -240,29 +249,22 @@ dependencies = [ [[package]] name = "async-lock" -version = "2.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6a8ea61bf9947a1007c5cada31e647dbc77b103c679858150003ba697ea798b" -dependencies = [ - "event-listener", -] - -[[package]] -name = "async-mutex" -version = "1.4.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "479db852db25d9dbf6204e6cb6253698f175c15726470f78af0d918e99d6156e" +checksum = "c8101efe8695a6c17e02911402145357e718ac92d3ff88ae8419e84b1707b685" dependencies = [ "event-listener", + "futures-lite", ] [[package]] name = "async-process" -version = "1.3.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"83137067e3a2a6a06d67168e49e68a0957d215410473a740cea95a2425c0b7c6" +checksum = "02111fd8655a613c25069ea89fc8d9bb89331fa77486eb3bc059ee757cfa481c" dependencies = [ "async-io", + "autocfg", "blocking", "cfg-if", "event-listener", @@ -295,7 +297,7 @@ dependencies = [ "log", "memchr", "once_cell", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.9", "pin-utils", "slab", "wasm-bindgen-futures", @@ -318,15 +320,15 @@ dependencies = [ [[package]] name = "async-task" -version = "4.0.3" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e91831deabf0d6d7ec49552e489aed63b7456a7a3c46cff62adad428110b0af0" +checksum = "7a40729d2133846d9ed0ea60a8b9541bccddab49cd30f0715a1da672fe9a2524" [[package]] name = "async-trait" -version = "0.1.57" +version = "0.1.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" +checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" dependencies = [ "proc-macro2", "quote", @@ -335,15 +337,15 @@ dependencies = [ [[package]] name = "asynchronous-codec" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0de5164e5edbf51c45fb8c2d9664ae1c095cce1b265ecf7569093c0d66ef690" +checksum = "06a0daa378f5fd10634e44b0a29b2a87b890657658e072a30d6f26e57ddee182" dependencies = [ "bytes", "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.9", ] [[package]] @@ -365,30 +367,30 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "backtrace" -version = "0.3.64" +version = "0.3.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e121dee8023ce33ab248d9ce1493df03c3b38a659b240096fcbd7048ff9c31f" +checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" dependencies = [ "addr2line", "cc", "cfg-if", "libc", "miniz_oxide", - "object 0.27.1", + "object", "rustc-demangle", ] [[package]] name = "base-x" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" [[package]] name = "base16ct" @@ -404,21 +406,21 @@ checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" [[package]] name = "base64" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64ct" -version = "1.5.2" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea2b2456fd614d856680dcd9fcc660a51a820fa09daef2e49772b56a193c8474" +checksum = "b645a089122eccb6111b4f81cbc1a49f5900ac4666bb93ac027feaecf15607bf" [[package]] name = "beef" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bed554bd50246729a1ec158d08aa3235d1b69d94ad120ebe187e28894787e736" +checksum = 
"3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1" dependencies = [ "serde", ] @@ -426,7 +428,7 @@ dependencies = [ [[package]] name = "beefy-gadget" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "array-bytes", "async-trait", @@ -435,7 +437,7 @@ dependencies = [ "futures", "futures-timer", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "sc-chain-spec", "sc-client-api", @@ -463,14 +465,14 @@ dependencies = [ [[package]] name = "beefy-gadget-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "beefy-gadget", "beefy-primitives", "futures", "jsonrpsee", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "sc-rpc", "sc-utils", @@ -483,7 +485,7 @@ dependencies = [ [[package]] name = "beefy-merkle-tree" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "beefy-primitives", "sp-api", @@ -493,9 +495,9 @@ dependencies = [ [[package]] name = "beefy-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-api", @@ -567,11 +569,11 @@ dependencies = [ [[package]] name = "blake2" -version = "0.10.4" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9cf849ee05b2ee5fba5e36f97ff8ec2533916700fc0758d40d92136a42f3388" +checksum = "b12e5fd123190ce1c2e559308a94c9bacad77907d4c6005d9e58fe1a0689e55e" dependencies = [ - "digest 0.10.3", + "digest 0.10.5", ] [[package]] @@ -607,7 +609,7 @@ dependencies = [ "cc", "cfg-if", "constant_time_eq", - "digest 0.10.3", + "digest 0.10.5", ] [[package]] @@ -628,16 +630,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4152116fd6e9dadb291ae18fc1ec3575ed6d84c29642d97890f4b4a3417297e4" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", ] [[package]] name = "block-buffer" -version = "0.10.0" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1d36a02058e76b040de25a4464ba1c80935655595b661505c8b39b664828b95" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", ] [[package]] @@ -651,9 +653,9 @@ dependencies = [ [[package]] name = "blocking" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046e47d4b2d391b1f6f8b407b1deb8dee56c1852ccd868becf2710f601b5f427" +checksum = "c6ccb65d468978a086b69884437ded69a90faab3bbe6e67f242173ea728acccc" dependencies = [ "async-channel", "async-task", @@ -684,9 +686,19 @@ version = "0.2.17" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3569f383e8f1598449f1a423e72e99569137b47740b1da11ef19af3d5c3223" dependencies = [ - "lazy_static", "memchr", +] + +[[package]] +name = "bstr" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fca0852af221f458706eb0725c03e4ed6c46af9ac98e6a689d5e634215d594dd" +dependencies = [ + "memchr", + "once_cell", "regex-automata", + "serde", ] [[package]] @@ -700,15 +712,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.8.0" +version = "3.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1e260c3a9040a7c19a12468758f4c16f31a81a1fe087482be9570ec864bb6c" +checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" [[package]] name = "byte-slice-cast" -version = "1.2.0" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d30c751592b77c499e7bce34d99d67c2c11bdc0574e9a488ddade14150a4698" +checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "byte-tools" @@ -741,15 +753,15 @@ dependencies = [ [[package]] name = "cache-padded" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "631ae5198c9be5e753e5cc215e1bd73c2b466a3565173db433f52bb9d3e66dba" +checksum = "c1db59621ec70f09c5e9b597b220c7a2b43611f4710dc03ceb8748637775692c" [[package]] name = "camino" -version = "1.0.5" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52d74260d9bf6944e2208aa46841b4b8f0d7ffc0849a06837b2f510337f86b2b" +checksum = "88ad0e1e3e88dd237a156ab9f571021b8a158caa0ae44b1968a241efb5144c1e" dependencies = [ "serde", ] @@ -771,16 +783,16 @@ checksum = "4acbb09d9ee8e23699b9634375c72795d095bf268439da88562cf9b501f181fa" dependencies = [ "camino", "cargo-platform", - "semver 1.0.4", + "semver 1.0.14", "serde", "serde_json", ] [[package]] name = "cc" -version = "1.0.73" +version = "1.0.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "76a284da2e6fe2092f2353e51713435363112dfd60030e22add80be333fb928f" dependencies = [ "jobserver", ] @@ -842,14 +854,16 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.19" +version = "0.4.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" dependencies = [ - "libc", + "iana-time-zone", + "js-sys", "num-integer", "num-traits", "time", + "wasm-bindgen", "winapi", ] @@ -872,7 +886,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ee52072ec15386f770805afd189a01c8841be8696bed250fa2f13c4c0d6dfb7" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", ] [[package]] @@ -886,9 +900,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa66045b9cb23c2e9c1520732030608b02ee07e5cfaa5a521ec15ded7fa24c90" +checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" dependencies = [ "glob", "libc", @@ -897,9 +911,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.0.15" +version = "4.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "6bf8832993da70a4c6d13c581f4463c2bdda27b9bf1c5498dc4365543abe6d6f" +checksum = "0eb41c13df48950b20eb4cd0eefa618819469df1bffc49d11e8487c4ba0037e5" dependencies = [ "atty", "bitflags", @@ -912,9 +926,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.0.13" +version = "4.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42f169caba89a7d512b5418b09864543eeb4d497416c917d7137863bd2076ad" +checksum = "0177313f9f02afc995627906bbd8967e2be069f5261954222dac78290c2b9014" dependencies = [ "heck", "proc-macro-error", @@ -956,9 +970,9 @@ dependencies = [ [[package]] name = "color-eyre" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ebf286c900a6d5867aeff75cfee3192857bb7f24b547d4f0df2ed6baa812c90" +checksum = "5a667583cca8c4f8436db8de46ea8233c42a7d9ae424a82d338f2e4675229204" dependencies = [ "backtrace", "eyre", @@ -969,9 +983,9 @@ dependencies = [ [[package]] name = "comfy-table" -version = "6.0.0" +version = "6.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "121d8a5b0346092c18a4b2fd6f620d7a06f0eb7ac0a45860939a0884bc579c56" +checksum = "1090f39f45786ec6dc6286f8ea9c75d0a7ef0a0d3cda674cef0c3af7b307fbc2" dependencies = [ "strum", "strum_macros", @@ -980,18 +994,27 @@ dependencies = [ [[package]] name = "concurrent-queue" -version = "1.2.2" +version = "1.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed07550be01594c6026cff2a1d7fe9c8f683caa798e12b68694ac9e88286a3" +checksum = "af4780a44ab5696ea9e28294517f1fffb421a83a25af521333c838635509db9c" dependencies = [ "cache-padded", ] +[[package]] +name = "concurrent-queue" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd7bef69dc86e3c610e4e7aed41035e2a7ed12e72dd7530f61327a6579a4390b" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "const-oid" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661" +checksum = "cec318a675afcb6a1ea1d4340e2d377e56e47c266f28043ceccbf4412ddfdd3b" [[package]] name = "constant_time_eq" @@ -1007,9 +1030,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" [[package]] name = "core-foundation" -version = "0.9.2" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6888e10551bb93e424d8df1d07f1a8b4fceb0001a3a4b048bfc47554946f47b3" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ "core-foundation-sys", "libc", @@ -1032,36 +1055,36 @@ dependencies = [ [[package]] name = "cpp_demangle" -version = "0.3.4" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "931ab2a3e6330a07900b8e7ca4e106cdcbb93f2b9a52df55e54ee53d8305b55d" +checksum = "eeaa953eaad386a53111e47172c2fedba671e5684c8dd601a5f474f4f118710f" dependencies = [ "cfg-if", ] [[package]] name = "cpufeatures" -version = "0.2.1" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] [[package]] name = "cranelift-bforest" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b27bbd3e6c422cf6282b047bcdd51ecd9ca9f3497a3be0132ffa08e509b824b0" +checksum = "52056f6d0584484b57fa6c1a65c1fcb15f3780d8b6a758426d9e3084169b2ddd" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "872f5d4557a411b087bd731df6347c142ae1004e6467a144a7e33662e5715a01" +checksum = "18fed94c8770dc25d01154c3ffa64ed0b3ba9d583736f305fed7beebe5d9cf74" dependencies = [ "arrayvec 0.7.2", "bumpalo", @@ -1079,33 +1102,33 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b49fdebb29c62c1fc4da1eeebd609e9d530ecde24a9876def546275f73a244" +checksum = "1c451b81faf237d11c7e4f3165eeb6bac61112762c5cfe7b4c0fb7241474358f" dependencies = [ "cranelift-codegen-shared", ] [[package]] name = "cranelift-codegen-shared" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc0c091e2db055d4d7f6b7cec2d2ead286bcfaea3357c6a52c2a2613a8cb5ac" +checksum = "e7c940133198426d26128f08be2b40b0bd117b84771fd36798969c4d712d81fc" [[package]] name = "cranelift-entity" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "354a9597be87996c9b278655e68b8447f65dd907256855ad773864edee8d985c" +checksum = "87a0f1b2fdc18776956370cf8d9b009ded3f855350c480c1c52142510961f352" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cd8dd3fb8b82c772f4172e87ae1677b971676fffa7c4e3398e3047e650a266b" +checksum = "34897538b36b216cc8dd324e73263596d51b8cf610da6498322838b2546baf8a" dependencies = [ "cranelift-codegen", "log", @@ -1115,15 +1138,15 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b82527802b1f7d8da288adc28f1dc97ea52943f5871c041213f7b5035ac698a7" +checksum = "1b2629a569fae540f16a76b70afcc87ad7decb38dc28fa6c648ac73b51e78470" [[package]] name = "cranelift-native" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c30ba8b910f1be023af0c39109cb28a8809734942a6b3eecbf2de8993052ea5e" +checksum = "20937dab4e14d3e225c5adfc9c7106bafd4ac669bdb43027b911ff794c6fb318" dependencies = [ "cranelift-codegen", "libc", @@ -1132,9 +1155,9 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.88.0" +version = "0.88.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "776a8916d201894aca9637a20814f1e11abc62acd5cfbe0b4eb2e63922756971" +checksum = "80fc2288957a94fd342a015811479de1837850924166d1f1856d8406e6f3609b" dependencies = [ "cranelift-codegen", "cranelift-entity", @@ -1148,18 +1171,18 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.3.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "738c290dfaea84fc1ca15ad9c168d083b05a714e1efddd8edaab678dc28d2836" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if", ] [[package]] name = "crossbeam-channel" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1167,9 +1190,9 @@ dependencies = [ [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ "cfg-if", "crossbeam-epoch", @@ -1178,22 +1201,22 @@ dependencies = [ [[package]] name = "crossbeam-epoch" -version = "0.9.5" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" +checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" dependencies = [ + "autocfg", "cfg-if", "crossbeam-utils", - "lazy_static", "memoffset", "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f25d8400f4a7a5778f0e4e52384a48cbd9b5c495d110786187fc750075277a2" +checksum = "1cd42583b04998a5363558e5f9291ee5a5ff6b49944332103f251e7479a82aa7" dependencies = [ "cfg-if", "crossbeam-utils", @@ -1201,12 +1224,11 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.9" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ff1f980957787286a554052d03c7aee98d99cc32e09f6d45f0a814133c87978" +checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -1221,19 +1243,19 @@ version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ - "generic-array 0.14.4", - "rand_core 0.6.3", + "generic-array 0.14.6", + "rand_core 0.6.4", "subtle", "zeroize", ] [[package]] name = "crypto-common" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", "typenum", ] @@ -1243,7 +1265,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", "subtle", ] @@ -1253,15 +1275,15 @@ version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", "subtle", ] [[package]] name = "ctor" -version = "0.1.21" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccc0a48a9b826acdf4028595adc9db92caea352f7af011a3034acd172a52a0aa" +checksum = "6d2301688392eb071b0bf1a37be05c469d3cc4dbbd95df672fe28ab021e6a096" dependencies = [ "quote", "syn", @@ -1310,16 +1332,16 @@ checksum = "4033478fbf70d6acf2655ac70da91ee65852d69daf7a67bf7a2f518fb47aafcf" dependencies = [ "byteorder", "digest 0.9.0", - "rand_core 0.6.3", + "rand_core 0.6.4", "subtle", "zeroize", ] [[package]] name = "cxx" 
-version = "1.0.80" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b7d4e43b25d3c994662706a1d4fcfc32aaa6afd287502c111b237093bb23f3a" +checksum = "97abf9f0eca9e52b7f81b945524e76710e6cb2366aead23b7d4fbf72e281f888" dependencies = [ "cc", "cxxbridge-flags", @@ -1329,9 +1351,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.80" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84f8829ddc213e2c1368e51a2564c552b65a8cb6a28f31e576270ac81d5e5827" +checksum = "7cc32cc5fea1d894b77d269ddb9f192110069a8a9c1f1d441195fba90553dea3" dependencies = [ "cc", "codespan-reporting", @@ -1344,15 +1366,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.80" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e72537424b474af1460806647c41d4b6d35d09ef7fe031c5c2fa5766047cc56a" +checksum = "8ca220e4794c934dc6b1207c3b42856ad4c302f2df1712e9f8d2eec5afaacf1f" [[package]] name = "cxxbridge-macro" -version = "1.0.80" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "309e4fb93eed90e1e14bea0da16b209f81813ba9fc7830c20ed151dd7bc0a4d7" +checksum = "b846f081361125bfc8dc9d3940c84e1fd83ba54bbca7b17cd29483c828be0704" dependencies = [ "proc-macro2", "quote", @@ -1387,9 +1409,9 @@ dependencies = [ [[package]] name = "debugid" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f91cf5a8c2f2097e2a32627123508635d47ce10563d999ec1a95addf08b502ba" +checksum = "d6ee87af31d84ef885378aebca32be3d682b0e0dc119d5b4860a2c5bb5046730" dependencies = [ "uuid", ] @@ -1430,9 +1452,9 @@ dependencies = [ [[package]] name = "diff" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e25ea47919b1560c4e3b7fe0aaab9becf5b84a10325ddf7db0f0ba5e1026499" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" [[package]] name = "difflib" @@ -1455,16 +1477,16 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", ] [[package]] name = "digest" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" dependencies = [ - "block-buffer 0.10.0", + "block-buffer 0.10.3", "crypto-common", "subtle", ] @@ -1490,9 +1512,9 @@ dependencies = [ [[package]] name = "dirs-sys" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" +checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" dependencies = [ "libc", "redox_users", @@ -1512,9 +1534,9 @@ dependencies = [ [[package]] name = "dissimilar" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31ad93652f40969dead8d4bf897a41e9462095152eb21c56e5830537e41179dd" +checksum = "8c97b9233581d84b8e1e689cdd3a47b6f69770084fc246e86a7f78b0d9c1d4a5" [[package]] name = "dlmalloc" @@ -1532,7 +1554,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c4d33be9473d06f75f58220f71f7a9317aca647dc061dbd3c361b0bef505fbea" dependencies = [ "byteorder", - "quick-error 1.2.3", + "quick-error", ] [[package]] @@ -1555,9 +1577,9 @@ checksum = "9ea835d29036a4087793836fa931b08837ad5e957da9e23886b29586fb9b6650" [[package]] name = "dtoa" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5caaa75cbd2b960ff1e5392d2cfb1f44717fffe12fc1f32b7b5d1267f99732a6" +checksum = "f8a6eee2d5d0d113f015688310da018bd1d864d86bd567c8fca9c266889e1bfa" [[package]] name = "dyn-clonable" @@ -1582,9 +1604,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.4" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2626afccd7561a06cf1367e2950c4718ea04565e20fb5029b6c7d8ad09abcf" +checksum = "4f94fa09c2aeea5b8839e414b7b841bf429fd25b9c522116ac97ee87856d88b2" [[package]] name = "ecdsa" @@ -1600,9 +1622,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.3.0" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74e1069e39f1454367eb2de793ed062fac4c35c2934b76a81d90dd9abcd28816" +checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" dependencies = [ "signature", ] @@ -1617,7 +1639,7 @@ dependencies = [ "ed25519", "rand 0.7.3", "serde", - "sha2 0.9.8", + "sha2 0.9.9", "zeroize", ] @@ -1630,16 +1652,16 @@ dependencies = [ "curve25519-dalek 3.2.0", "hashbrown", "hex", - "rand_core 0.6.3", - "sha2 0.9.8", + "rand_core 0.6.4", + "sha2 0.9.9", "zeroize", ] [[package]] name = "either" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "elliptic-curve" @@ -1650,11 +1672,11 @@ dependencies = [ "base16ct", "crypto-bigint", "der", - "digest 0.10.3", + "digest 0.10.5", "ff", - "generic-array 0.14.4", + "generic-array 0.14.6", "group", - "rand_core 0.6.3", + "rand_core 0.6.4", "sec1", "subtle", "zeroize", @@ -1662,9 +1684,9 @@ dependencies = [ [[package]] name = "encoding_rs" -version = "0.8.30" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" +checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" dependencies = [ "cfg-if", ] @@ -1714,25 +1736,12 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" -dependencies = [ - "atty", - "humantime 1.3.0", - "log", - "regex", - "termcolor", -] - -[[package]] -name = "env_logger" -version = "0.9.0" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2cf0344971ee6c64c31be0d530793fba457d322dfec2810c453d0ef228f9c3" +checksum = "a12e6657c4c97ebab115a42dcee77225f7f482cdd841cf7088c657a42e9e00e7" dependencies = [ "atty", - "humantime 2.1.0", + "humantime", "log", "regex", "termcolor", @@ -1746,9 +1755,9 @@ checksum = "68b91989ae21441195d7d9b9993a2f9295c7e1a8c96255d8b729accddc124797" [[package]] name = "erased-serde" -version = "0.3.20" +version = "0.3.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad132dd8d0d0b546348d7d86cb3191aad14b34e5f979781fc005c80d4ac67ffd" 
+checksum = "54558e0ba96fbe24280072642eceb9d7d442e32c7ec0ea9e7ecd7b4ea2cf4e11" dependencies = [ "serde", ] @@ -1776,9 +1785,9 @@ dependencies = [ [[package]] name = "event-listener" -version = "2.5.1" +version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7531096570974c3a9dcf9e4b8e1cede1ec26cf5046219fb3b9d897503b9be59" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" [[package]] name = "exit-future" @@ -1822,9 +1831,9 @@ dependencies = [ [[package]] name = "eyre" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "221239d1d5ea86bf5d6f91c9d6bc3646ffe471b08ff9b0f91c44f115ac969d2b" +checksum = "4c2b6b5a29c02cdc822728b7d7b8ae1bab3e3b05d44522770ddd49722eeac7eb" dependencies = [ "indenter", "once_cell", @@ -1844,9 +1853,9 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" +checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499" dependencies = [ "instant", ] @@ -1907,30 +1916,30 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "rand_core 0.6.3", + "rand_core 0.6.4", "subtle", ] [[package]] name = "file-per-thread-logger" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fdbe0d94371f9ce939b555dd342d0686cc4c0cadbcd4b61d70af5ff97eb4126" +checksum = "21e16290574b39ee41c71aeb90ae960c504ebaf1e2a1c87bd52aa56ed6e1a02f" dependencies = [ - "env_logger 0.7.1", + "env_logger", "log", ] [[package]] name = "filetime" -version = "0.2.17" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94a7bbaa59354bc20dd75b67f23e2797b4490e9d6928203fb105c79e448c86c" +checksum = "4b9663d381d07ae25dc88dbdf27df458faa83a9b25336bcac83d5e452b5fc9d3" dependencies = [ "cfg-if", "libc", "redox_syscall", - "windows-sys 0.36.1", + "windows-sys 0.42.0", ] [[package]] @@ -1944,7 +1953,7 @@ dependencies = [ "futures-timer", "log", "num-traits", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "scale-info", ] @@ -1975,19 +1984,17 @@ dependencies = [ [[package]] name = "fixedbitset" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "398ea4fabe40b9b0d885340a2a991a44c8a645624075ad966d21f88688e2b69e" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" +checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ - "cfg-if", "crc32fast", - "libc", "libz-sys", "miniz_oxide", ] @@ -2025,37 +2032,36 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "fork-tree" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" 
dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", ] [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ - "matches", "percent-encoding", ] [[package]] name = "fragile" -version = "1.2.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85dcb89d2b10c5f6133de2efd8c11959ce9dbb46a2f7a4cab208c4eeda6ce1ab" +checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" [[package]] name = "frame-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support", "frame-system", "linregress", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "paste", "scale-info", "serde", @@ -2072,7 +2078,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "Inflector", "array-bytes", @@ -2091,7 +2097,7 @@ dependencies = [ "linked-hash-map", "log", "memory-db", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "rand 0.8.5", "rand_pcg 0.3.1", "sc-block-builder", @@ -2124,7 +2130,7 @@ dependencies = [ [[package]] name = "frame-election-provider-solution-type" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2135,12 +2141,12 @@ dependencies = [ [[package]] name = "frame-election-provider-support" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-election-provider-solution-type", "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-arithmetic", "sp-npos-elections", @@ -2151,12 +2157,12 @@ dependencies = [ [[package]] name = "frame-executive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support", "frame-system", "frame-try-runtime", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-io", @@ -2172,7 +2178,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df6bb8542ef006ef0de09a5c4420787d79823c0ed7924225822362fd2bf2ff2d" dependencies = [ "cfg-if", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", ] @@ -2180,7 +2186,7 @@ dependencies = [ [[package]] name = "frame-support" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "bitflags", "frame-metadata", @@ -2189,7 +2195,7 @@ dependencies = [ "k256", "log", "once_cell", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "paste", "scale-info", "serde", @@ -2212,7 +2218,7 @@ dependencies = [ [[package]] name = "frame-support-procedural" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "Inflector", "cfg-expr", @@ -2226,7 +2232,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", @@ -2238,7 +2244,7 @@ dependencies = [ [[package]] name = "frame-support-procedural-tools-derive" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "proc-macro2", "quote", @@ -2248,12 +2254,12 @@ dependencies = [ [[package]] name = "frame-support-test" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support", "frame-support-test-pallet", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "pretty_assertions", "rustversion", "scale-info", @@ -2271,22 +2277,22 @@ dependencies = [ [[package]] name = "frame-support-test-pallet" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", ] [[package]] name = "frame-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-core", @@ -2300,12 +2306,12 @@ dependencies = [ [[package]] name = "frame-system-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-runtime", @@ -2315,19 +2321,19 @@ dependencies = [ [[package]] name = 
"frame-system-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-api", ] [[package]] name = "frame-try-runtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-api", "sp-runtime", "sp-std", @@ -2335,9 +2341,9 @@ dependencies = [ [[package]] name = "fs-err" -version = "2.6.0" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ebd3504ad6116843b8375ad70df74e7bfe83cac77a1f3fe73200c844d43bfe0" +checksum = "0845fa252299212f0389d64ba26f34fa32cfe41588355f21ed507c59a0f64541" [[package]] name = "fs2" @@ -2369,9 +2375,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "38390104763dc37a5145a53c29c63c1290b5d316d6086ec32c293f6736051bb0" dependencies = [ "futures-channel", "futures-core", @@ -2384,9 +2390,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.23" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bfc52cbddcfd745bf1740338492bb0bd83d76c67b445f91c5fb29fae29ecaa1" +checksum = "52ba265a92256105f45b719605a571ffe2d1f0fea3807304b522c1d778f79eed" dependencies = [ "futures-core", "futures-sink", @@ -2394,15 +2400,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.23" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2acedae88d38235936c3922476b10fced7b2b68136f5e3c03c2d5be348a1115" +checksum = "04909a7a7e4633ae6c4a9ab280aeb86da1236243a77b694a49eacd659a4bd3ac" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "7acc85df6714c176ab5edf386123fafe217be88c0840ec11f199441134a074e2" dependencies = [ "futures-core", "futures-task", @@ -2412,9 +2418,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.23" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93a66fc6d035a26a3ae255a6d2bca35eda63ae4c5512bef54449113f7a1228e5" +checksum = "00f5fb52a06bdcadeb54e8d3671f8888a39697dcb0b81b23b55174030427f4eb" [[package]] name = "futures-lite" @@ -2427,15 +2433,15 @@ dependencies = [ "futures-io", "memchr", "parking", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.9", "waker-fn", ] [[package]] name = "futures-macro" -version = "0.3.23" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0db9cce532b0eae2ccf2766ab246f114b56b9cf6d445e00c2549fbc100ca045d" +checksum = "bdfb8ce053d86b91919aad980c220b1fb8401a9394410e1c289ed7e66b61835d" dependencies = [ "proc-macro2", "quote", @@ -2444,9 +2450,9 @@ dependencies = [ [[package]] 
name = "futures-rustls" -version = "0.22.1" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e01fe9932a224b72b45336d96040aa86386d674a31d0af27d800ea7bc8ca97fe" +checksum = "d2411eed028cdf8c8034eaf21f9915f956b6c3abec4d4c7949ee67f0721127bd" dependencies = [ "futures-io", "rustls", @@ -2455,15 +2461,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.23" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca0bae1fe9752cf7fd9b0064c674ae63f97b37bc714d745cbde0afb7ec4e6765" +checksum = "39c15cf1a4aa79df40f1bb462fb39676d0ad9e366c2a33b590d7c66f4f81fcf9" [[package]] name = "futures-task" -version = "0.3.23" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "842fc63b931f4056a24d59de13fb1272134ce261816e063e634ad0c15cdc5306" +checksum = "2ffb393ac5d9a6eaa9d3fdf37ae2776656b706e200c8e16b1bdb227f5198e6ea" [[package]] name = "futures-timer" @@ -2473,9 +2479,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.23" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0828a5471e340229c11c77ca80017937ce3c58cb788a17e5f1c2d5c485a9577" +checksum = "197676987abd2f9cadff84926f410af1c183608d36641465df73ae8211dc65d6" dependencies = [ "futures-channel", "futures-core", @@ -2484,7 +2490,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.9", "pin-utils", "slab", ] @@ -2501,7 +2507,7 @@ dependencies = [ [[package]] name = "generate-bags" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "chrono", "frame-election-provider-support", @@ -2524,9 +2530,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.4" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -2557,13 +2563,13 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "c05aeb6a22b8f62540c194aac980f2115af067bfe15a0734d7277a768d396b31" dependencies = [ "cfg-if", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] @@ -2578,9 +2584,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.26.1" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" +checksum = "22030e2c5a68ec659fde1e949a745124b48e6fa8b045b7ed5bd1fe4ccc5c4e5d" dependencies = [ "fallible-iterator", "indexmap", @@ -2589,9 +2595,9 @@ dependencies = [ [[package]] name = "git2" -version = "0.14.2" +version = "0.14.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3826a6e0e2215d7a41c2bfc7c9244123969273f3476b939a226aac0ab56e9e3c" +checksum = "d0155506aab710a86160ddb504a480d2964d7ab5b9e62419be69e0032bc5931c" dependencies = [ 
"bitflags", "libc", @@ -2608,12 +2614,12 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "globset" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10463d9ff00a2a068db14231982f5132edebad0d7660cd956a1c30292dbcbfbd" +checksum = "0a1e17342619edbc21a964c2afbeb6c820c6a2560032872f397bb97ea127bd0a" dependencies = [ "aho-corasick", - "bstr", + "bstr 0.2.17", "fnv", "log", "regex", @@ -2621,15 +2627,14 @@ dependencies = [ [[package]] name = "gloo-timers" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47204a46aaff920a1ea58b11d03dec6f704287d27561724a4631e450654a891f" +checksum = "5fb7d06c1c8cc2a29bee7ec961009a0b2caa0793ee4900c2ffb348734ba1c8f9" dependencies = [ "futures-channel", "futures-core", "js-sys", "wasm-bindgen", - "web-sys", ] [[package]] @@ -2639,15 +2644,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff", - "rand_core 0.6.3", + "rand_core 0.6.4", "subtle", ] [[package]] name = "h2" -version = "0.3.11" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9f1f717ddc7b2ba36df7e871fd88db79326551d3d6f1fc406fbfd28b582ff8e" +checksum = "5f9f29bc9dda355256b2916cf526ab02ce0aeaaaf2bad60d65ef3f12f11dd0f4" dependencies = [ "bytes", "fnv", @@ -2658,22 +2663,22 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util 0.6.9", + "tokio-util", "tracing", ] [[package]] name = "handlebars" -version = "4.2.2" +version = "4.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99d6a30320f094710245150395bc763ad23128d6a1ebbad7594dc4164b62c56b" +checksum = "433e4ab33f1213cdc25b5fa45c76881240cfe79284cf2b395e8b9e312a30a2fd" dependencies = [ "log", "pest", "pest_derive", - "quick-error 2.0.1", "serde", "serde_json", + "thiserror", ] [[package]] @@ -2753,7 +2758,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "digest 0.10.3", + "digest 0.10.5", ] [[package]] @@ -2763,7 +2768,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" dependencies = [ "digest 0.9.0", - "generic-array 0.14.4", + "generic-array 0.14.6", "hmac 0.8.1", ] @@ -2803,20 +2808,20 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.9", ] [[package]] name = "httparse" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9100414882e15fb7feccb4897e5f0ff0ff1ca7d1a86a23208ada4d7a18e6c6c4" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -2824,15 +2829,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" -[[package]] -name = "humantime" -version = "1.3.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f" -dependencies = [ - "quick-error 1.2.3", -] - [[package]] name = "humantime" version = "2.1.0" @@ -2841,9 +2837,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.20" +version = "0.14.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" +checksum = "034711faac9d2166cb1baf1a2fb0b60b1f277f8492fd72176c17f3515e1abd3c" dependencies = [ "bytes", "futures-channel", @@ -2855,7 +2851,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.9", "socket2", "tokio", "tower-service", @@ -2891,6 +2887,30 @@ dependencies = [ "tokio-native-tls", ] +[[package]] +name = "iana-time-zone" +version = "0.1.53" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64c122667b287044802d6ce17ee2ddf13207ed924c712de9a66a5814d5b64765" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "winapi", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0703ae284fc167426161c2e3f1da3ea71d94b21bedbcc9494e92b28e334e3dca" +dependencies = [ + "cxx", + "cxx-build", +] + [[package]] name = "idna" version = "0.2.3" @@ -2902,6 +2922,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "if-addrs" version = "0.7.0" @@ -2936,7 +2966,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", ] [[package]] @@ -2987,9 +3017,9 @@ dependencies = [ [[package]] name = "integer-encoding" -version = "3.0.2" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90c11140ffea82edce8dcd74137ce9324ec24b3cf0175fc9d7e29164da9915b8" +checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "integer-sqrt" @@ -3002,9 +3032,19 @@ dependencies = [ [[package]] name = "io-lifetimes" -version = "0.7.2" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ce5ef949d49ee85593fc4d3f3f95ad61657076395cbbce23e2121fc5542074" + +[[package]] +name = "io-lifetimes" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24c3f4eff5495aee4c0399d7b6a0dc2b6e81be84242ffbfcf253ebacccc1d0cb" +checksum = "a7d367024b3f3414d8e01f437f704f41a9f64ab36f9067fa73e526ad4c763c87" +dependencies = [ + "libc", + "windows-sys 0.42.0", +] [[package]] name = "ip_network" @@ -3014,27 +3054,27 @@ checksum = "aa2f047c0a98b2f299aa5d6d7088443570faae494e9ae1305e48be000c9e0eb1" [[package]] name = "ipconfig" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "723519edce41262b05d4143ceb95050e4c614f483e78e9fd9e39a8275a84ad98" +checksum = 
"bd302af1b90f2463a98fa5ad469fc212c8e3175a41c3068601bfa2727591c5be" dependencies = [ "socket2", "widestring", "winapi", - "winreg 0.7.0", + "winreg", ] [[package]] name = "ipnet" -version = "2.3.1" +version = "2.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" +checksum = "f88c5561171189e69df9d98bcf18fd5f9558300f7ea7b801eb8a0fd748bd8745" [[package]] name = "itertools" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] @@ -3047,18 +3087,18 @@ checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" [[package]] name = "jobserver" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" +checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.55" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" dependencies = [ "wasm-bindgen", ] @@ -3094,7 +3134,7 @@ dependencies = [ "thiserror", "tokio", "tokio-rustls", - "tokio-util 0.7.1", + "tokio-util", "tracing", "webpki-roots", ] @@ -3202,7 +3242,7 @@ dependencies = [ "soketto", "tokio", "tokio-stream", - "tokio-util 0.7.1", + "tokio-util", "tracing", "tracing-futures", ] @@ -3216,14 +3256,14 @@ dependencies = [ "cfg-if", "ecdsa", "elliptic-curve", - "sha2 0.10.2", + "sha2 0.10.6", ] [[package]] name = "keccak" -version = "0.1.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" +checksum = "f9b7d56ba4a8344d6be9729995e6b06f928af29998cdf79fe390cbf6b1fee838" [[package]] name = "kusama-runtime" @@ -3288,7 +3328,7 @@ dependencies = [ "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", @@ -3409,15 +3449,15 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.126" +version = "0.2.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" +checksum = "fc7fcc620a3bff7cdd7a365be3376c97191aeaccc2a603e600951e452615bf89" [[package]] name = "libgit2-sys" -version = "0.13.2+1.4.2" +version = "0.13.4+1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a42de9a51a5c12e00fc0e4ca6bc2ea43582fc6418488e8f615e905d886f258b" +checksum = "d0fa6563431ede25f5cc7f6d803c6afbc1c5d3ad3d4925d12c882bf2b526f5d1" dependencies = [ "cc", "libc", @@ -3427,9 +3467,9 @@ dependencies = [ [[package]] name = "libloading" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afe203d669ec979b7128619bae5a63b7b42e9203c1b29146079ee05e2f604b52" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies 
= [ "cfg-if", "winapi", @@ -3437,9 +3477,9 @@ dependencies = [ [[package]] name = "libm" -version = "0.2.1" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" +checksum = "348108ab3fba42ec82ff6e9564fc4ca0247bdccdc68dd8af9764bbc79c3c8ffb" [[package]] name = "libp2p" @@ -3450,7 +3490,7 @@ dependencies = [ "bytes", "futures", "futures-timer", - "getrandom 0.2.3", + "getrandom 0.2.8", "instant", "lazy_static", "libp2p-core", @@ -3500,7 +3540,7 @@ dependencies = [ "prost-build", "rand 0.8.5", "rw-stream-sink", - "sha2 0.10.2", + "sha2 0.10.6", "smallvec", "thiserror", "unsigned-varint", @@ -3564,7 +3604,7 @@ dependencies = [ "prost", "prost-build", "rand 0.8.5", - "sha2 0.10.2", + "sha2 0.10.6", "smallvec", "thiserror", "uint", @@ -3639,7 +3679,7 @@ dependencies = [ "prost", "prost-build", "rand 0.8.5", - "sha2 0.10.2", + "sha2 0.10.6", "snow", "static_assertions", "x25519-dalek", @@ -3791,9 +3831,9 @@ dependencies = [ [[package]] name = "libsecp256k1" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0452aac8bab02242429380e9b2f94ea20cea2b37e2c1777a1358799bbe97f37" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" dependencies = [ "arrayref", "base64", @@ -3804,7 +3844,7 @@ dependencies = [ "libsecp256k1-gen-genmult", "rand 0.8.5", "serde", - "sha2 0.9.8", + "sha2 0.9.9", "typenum", ] @@ -3839,9 +3879,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.3" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de5435b8549c16d423ed0c03dbaafe57cf6c3344744f1242520d59c9d8ecec66" +checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" dependencies = [ "cc", "libc", @@ -3860,9 +3900,9 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "linked_hash_set" @@ -3889,12 +3929,19 @@ version = "0.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4d2456c373231a208ad294c33dc5bff30051eafd954cd4caae83a712b12854d" +[[package]] +name = "linux-raw-sys" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb68f22743a3fb35785f1e7f844ca5a3de2dde5bd0c0ef5b372065814699b121" + [[package]] name = "lock_api" -version = "0.4.6" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88943dd7ef4a2e5a4bfa2753aaab3013e34ce2533d1996fb18ef591e315e2b3b" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ + "autocfg", "scopeguard", ] @@ -4003,33 +4050,33 @@ dependencies = [ [[package]] name = "memchr" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memfd" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "480b5a5de855d11ff13195950bdc8b98b5e942ef47afc447f6615cdcc4e15d80" +checksum = 
"b20a59d985586e4a5aef64564ac77299f8586d8be6cf9106a5a40207e8908efb" dependencies = [ - "rustix", + "rustix 0.36.1", ] [[package]] name = "memmap2" -version = "0.5.0" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4647a11b578fead29cdbb34d4adef8dd3dc35b876c9c6d5240d83f205abfe96e" +checksum = "4b182332558b18d807c4ce1ca8ca983b34c3ee32765e47b3f0f69b90355cc1dc" dependencies = [ "libc", ] [[package]] name = "memoffset" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg", ] @@ -4097,42 +4144,30 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.4.4" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" dependencies = [ "adler", - "autocfg", ] [[package]] name = "mio" -version = "0.8.2" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52da4364ffb0e4fe33a9841a98a3f3014fb964045ce4f7a45a398243c8d6b0c9" +checksum = "e5d732bc30207a6423068df043e3d02e0735b155ad7ce1a6f76fe2baa5b158de" dependencies = [ "libc", "log", - "miow", - "ntapi", "wasi 0.11.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "miow" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = [ - "winapi", + "windows-sys 0.42.0", ] [[package]] name = "mockall" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2be9a9090bc1cac2930688fa9478092a64c6a92ddc6ae0692d46b37d9cab709" +checksum = "50e4a1c770583dac7ab5e2f6c139153b783a53a1bbee9729613f193e59828326" dependencies = [ "cfg-if", "downcast", @@ -4145,9 +4180,9 @@ dependencies = [ [[package]] name = "mockall_derive" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86d702a0530a0141cf4ed147cf5ec7be6f2c187d4e37fcbefc39cf34116bfe8f" +checksum = "832663583d5fa284ca8810bf7015e46c9fff9622d3cf34bd1eea5003fec06dd0" dependencies = [ "cfg-if", "proc-macro2", @@ -4186,17 +4221,17 @@ dependencies = [ [[package]] name = "multihash" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3db354f401db558759dfc1e568d010a5d4146f4d3f637be1275ec4a3cf09689" +checksum = "1c346cf9999c631f002d8f977c4eaeaa0e6386f16007202308d0b3757522c2cc" dependencies = [ "blake2b_simd", "blake2s_simd", "blake3", "core2", - "digest 0.10.3", + "digest 0.10.5", "multihash-derive", - "sha2 0.10.2", + "sha2 0.10.6", "sha3", "unsigned-varint", ] @@ -4281,9 +4316,9 @@ checksum = "6a51313c5820b0b02bd422f4b44776fbf47961755c74ce64afc73bfad10226c3" [[package]] name = "native-tls" -version = "0.2.8" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48ba9f7719b5a0f42f338907614285fb5fd70e53858141f69898a1fb7203b24d" +checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", @@ -4378,9 +4413,9 @@ dependencies = [ [[package]] 
name = "nix" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f17df307904acd05aa8e32e97bb20f2a0df1728bbc2d771ae8f9a90463441e9" +checksum = "195cdbc1741b8134346d515b3a56a1c94b0912758009cfd53f99ea0f57b065fc" dependencies = [ "bitflags", "cfg-if", @@ -4396,13 +4431,12 @@ checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "nom" -version = "7.1.0" +version = "7.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" +checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" dependencies = [ "memchr", "minimal-lexical", - "version_check", ] [[package]] @@ -4412,11 +4446,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" [[package]] -name = "ntapi" -version = "0.3.6" +name = "nu-ansi-term" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" dependencies = [ + "overload", "winapi", ] @@ -4433,9 +4468,9 @@ dependencies = [ [[package]] name = "num-complex" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26873667bbbb7c5182d4a37c1add32cdf09f841af72da53318fdb81543c15085" +checksum = "7ae39348c8bc5fbd7f40c727a9925f03517afd2ab27d46702108b6a7e5414c19" dependencies = [ "num-traits", ] @@ -4452,9 +4487,9 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", "num-traits", @@ -4474,9 +4509,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", "libm", @@ -4484,23 +4519,14 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "f6058e64324c71e02bc2b150e4f3bc8286db6c83092132ffa3f6b1eab0f9def5" dependencies = [ "hermit-abi", "libc", ] -[[package]] -name = "object" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67ac1d3f9a1d3616fd9a60c8d74296f22406a238b6a72f5cc1e6f314df4ffbf9" -dependencies = [ - "memchr", -] - [[package]] name = "object" version = "0.29.0" @@ -4515,9 +4541,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.12.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" +checksum = "86f0b0d4bf799edbc74508c1e8bf170ff5f41238e5f8225603ca7caaae2b7860" [[package]] name = "opaque-debug" @@ -4533,29 +4559,41 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] 
name = "openssl" -version = "0.10.38" +version = "0.10.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c7ae222234c30df141154f159066c5093ff73b63204dcda7121eb082fc56a95" +checksum = "12fc0523e3bd51a692c8850d075d74dc062ccf251c0110668cbd921917118a13" dependencies = [ "bitflags", "cfg-if", "foreign-types", "libc", "once_cell", + "openssl-macros", "openssl-sys", ] +[[package]] +name = "openssl-macros" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b501e44f11665960c7e7fcf062c7d96a14ade4aa98116c004b2e37b5be7d736c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "openssl-probe" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.72" +version = "0.9.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e46109c383602735fa0a2e48dd2b7c892b048e1bf69e5c3b1d804b7d9c203cb" +checksum = "b03b84c3b2d099b81f0953422b4d4ad58761589d0229b5506356afca05a3670a" dependencies = [ "autocfg", "cc", @@ -4607,34 +4645,40 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.0.0" +version = "6.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" +checksum = "7b5bf27447411e9ee3ff51186bf7a08e16c341efdde93f4d823e8844429bed7e" [[package]] name = "output_vt100" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53cdc5b785b7a58c5aad8216b3dfa114df64b0b06ae6e1501cef91df2fbdf8f9" +checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66" dependencies = [ "winapi", ] +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + [[package]] name = "owo-colors" -version = "3.2.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20448fd678ec04e6ea15bbe0476874af65e98a01515d667aa49f1434dc44ebf4" +checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" [[package]] name = "pallet-assets" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-runtime", "sp-std", @@ -4643,12 +4687,12 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-application-crypto", "sp-authority-discovery", @@ -4659,12 +4703,12 @@ dependencies = [ [[package]] name = "pallet-authorship" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-authorship", "sp-runtime", @@ -4674,7 +4718,7 @@ dependencies = [ [[package]] name = "pallet-babe" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", @@ -4683,7 +4727,7 @@ dependencies = [ "pallet-authorship", "pallet-session", "pallet-timestamp", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-application-crypto", "sp-consensus-babe", @@ -4698,7 +4742,7 @@ dependencies = [ [[package]] name = "pallet-bags-list" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4706,7 +4750,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-io", @@ -4718,7 +4762,7 @@ dependencies = [ [[package]] name = "pallet-bags-list-remote-tests" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-election-provider-support", "frame-support", @@ -4737,13 +4781,13 @@ dependencies = [ [[package]] name = "pallet-balances" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-runtime", "sp-std", @@ -4752,13 +4796,13 @@ dependencies = [ [[package]] name = "pallet-beefy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "beefy-primitives", "frame-support", "frame-system", "pallet-session", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-runtime", @@ -4768,7 +4812,7 @@ dependencies = [ [[package]] name = "pallet-beefy-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "array-bytes", "beefy-merkle-tree", @@ -4779,7 +4823,7 @@ dependencies = [ "pallet-beefy", "pallet-mmr", "pallet-session", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-core", @@ -4791,14 +4835,14 @@ dependencies = [ 
[[package]] name = "pallet-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", "pallet-treasury", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-io", @@ -4809,7 +4853,7 @@ dependencies = [ [[package]] name = "pallet-child-bounties" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", @@ -4817,7 +4861,7 @@ dependencies = [ "log", "pallet-bounties", "pallet-treasury", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-io", @@ -4828,13 +4872,13 @@ dependencies = [ [[package]] name = "pallet-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-io", @@ -4845,13 +4889,13 @@ dependencies = [ [[package]] name = "pallet-conviction-voting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "assert_matches", "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-io", @@ -4862,13 +4906,13 @@ dependencies = [ [[package]] name = "pallet-democracy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-core", @@ -4880,7 +4924,7 @@ dependencies = [ [[package]] name = "pallet-election-provider-multi-phase" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -4888,7 +4932,7 @@ dependencies = [ "frame-system", "log", "pallet-election-provider-support-benchmarking", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "rand 0.7.3", "scale-info", "sp-arithmetic", @@ -4904,12 +4948,12 @@ dependencies = [ [[package]] name = "pallet-election-provider-support-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ 
"frame-benchmarking", "frame-election-provider-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-npos-elections", "sp-runtime", ] @@ -4917,13 +4961,13 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-io", @@ -4935,14 +4979,14 @@ dependencies = [ [[package]] name = "pallet-fast-unstake" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-election-provider-support", "frame-support", "frame-system", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-io", "sp-runtime", @@ -4953,12 +4997,12 @@ dependencies = [ [[package]] name = "pallet-gilt" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-arithmetic", "sp-runtime", @@ -4968,7 +5012,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", @@ -4976,7 +5020,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-application-crypto", "sp-core", @@ -4991,13 +5035,13 @@ dependencies = [ [[package]] name = "pallet-identity" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "enumflags2", "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-io", "sp-runtime", @@ -5007,14 +5051,14 @@ dependencies = [ [[package]] name = "pallet-im-online" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", "pallet-authorship", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-application-crypto", "sp-core", @@ -5027,12 +5071,12 @@ dependencies = [ [[package]] name = "pallet-indices" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-io", @@ -5044,13 +5088,13 @@ dependencies = [ [[package]] name = "pallet-membership" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-io", @@ -5061,13 +5105,13 @@ dependencies = [ [[package]] name = "pallet-mmr" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "ckb-merkle-mountain-range", "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-io", @@ -5079,11 +5123,11 @@ dependencies = [ [[package]] name = "pallet-mmr-rpc" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "anyhow", "jsonrpsee", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "serde", "sp-api", "sp-blockchain", @@ -5095,13 +5139,13 @@ dependencies = [ [[package]] name = "pallet-multisig" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-io", "sp-runtime", @@ -5111,12 +5155,12 @@ dependencies = [ [[package]] name = "pallet-nomination-pools" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-io", @@ -5128,7 +5172,7 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-benchmarking" version = "1.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5137,7 +5181,7 @@ dependencies = [ "pallet-bags-list", "pallet-nomination-pools", "pallet-staking", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-runtime", "sp-runtime-interface", @@ -5148,9 +5192,9 @@ dependencies = [ [[package]] name = "pallet-nomination-pools-runtime-api" version = "1.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-api", "sp-std", ] @@ -5158,13 +5202,13 @@ dependencies = [ [[package]] name = "pallet-offences" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support", "frame-system", "log", "pallet-balances", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-runtime", @@ -5175,7 +5219,7 @@ dependencies = [ [[package]] name = "pallet-offences-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5188,7 +5232,7 @@ dependencies = [ "pallet-offences", "pallet-session", "pallet-staking", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-runtime", "sp-staking", @@ -5198,13 +5242,13 @@ dependencies = [ [[package]] name = "pallet-preimage" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-io", @@ -5215,12 +5259,12 @@ dependencies = [ [[package]] name = "pallet-proxy" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-io", "sp-runtime", @@ -5230,13 +5274,13 @@ dependencies = [ [[package]] name = "pallet-ranked-collective" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-arithmetic", "sp-core", @@ -5248,12 +5292,12 @@ dependencies = [ [[package]] name = "pallet-recovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-io", "sp-runtime", @@ -5263,13 +5307,13 @@ dependencies = [ [[package]] name = "pallet-referenda" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "assert_matches", "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-arithmetic", @@ -5281,13 +5325,13 @@ dependencies = [ [[package]] name = "pallet-scheduler" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-io", "sp-runtime", @@ -5297,14 +5341,14 @@ dependencies = [ [[package]] name = "pallet-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", "log", "pallet-timestamp", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-io", @@ -5318,7 +5362,7 @@ dependencies = [ [[package]] name = "pallet-session-benchmarking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", @@ -5334,11 +5378,11 @@ dependencies = [ [[package]] name = "pallet-society" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "rand_chacha 0.2.2", "scale-info", "sp-runtime", @@ -5348,7 +5392,7 @@ dependencies = [ [[package]] name = "pallet-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-election-provider-support", @@ -5357,7 +5401,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "rand_chacha 0.2.2", "scale-info", "serde", @@ -5371,7 +5415,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -5382,7 +5426,7 @@ dependencies = [ [[package]] name = "pallet-staking-reward-fn" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "log", "sp-arithmetic", @@ -5391,13 +5435,13 @@ dependencies = [ [[package]] name = "pallet-state-trie-migration" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#4a5a9dea00c9b4e4d34ff56368451aa4dac09d77" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-io", @@ -5408,11 +5452,11 @@ dependencies = [ [[package]] name = "pallet-sudo" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-io", "sp-runtime", @@ -5422,13 +5466,13 @@ dependencies = [ [[package]] name = "pallet-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-inherents", "sp-io", @@ -5440,14 +5484,14 @@ dependencies = [ [[package]] name = "pallet-tips" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", "pallet-treasury", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-core", @@ -5459,11 +5503,11 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-core", @@ -5475,11 +5519,11 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-api", "sp-blockchain", "sp-core", @@ -5491,10 +5535,10 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "pallet-transaction-payment", - "parity-scale-codec 3.1.5", + "parity-scale-codec 
3.2.1", "sp-api", "sp-runtime", "sp-weights", @@ -5503,14 +5547,14 @@ dependencies = [ [[package]] name = "pallet-treasury" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "impl-trait-for-tuples", "pallet-balances", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-runtime", @@ -5520,12 +5564,12 @@ dependencies = [ [[package]] name = "pallet-utility" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-io", @@ -5536,13 +5580,13 @@ dependencies = [ [[package]] name = "pallet-vesting" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-runtime", "sp-std", @@ -5551,12 +5595,12 @@ dependencies = [ [[package]] name = "pallet-whitelist" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-api", "sp-runtime", @@ -5571,7 +5615,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-parachain", "polkadot-runtime-parachains", "scale-info", @@ -5596,7 +5640,7 @@ dependencies = [ "pallet-assets", "pallet-balances", "pallet-xcm", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-primitives", "polkadot-runtime-common", "scale-info", @@ -5644,9 +5688,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.1.5" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9182e4a71cae089267ab03e67c99368db7cd877baf50f931e5d6d4b71e195ac0" +checksum = "366e44391a8af4cfd6002ef6ba072bae071a96aafca98d7d448a34c5dca38b6a" dependencies = [ "arrayvec 0.7.2", "bitvec 1.0.1", @@ -5734,7 +5778,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core 0.9.1", + "parking_lot_core 0.9.4", ] [[package]] @@ -5753,22 +5797,22 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.1" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28141e0cc4143da2443301914478dc976a61ffdb3f043058310c70df2fed8954" +checksum = "4dc9e0dc2adc1c69d09143aff38d3d30c5c3f0df0dad82e6d25547af174ebec0" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-sys 
0.32.0", + "windows-sys 0.42.0", ] [[package]] name = "paste" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" +checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" [[package]] name = "pbkdf2" @@ -5796,24 +5840,25 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "pest" -version = "2.1.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10f4872ae94d7b90ae48754df22fd42ad52ce740b8f370b03da4835417403e53" +checksum = "a528564cc62c19a7acac4d81e01f39e53e25e17b934878f4c6d25cc2836e62f8" dependencies = [ + "thiserror", "ucd-trie", ] [[package]] name = "pest_derive" -version = "2.1.0" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "833d1ae558dc601e9a60366421196a8d94bc0ac980476d0b67e1d0988d72b2d0" +checksum = "d5fd9bc6500181952d34bd0b2b0163a54d794227b498be0b7afa7698d0a7b18f" dependencies = [ "pest", "pest_generator", @@ -5821,9 +5866,9 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.1.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99b8db626e31e5b81787b9783425769681b347011cc59471e33ea46d2ea0cf55" +checksum = "d2610d5ac5156217b4ff8e46ddcef7cdf44b273da2ac5bca2ecbfa86a330e7c4" dependencies = [ "pest", "pest_meta", @@ -5834,20 +5879,20 @@ dependencies = [ [[package]] name = "pest_meta" -version = "2.1.3" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54be6e404f5317079812fc8f9f5279de376d8856929e21c184ecf6bbd692a11d" +checksum = "824749bf7e21dd66b36fbe26b3f45c713879cccd4a009a917ab8e045ca8246fe" dependencies = [ - "maplit", + "once_cell", "pest", - "sha-1 0.8.2", + "sha1", ] [[package]] name = "petgraph" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a13a2fa9d0b63e5f22328828741e523766fff0ee9e779316902290dff3f824f" +checksum = "e6d5014253a1331579ce62aa67443b4a658c5e7dd03d4bc6d302b94474888143" dependencies = [ "fixedbitset", "indexmap", @@ -5881,9 +5926,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" @@ -5903,9 +5948,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.22" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12295df4f294471248581bc09bef3c38a5e46f1e36d6a37353621a0c6c357e1f" +checksum = "6ac9a59f73473f1b8d852421e59e64809f025994837ef743615c6d0c5b305160" [[package]] name = "platforms" @@ -5919,7 +5964,7 @@ version = "0.9.31" dependencies = [ "assert_cmd", "color-eyre", - "nix 0.24.1", + "nix 0.24.2", "parity-util-mem", "polkadot-cli", "polkadot-core-primitives", @@ -5933,7 +5978,7 @@ name = 
"polkadot-approval-distribution" version = "0.9.31" dependencies = [ "assert_matches", - "env_logger 0.9.0", + "env_logger", "futures", "log", "polkadot-node-network-protocol", @@ -5959,7 +6004,7 @@ dependencies = [ "always-assert", "assert_matches", "bitvec 1.0.1", - "env_logger 0.9.0", + "env_logger", "futures", "log", "maplit", @@ -5988,7 +6033,7 @@ dependencies = [ "futures", "futures-timer", "lru", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6012,13 +6057,13 @@ name = "polkadot-availability-recovery" version = "0.9.31" dependencies = [ "assert_matches", - "env_logger 0.9.0", + "env_logger", "fatality", "futures", "futures-timer", "log", "lru", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6111,12 +6156,12 @@ dependencies = [ "always-assert", "assert_matches", "bitvec 1.0.1", - "env_logger 0.9.0", + "env_logger", "fatality", "futures", "futures-timer", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6137,7 +6182,7 @@ dependencies = [ name = "polkadot-core-primitives" version = "0.9.31" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parity-util-mem", "scale-info", "sp-core", @@ -6158,7 +6203,7 @@ dependencies = [ "indexmap", "lazy_static", "lru", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6181,7 +6226,7 @@ dependencies = [ name = "polkadot-erasure-coding" version = "0.9.31" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-node-primitives", "polkadot-primitives", "reed-solomon-novelpoly", @@ -6228,7 +6273,7 @@ dependencies = [ "fatality", "futures", "futures-timer", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "polkadot-node-network-protocol", "polkadot-node-subsystem", @@ -6251,7 +6296,7 @@ name = "polkadot-node-collation-generation" version = "0.9.31" dependencies = [ "futures", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6279,7 +6324,7 @@ dependencies = [ "kvdb-memorydb", "lru", "merlin", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "polkadot-node-jaeger", "polkadot-node-primitives", @@ -6310,13 +6355,13 @@ version = "0.9.31" dependencies = [ "assert_matches", "bitvec 1.0.1", - "env_logger 0.9.0", + "env_logger", "futures", "futures-timer", "kvdb", "kvdb-memorydb", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "polkadot-erasure-coding", "polkadot-node-primitives", @@ -6382,7 +6427,7 @@ dependencies = [ "async-trait", "futures", "futures-timer", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-node-core-pvf", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6403,7 +6448,7 @@ version = "0.9.31" dependencies = [ "futures", "maplit", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -6425,7 +6470,7 @@ dependencies = [ "futures-timer", "kvdb", "kvdb-memorydb", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", 
"polkadot-node-primitives", "polkadot-node-subsystem", @@ -6448,7 +6493,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "lru", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -6530,7 +6575,7 @@ dependencies = [ "futures", "futures-timer", "hex-literal", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "pin-project", "polkadot-core-primitives", "polkadot-node-metrics", @@ -6606,7 +6651,7 @@ dependencies = [ "lazy_static", "log", "mick-jaeger", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "polkadot-node-primitives", "polkadot-primitives", @@ -6625,8 +6670,8 @@ dependencies = [ "futures-timer", "hyper", "log", - "nix 0.24.1", - "parity-scale-codec 3.1.5", + "nix 0.24.2", + "parity-scale-codec 3.2.1", "polkadot-primitives", "polkadot-test-service", "prioritized-metered-channel", @@ -6652,7 +6697,7 @@ dependencies = [ "fatality", "futures", "hex", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-primitives", @@ -6672,7 +6717,7 @@ version = "0.9.31" dependencies = [ "bounded-vec", "futures", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-erasure-coding", "polkadot-parachain", "polkadot-primitives", @@ -6744,7 +6789,7 @@ dependencies = [ "assert_matches", "async-trait", "derive_more", - "env_logger 0.9.0", + "env_logger", "fatality", "futures", "itertools", @@ -6755,7 +6800,7 @@ dependencies = [ "log", "lru", "parity-db", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parity-util-mem", "parking_lot 0.11.2", "pin-project", @@ -6810,7 +6855,7 @@ version = "0.9.31" dependencies = [ "derive_more", "frame-support", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parity-util-mem", "polkadot-core-primitives", "scale-info", @@ -6824,7 +6869,7 @@ dependencies = [ name = "polkadot-performance-test" version = "0.9.31" dependencies = [ - "env_logger 0.9.0", + "env_logger", "kusama-runtime", "log", "polkadot-erasure-coding", @@ -6840,7 +6885,7 @@ version = "0.9.31" dependencies = [ "bitvec 1.0.1", "hex-literal", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parity-util-mem", "polkadot-core-primitives", "polkadot-parachain", @@ -6958,7 +7003,7 @@ dependencies = [ "pallet-utility", "pallet-vesting", "pallet-xcm", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-constants", @@ -7027,7 +7072,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-treasury", "pallet-vesting", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-runtime-parachains", @@ -7067,7 +7112,7 @@ name = "polkadot-runtime-metrics" version = "0.9.31" dependencies = [ "bs58", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-primitives", "sp-std", "sp-tracing", @@ -7096,7 +7141,7 @@ dependencies = [ "pallet-staking", "pallet-timestamp", "pallet-vesting", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-runtime-metrics", @@ -7133,7 +7178,7 @@ dependencies = [ "async-trait", "beefy-gadget", "beefy-primitives", - "env_logger 0.9.0", + "env_logger", "frame-support", "frame-system-rpc-runtime-api", "futures", @@ -7247,7 +7292,7 @@ dependencies = [ "futures", "futures-timer", "indexmap", - 
"parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -7272,7 +7317,7 @@ dependencies = [ name = "polkadot-statement-table" version = "0.9.31" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-primitives", "sp-core", ] @@ -7282,7 +7327,7 @@ name = "polkadot-test-client" version = "0.9.31" dependencies = [ "futures", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-node-subsystem", "polkadot-primitives", "polkadot-test-runtime", @@ -7361,7 +7406,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-vesting", "pallet-xcm", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -7466,10 +7511,11 @@ dependencies = [ [[package]] name = "polling" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "685404d509889fade3e86fe3a5803bca2ec09b0c0778d5ada6ec8bf7a8de5259" +checksum = "ab4609a838d88b73d8238967b60dd115cc08d38e2bbaf51ee1e4b695f89122e2" dependencies = [ + "autocfg", "cfg-if", "libc", "log", @@ -7522,15 +7568,15 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed0cfbc8191465bed66e1718596ee0b0b35d5ee1f41c5df2189d0fe8bde535ba" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "predicates" -version = "2.1.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95e5a7689e456ab905c22c2b48225bb921aba7c8dfa58440d68ba13f6222a715" +checksum = "ed6bd09a7f7e68f3f0bf710fb7ab9c4615a488b58b5f653382a687701e458c92" dependencies = [ "difflib", "float-cmp", @@ -7542,15 +7588,15 @@ dependencies = [ [[package]] name = "predicates-core" -version = "1.0.2" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e35a3326b75e49aa85f5dc6ec15b41108cf5aee58eabb1f274dd18b73c2451" +checksum = "72f883590242d3c6fc5bf50299011695fa6590c2c70eac95ee1bdb9a733ad1a2" [[package]] name = "predicates-tree" -version = "1.0.4" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "338c7be2905b732ae3984a2f40032b5e94fd8f52505b186c7d4d68d193445df7" +checksum = "54ff541861505aabf6ea722d2131ee980b8276e10a1297b94e896dd8b621850d" dependencies = [ "predicates-core", "termtree", @@ -7558,21 +7604,31 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89f989ac94207d048d92db058e4f6ec7342b0971fc58d1271ca148b799b3563" +checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755" dependencies = [ - "ansi_term", "ctor", "diff", "output_vt100", + "yansi", +] + +[[package]] +name = "prettyplease" +version = "0.1.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c142c0e46b57171fe0c528bee8c5b7569e80f0c17e377cd0e30ea57dbc11bb51" +dependencies = [ + "proc-macro2", + "syn", ] [[package]] name = "primitive-types" -version = "0.12.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cfd65aea0c5fa0bfcc7c9e7ca828c921ef778f43d325325ec84bda371bfa75a" +checksum = "9f3486ccba82358b11a77516035647c34ba167dfa53312630de83b12bd4f3d66" 
dependencies = [ "fixed-hash", "impl-codec", @@ -7599,10 +7655,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.1.3" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" dependencies = [ + "once_cell", "thiserror", "toml", ] @@ -7633,24 +7690,24 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.43" +version = "1.0.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a2ca2c61bc9f3d74d2886294ab7b9853abd9c1ad903a3ac7815c58989bb7bab" +checksum = "5ea3d908b0e36316caf9e9e2c4625cdde190a7e6f440d794667ed17a1855e725" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus" -version = "0.13.0" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7f64969ffd5dd8f39bd57a68ac53c163a095ed9d0fb707146da1b27025a3504" +checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" dependencies = [ "cfg-if", "fnv", "lazy_static", "memchr", - "parking_lot 0.11.2", + "parking_lot 0.12.1", "thiserror", ] @@ -7679,20 +7736,21 @@ dependencies = [ [[package]] name = "prometheus-parse" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c996f3caea1c51aa034c0d2dfd8447a12c555f4567b02677ef8a865ac4cce712" +checksum = "ef7a8ed15bcffc55fe0328931ef20d393bb89ad704756a37bd20cffb4804f306" dependencies = [ "chrono", + "itertools", "lazy_static", "regex", ] [[package]] name = "prost" -version = "0.11.0" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399c3c31cdec40583bb68f0b18403400d01ec4289c383aa047560439952c4dd7" +checksum = "a0841812012b2d4a6145fae9a6af1534873c32aa67fff26bd09f8fa42c83f95a" dependencies = [ "bytes", "prost-derive", @@ -7700,9 +7758,9 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f835c582e6bd972ba8347313300219fed5bfa52caf175298d860b61ff6069bb" +checksum = "1d8b442418ea0822409d9e7d047cbf1e7e9e1760b172bf9982cf29d517c93511" dependencies = [ "bytes", "heck", @@ -7711,9 +7769,11 @@ dependencies = [ "log", "multimap", "petgraph", + "prettyplease", "prost", "prost-types", "regex", + "syn", "tempfile", "which", ] @@ -7733,9 +7793,9 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.11.0" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7345d5f0e08c0536d7ac7229952590239e77abf0a0100a1b1d890add6ea96364" +checksum = "164ae68b6587001ca506d3bf7f1000bfa248d0e1217b618108fba4ec1d0cc306" dependencies = [ "anyhow", "itertools", @@ -7746,9 +7806,9 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4dfaa718ad76a44b3415e6c4d53b17c8f99160dcb3a99b10470fce8ad43f6e3e" +checksum = "747761bc3dc48f9a34553bf65605cf6cb6288ba219f3450b4275dbd81539551a" dependencies = [ "bytes", "prost", @@ -7756,9 +7816,9 @@ dependencies = [ [[package]] name = "psm" -version = "0.1.16" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd136ff4382c4753fc061cb9e4712ab2af263376b95bbd5bd8cd50c020b78e69" +checksum = 
"5787f7cda34e3033a72192c018bc5883100330f362ef279a8cbccfce8bb4e874" dependencies = [ "cc", ] @@ -7782,12 +7842,6 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" -[[package]] -name = "quick-error" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" - [[package]] name = "quicksink" version = "0.1.2" @@ -7801,9 +7855,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" dependencies = [ "proc-macro2", ] @@ -7842,7 +7896,7 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -7862,7 +7916,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -7876,18 +7930,18 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.8", ] [[package]] name = "rand_distr" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "964d548f8e7d12e102ef183a0de7e98180c9f8729f555897a857b96e48122d2f" +checksum = "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" dependencies = [ "num-traits", "rand 0.8.5", @@ -7917,7 +7971,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59cad018caf63deb318e5a4586d99a24424a364f40f1e5778c29aca23f4fc73e" dependencies = [ - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -7928,9 +7982,9 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" [[package]] name = "rayon" -version = "1.5.1" +version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06aca804d41dbc8ba42dfd964f0d01334eceb64314b9ecf7c5fad5188a06d90" +checksum = "bd99e5772ead8baa5215278c9b15bf92087709e9c1b2d1f97cdb5a183c933a7d" dependencies = [ "autocfg", "crossbeam-deque", @@ -7940,34 +7994,34 @@ dependencies = [ [[package]] name = "rayon-core" -version = "1.9.1" +version = "1.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d78120e2c850279833f1dd3582f730c4ab53ed95aeaaaa862a2a5c71b1656d8e" +checksum = "258bcdb5ac6dad48491bb2992db6b7cf74878b0384908af124823d118c99683f" dependencies = [ "crossbeam-channel", "crossbeam-deque", "crossbeam-utils", - "lazy_static", "num_cpus", ] [[package]] name = "redox_syscall" -version = "0.2.10" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] [[package]] name = 
"redox_users" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.8", "redox_syscall", + "thiserror", ] [[package]] @@ -7985,18 +8039,18 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.6" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" +checksum = "53b15debb4f9d60d767cd8ca9ef7abb2452922f3214671ff052defc7f3502c44" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.6" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" +checksum = "abfa8511e9e94fd3de6585a3d3cd00e01ed556dc9814829280af0e8dc72a8f36" dependencies = [ "proc-macro2", "quote", @@ -8017,9 +8071,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = "e076559ef8e241f2ae3479e36f97bd5741c0330689e217ad51ce2c76808b868a" dependencies = [ "aho-corasick", "memchr", @@ -8037,9 +8091,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.27" +version = "0.6.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "456c603be3e8d448b072f410900c09faf164fbce2d480456f50eea6e25f9c848" [[package]] name = "remote-ext-tests-bags-list" @@ -8063,11 +8117,11 @@ dependencies = [ [[package]] name = "remote-externalities" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "env_logger 0.9.0", + "env_logger", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "serde", "serde_json", "sp-core", @@ -8088,9 +8142,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.11" +version = "0.11.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b75aa69a3f06bbcc66ede33af2af253c6f7a86b1ca0033f60c580a27074fbf92" +checksum = "431949c384f4e2ae07605ccaa56d1d9d2ecdb5cadd4f9577ccfab29f2e5149fc" dependencies = [ "base64", "bytes", @@ -8104,12 +8158,12 @@ dependencies = [ "hyper-tls", "ipnet", "js-sys", - "lazy_static", "log", "mime", "native-tls", + "once_cell", "percent-encoding", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.9", "serde", "serde_json", "serde_urlencoded", @@ -8120,7 +8174,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg 0.10.1", + "winreg", ] [[package]] @@ -8130,7 +8184,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52e44394d2086d010551b14b53b1f24e31647570cd1deb0379e2c21b329aba00" dependencies = [ "hostname", - "quick-error 1.2.3", + "quick-error", ] [[package]] @@ -8221,7 +8275,7 @@ dependencies = [ "pallet-vesting", "pallet-xcm", "pallet-xcm-benchmarks", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-parachain", "polkadot-primitives", 
"polkadot-runtime-common", @@ -8271,9 +8325,9 @@ dependencies = [ [[package]] name = "rpassword" -version = "7.0.0" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b763cb66df1c928432cc35053f8bd4cec3335d8559fc16010017d16b3c1680" +checksum = "20c9f5d2a0c3e2ea729ab3706d22217177770654c3ef5056b68b69d07332d3f5" dependencies = [ "libc", "winapi", @@ -8290,7 +8344,7 @@ dependencies = [ "log", "netlink-packet-route", "netlink-proto", - "nix 0.24.1", + "nix 0.24.2", "thiserror", ] @@ -8318,28 +8372,42 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.4", + "semver 1.0.14", ] [[package]] name = "rustix" -version = "0.35.9" +version = "0.35.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c825b8aa8010eb9ee99b75f05e10180b9278d161583034d7574c9d617aeada" +checksum = "727a1a6d65f786ec22df8a81ca3121107f235970dc1705ed681d3e6e8b9cd5f9" dependencies = [ "bitflags", "errno", - "io-lifetimes", + "io-lifetimes 0.7.5", "libc", - "linux-raw-sys", - "windows-sys 0.36.1", + "linux-raw-sys 0.0.46", + "windows-sys 0.42.0", +] + +[[package]] +name = "rustix" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "812a2ec2043c4d6bc6482f5be2ab8244613cac2493d128d36c0759e52a626ab3" +dependencies = [ + "bitflags", + "errno", + "io-lifetimes 1.0.1", + "libc", + "linux-raw-sys 0.1.2", + "windows-sys 0.42.0", ] [[package]] name = "rustls" -version = "0.20.2" +version = "0.20.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" +checksum = "539a2bfe908f471bfa933876bd1eb6a19cf2176d375f82ef7f99530a40e48c2c" dependencies = [ "log", "ring", @@ -8349,9 +8417,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca9ebdfa27d3fc180e42879037b5338ab1c040c06affd00d8338598e7800943" +checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" dependencies = [ "openssl-probe", "rustls-pemfile", @@ -8361,18 +8429,18 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "0.2.1" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" dependencies = [ "base64", ] [[package]] name = "rustversion" -version = "1.0.6" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" +checksum = "97477e48b4cf8603ad5f7aaf897467cf42ab4218a38ef76fb14c2d6773a6d6a8" [[package]] name = "rw-stream-sink" @@ -8387,9 +8455,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.6" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c9613b5a66ab9ba26415184cfc41156594925a9cf3a2057e57f31ff145f6568" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" [[package]] name = "same-file" @@ -8403,7 +8471,7 @@ dependencies = [ [[package]] name = "sc-allocator" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "log", "sp-core", @@ -8414,7 +8482,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "futures", @@ -8422,7 +8490,7 @@ dependencies = [ "ip_network", "libp2p", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "prost", "prost-build", "rand 0.7.3", @@ -8441,12 +8509,12 @@ dependencies = [ [[package]] name = "sc-basic-authorship" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "futures", "futures-timer", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -8464,9 +8532,9 @@ dependencies = [ [[package]] name = "sc-block-builder" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sc-client-api", "sp-api", "sp-block-builder", @@ -8480,11 +8548,11 @@ dependencies = [ [[package]] name = "sc-chain-spec" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "impl-trait-for-tuples", "memmap2", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sc-chain-spec-derive", "sc-network-common", "sc-telemetry", @@ -8497,7 +8565,7 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -8508,7 +8576,7 @@ dependencies = [ [[package]] name = "sc-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "array-bytes", "chrono", @@ -8518,7 +8586,7 @@ dependencies = [ "libp2p", "log", "names", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "rand 0.7.3", "regex", "rpassword", @@ -8548,13 +8616,13 @@ dependencies = [ [[package]] name = "sc-client-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "fnv", "futures", "hash-db", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "sc-executor", "sc-transaction-pool-api", @@ -8576,7 +8644,7 @@ dependencies = [ [[package]] 
name = "sc-client-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "hash-db", "kvdb", @@ -8585,7 +8653,7 @@ dependencies = [ "linked-hash-map", "log", "parity-db", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "sc-client-api", "sc-state-db", @@ -8601,7 +8669,7 @@ dependencies = [ [[package]] name = "sc-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "futures", @@ -8625,7 +8693,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "fork-tree", @@ -8635,7 +8703,7 @@ dependencies = [ "num-bigint", "num-rational", "num-traits", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "sc-client-api", "sc-consensus", @@ -8666,7 +8734,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "futures", "jsonrpsee", @@ -8688,10 +8756,10 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "fork-tree", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sc-client-api", "sc-consensus", "sp-blockchain", @@ -8701,13 +8769,13 @@ dependencies = [ [[package]] name = "sc-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "futures", "futures-timer", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sc-client-api", "sc-consensus", "sc-telemetry", @@ -8725,11 +8793,11 @@ dependencies = [ [[package]] name = "sc-executor" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "lazy_static", "lru", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "sc-executor-common", "sc-executor-wasmi", @@ -8741,7 +8809,6 @@ dependencies = [ "sp-io", "sp-panic-handler", "sp-runtime-interface", - "sp-tasks", "sp-trie", "sp-version", "sp-wasm-interface", @@ -8752,10 +8819,10 @@ dependencies = [ [[package]] name = "sc-executor-common" version = "0.10.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "environmental", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sc-allocator", "sp-maybe-compressed-blob", "sp-sandbox", @@ -8768,10 +8835,10 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sc-allocator", "sc-executor-common", "sp-runtime-interface", @@ -8783,15 +8850,15 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "cfg-if", "libc", "log", "once_cell", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parity-wasm", - "rustix", + "rustix 0.35.13", "sc-allocator", "sc-executor-common", "sp-runtime-interface", @@ -8803,7 +8870,7 @@ dependencies = [ [[package]] name = "sc-finality-grandpa" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "ahash", "array-bytes", @@ -8814,7 +8881,7 @@ dependencies = [ "futures", "futures-timer", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "rand 0.8.5", "sc-block-builder", @@ -8844,13 +8911,13 @@ dependencies = [ [[package]] name = "sc-finality-grandpa-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "finality-grandpa", "futures", "jsonrpsee", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sc-client-api", "sc-finality-grandpa", "sc-rpc", @@ -8865,7 +8932,7 @@ dependencies = [ [[package]] name = "sc-informant" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "ansi_term", "futures", @@ -8882,7 +8949,7 @@ dependencies = [ [[package]] name = "sc-keystore" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "array-bytes", "async-trait", @@ -8897,7 +8964,7 @@ dependencies = [ [[package]] name = "sc-network" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "array-bytes", "async-trait", @@ -8916,7 
+8983,7 @@ dependencies = [ "linked_hash_set", "log", "lru", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "pin-project", "prost", @@ -8944,7 +9011,7 @@ dependencies = [ [[package]] name = "sc-network-bitswap" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "cid", "futures", @@ -8964,7 +9031,7 @@ dependencies = [ [[package]] name = "sc-network-common" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "bitflags", @@ -8973,7 +9040,7 @@ dependencies = [ "futures-timer", "libp2p", "linked_hash_set", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "prost-build", "sc-consensus", "sc-peerset", @@ -8990,7 +9057,7 @@ dependencies = [ [[package]] name = "sc-network-gossip" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "ahash", "futures", @@ -9008,13 +9075,13 @@ dependencies = [ [[package]] name = "sc-network-light" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "array-bytes", "futures", "libp2p", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "prost", "prost-build", "sc-client-api", @@ -9029,7 +9096,7 @@ dependencies = [ [[package]] name = "sc-network-sync" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "array-bytes", "fork-tree", @@ -9038,7 +9105,7 @@ dependencies = [ "log", "lru", "mockall", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "prost", "prost-build", "sc-client-api", @@ -9059,14 +9126,14 @@ dependencies = [ [[package]] name = "sc-network-transactions" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "array-bytes", "futures", "hex", "libp2p", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "pin-project", "sc-network-common", "sc-peerset", @@ -9078,7 +9145,7 @@ dependencies = [ [[package]] name = "sc-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "array-bytes", "bytes", @@ -9090,7 +9157,7 @@ dependencies = [ "libp2p", "num_cpus", "once_cell", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "rand 0.7.3", "sc-client-api", @@ -9108,7 +9175,7 @@ dependencies = [ 
[[package]] name = "sc-peerset" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "futures", "libp2p", @@ -9121,7 +9188,7 @@ dependencies = [ [[package]] name = "sc-proposer-metrics" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "log", "substrate-prometheus-endpoint", @@ -9130,13 +9197,13 @@ dependencies = [ [[package]] name = "sc-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "futures", "hash-db", "jsonrpsee", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "sc-block-builder", "sc-chain-spec", @@ -9160,12 +9227,12 @@ dependencies = [ [[package]] name = "sc-rpc-api" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "futures", "jsonrpsee", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "sc-chain-spec", "sc-transaction-pool-api", @@ -9183,7 +9250,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "futures", "jsonrpsee", @@ -9196,12 +9263,12 @@ dependencies = [ [[package]] name = "sc-rpc-spec-v2" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "futures", "hex", "jsonrpsee", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sc-chain-spec", "sc-transaction-pool-api", "serde", @@ -9215,7 +9282,7 @@ dependencies = [ [[package]] name = "sc-service" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "directories", @@ -9225,7 +9292,7 @@ dependencies = [ "hash-db", "jsonrpsee", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parity-util-mem", "parking_lot 0.12.1", "pin-project", @@ -9286,10 +9353,10 @@ dependencies = [ [[package]] name = "sc-state-db" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parity-util-mem", "parity-util-mem-derive", "parking_lot 0.12.1", @@ -9300,10 
+9367,10 @@ dependencies = [ [[package]] name = "sc-sync-state-rpc" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "jsonrpsee", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sc-chain-spec", "sc-client-api", "sc-consensus-babe", @@ -9319,7 +9386,7 @@ dependencies = [ [[package]] name = "sc-sysinfo" version = "6.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "futures", "libc", @@ -9338,7 +9405,7 @@ dependencies = [ [[package]] name = "sc-telemetry" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "chrono", "futures", @@ -9356,7 +9423,7 @@ dependencies = [ [[package]] name = "sc-tracing" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "ansi_term", "atty", @@ -9387,7 +9454,7 @@ dependencies = [ [[package]] name = "sc-tracing-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9398,14 +9465,14 @@ dependencies = [ [[package]] name = "sc-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "futures", "futures-timer", "linked-hash-map", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parity-util-mem", "parking_lot 0.12.1", "sc-client-api", @@ -9425,7 +9492,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "futures", @@ -9439,7 +9506,7 @@ dependencies = [ [[package]] name = "sc-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "futures", "futures-timer", @@ -9451,23 +9518,23 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.1.2" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c46be926081c9f4dd5dd9b6f1d3e3229f2360bc6502dd8836f84a93b7c75e99a" +checksum = "88d8a765117b237ef233705cc2cc4c6a27fccd46eea6ef0c8c6dae5f3ef407f8" dependencies = [ "bitvec 1.0.1", "cfg-if", "derive_more", - 
"parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info-derive", "serde", ] [[package]] name = "scale-info-derive" -version = "2.1.2" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50e334bb10a245e28e5fd755cabcafd96cfcd167c99ae63a46924ca8d8703a3c" +checksum = "cdcd47b380d8c4541044e341dcd9475f55ba37ddc50c908d945fc036a8642496" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -9477,12 +9544,12 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" dependencies = [ "lazy_static", - "winapi", + "windows-sys 0.36.1", ] [[package]] @@ -9533,7 +9600,7 @@ checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" dependencies = [ "base16ct", "der", - "generic-array 0.14.4", + "generic-array 0.14.6", "pkcs8", "subtle", "zeroize", @@ -9541,18 +9608,18 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.24.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7649a0b3ffb32636e60c7ce0d70511eda9c52c658cd0634e194d5a19943aeff" +checksum = "ff55dc09d460954e9ef2fa8a7ced735a964be9981fd50e870b2b3b0705e14964" dependencies = [ "secp256k1-sys", ] [[package]] name = "secp256k1-sys" -version = "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7058dc8eaf3f2810d7828680320acda0b25a288f6d288e19278e249bbf74226b" +checksum = "83080e2c2fc1006e625be82e5d1eb6a43b7fd9578b617fcc55814daf286bba4b" dependencies = [ "cc", ] @@ -9568,9 +9635,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.4.2" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" dependencies = [ "bitflags", "core-foundation", @@ -9581,9 +9648,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.4.2" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9dd14d83160b528b7bfd66439110573efcfbe281b17fc2ca9f39f550d619c7e" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" dependencies = [ "core-foundation-sys", "libc", @@ -9600,9 +9667,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.4" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "568a8e6258aa33c13358f81fd834adb854c6f7c9468520910a9b1e8fac068012" +checksum = "e25dfac463d778e353db5be2449d1cce89bd6fd23c9f1ea21310ce6e5a1b29c4" dependencies = [ "serde", ] @@ -9621,18 +9688,18 @@ checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5" [[package]] name = "serde" -version = "1.0.137" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" +checksum = "d193d69bae983fc11a79df82342761dfbf28a99fc8d203dca4c3c1b590948965" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.137" +version = "1.0.147" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" 
+checksum = "4f1d362ca8fc9c3e3a7484440752472d68a6caa98f1ab81d99b5dfe517cec852" dependencies = [ "proc-macro2", "quote", @@ -9650,9 +9717,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.85" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" +checksum = "6ce777b7b150d76b9cf60d28b55f5847135a003f7d7350c6be7a773508ce7d45" dependencies = [ "itoa", "ryu", @@ -9680,18 +9747,6 @@ dependencies = [ "serde", ] -[[package]] -name = "sha-1" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7d94d0bede923b3cea61f3f1ff57ff8cdfd77b400fb8f9998949e0cf04163df" -dependencies = [ - "block-buffer 0.7.3", - "digest 0.8.1", - "fake-simd", - "opaque-debug 0.2.3", -] - [[package]] name = "sha-1" version = "0.9.8" @@ -9713,7 +9768,18 @@ checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest 0.10.5", +] + +[[package]] +name = "sha1" +version = "0.10.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.5", ] [[package]] @@ -9730,9 +9796,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.9.8" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b69f9a4c9740d74c5baa3fd2e547f9525fa8088a8a958e0ca2409a514e33f5fa" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", "cfg-if", @@ -9743,22 +9809,22 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.2" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55deaec60f81eefe3cce0dc50bda92d6d8e88f2a27df7c5033b42afeb1ed2676" +checksum = "82e6b795fe2e3b1e845bafcb27aa35405c4d47cdfc92af5fc8d3002f76cebdc0" dependencies = [ "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest 0.10.5", ] [[package]] name = "sha3" -version = "0.10.0" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f935e31cf406e8c0e96c2815a5516181b7004ae8c5f296293221e9b1e356bd" +checksum = "bdf0c33fae925bdc080598b84bc15c55e7b9a4a43b3c704da051f977469691c9" dependencies = [ - "digest 0.10.3", + "digest 0.10.5", "keccak", ] @@ -9814,8 +9880,8 @@ version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ - "digest 0.10.3", - "rand_core 0.6.3", + "digest 0.10.5", + "rand_core 0.6.4", ] [[package]] @@ -9832,9 +9898,12 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" +dependencies = [ + "autocfg", +] [[package]] name = "slice-group-by" @@ -9847,7 +9916,7 @@ name = "slot-range-helper" version = "0.9.31" dependencies = [ "enumn", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "paste", "sp-runtime", "sp-std", @@ -9864,9 +9933,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "snap" @@ -9884,18 +9953,18 @@ dependencies = [ "blake2", "chacha20poly1305", "curve25519-dalek 4.0.0-pre.1", - "rand_core 0.6.3", + "rand_core 0.6.4", "ring", "rustc_version", - "sha2 0.10.2", + "sha2 0.10.6", "subtle", ] [[package]] name = "socket2" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", @@ -9920,11 +9989,11 @@ dependencies = [ [[package]] name = "sp-api" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "hash-db", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-api-proc-macro", "sp-core", "sp-runtime", @@ -9938,7 +10007,7 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "blake2", "proc-macro-crate", @@ -9950,9 +10019,9 @@ dependencies = [ [[package]] name = "sp-application-crypto" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-core", @@ -9963,11 +10032,11 @@ dependencies = [ [[package]] name = "sp-arithmetic" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "integer-sqrt", "num-traits", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-debug-derive", @@ -9978,9 +10047,9 @@ dependencies = [ [[package]] name = "sp-authority-discovery" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-api", "sp-application-crypto", @@ -9991,10 +10060,10 @@ dependencies = [ [[package]] name = "sp-authorship" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-inherents", "sp-runtime", "sp-std", @@ -10003,9 +10072,9 @@ dependencies = [ [[package]] name = "sp-block-builder" version = "4.0.0-dev" -source = 
"git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-api", "sp-inherents", "sp-runtime", @@ -10015,12 +10084,12 @@ dependencies = [ [[package]] name = "sp-blockchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "futures", "log", "lru", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "sp-api", "sp-consensus", @@ -10033,13 +10102,13 @@ dependencies = [ [[package]] name = "sp-consensus" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "futures", "futures-timer", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-core", "sp-inherents", "sp-runtime", @@ -10052,11 +10121,11 @@ dependencies = [ [[package]] name = "sp-consensus-babe" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "merlin", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-api", @@ -10075,9 +10144,9 @@ dependencies = [ [[package]] name = "sp-consensus-slots" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-arithmetic", @@ -10089,9 +10158,9 @@ dependencies = [ [[package]] name = "sp-consensus-vrf" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "schnorrkel", "sp-core", @@ -10102,7 +10171,7 @@ dependencies = [ [[package]] name = "sp-core" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "array-bytes", "base58", @@ -10120,8 +10189,7 @@ dependencies = [ "log", "merlin", "num-traits", - "parity-scale-codec 3.1.5", - "parity-util-mem", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "primitive-types", "rand 0.7.3", @@ -10148,12 +10216,12 @@ dependencies = [ [[package]] name = "sp-core-hashing" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "blake2", 
"byteorder", - "digest 0.10.3", - "sha2 0.10.2", + "digest 0.10.5", + "sha2 0.10.6", "sha3", "sp-std", "twox-hash", @@ -10162,7 +10230,7 @@ dependencies = [ [[package]] name = "sp-core-hashing-proc-macro" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "proc-macro2", "quote", @@ -10173,7 +10241,7 @@ dependencies = [ [[package]] name = "sp-database" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "kvdb", "parking_lot 0.12.1", @@ -10182,7 +10250,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "proc-macro2", "quote", @@ -10192,10 +10260,10 @@ dependencies = [ [[package]] name = "sp-externalities" version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "environmental", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-std", "sp-storage", ] @@ -10203,11 +10271,11 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "finality-grandpa", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-api", @@ -10221,11 +10289,11 @@ dependencies = [ [[package]] name = "sp-inherents" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "impl-trait-for-tuples", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-core", "sp-runtime", "sp-std", @@ -10235,14 +10303,14 @@ dependencies = [ [[package]] name = "sp-io" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "bytes", "futures", "hash-db", "libsecp256k1", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "secp256k1", "sp-core", @@ -10261,7 +10329,7 @@ dependencies = [ [[package]] name = "sp-keyring" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "lazy_static", "sp-core", @@ -10272,12 +10340,12 @@ dependencies = [ [[package]] name = "sp-keystore" version = "0.12.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "futures", "merlin", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "schnorrkel", "serde", @@ -10289,7 +10357,7 @@ dependencies = [ [[package]] name = "sp-maybe-compressed-blob" version = "4.1.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "thiserror", "zstd", @@ -10298,10 +10366,10 @@ dependencies = [ [[package]] name = "sp-mmr-primitives" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-api", @@ -10315,9 +10383,9 @@ dependencies = [ [[package]] name = "sp-npos-elections" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "sp-arithmetic", @@ -10329,7 +10397,7 @@ dependencies = [ [[package]] name = "sp-offchain" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "sp-api", "sp-core", @@ -10339,7 +10407,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "backtrace", "lazy_static", @@ -10349,7 +10417,7 @@ dependencies = [ [[package]] name = "sp-rpc" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "rustc-hash", "serde", @@ -10359,13 +10427,13 @@ dependencies = [ [[package]] name = "sp-runtime" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "either", "hash256-std-hasher", "impl-trait-for-tuples", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parity-util-mem", "paste", "rand 0.7.3", @@ -10382,11 +10450,11 @@ dependencies = [ [[package]] name = "sp-runtime-interface" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "bytes", 
"impl-trait-for-tuples", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "primitive-types", "sp-externalities", "sp-runtime-interface-proc-macro", @@ -10400,7 +10468,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "Inflector", "proc-macro-crate", @@ -10412,10 +10480,10 @@ dependencies = [ [[package]] name = "sp-sandbox" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-core", "sp-io", "sp-std", @@ -10426,9 +10494,9 @@ dependencies = [ [[package]] name = "sp-session" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-api", "sp-core", @@ -10440,9 +10508,9 @@ dependencies = [ [[package]] name = "sp-staking" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-runtime", "sp-std", @@ -10451,12 +10519,12 @@ dependencies = [ [[package]] name = "sp-state-machine" version = "0.12.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "hash-db", "log", "num-traits", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "rand 0.7.3", "smallvec", @@ -10473,43 +10541,30 @@ dependencies = [ [[package]] name = "sp-std" version = "4.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" [[package]] name = "sp-storage" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "impl-serde", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "ref-cast", "serde", "sp-debug-derive", "sp-std", ] -[[package]] -name = "sp-tasks" -version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" -dependencies = [ - "log", - "sp-core", - "sp-externalities", - "sp-io", - "sp-runtime-interface", - "sp-std", -] - [[package]] name = "sp-timestamp" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "futures-timer", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-api", "sp-inherents", "sp-runtime", @@ -10520,9 +10575,9 @@ dependencies = [ [[package]] name = "sp-tracing" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-std", "tracing", "tracing-core", @@ -10532,7 +10587,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "sp-api", "sp-runtime", @@ -10541,11 +10596,11 @@ dependencies = [ [[package]] name = "sp-transaction-storage-proof" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-core", "sp-inherents", @@ -10557,7 +10612,7 @@ dependencies = [ [[package]] name = "sp-trie" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "ahash", "hash-db", @@ -10566,7 +10621,7 @@ dependencies = [ "lru", "memory-db", "nohash-hasher", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parking_lot 0.12.1", "scale-info", "sp-core", @@ -10580,10 +10635,10 @@ dependencies = [ [[package]] name = "sp-version" version = "5.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "impl-serde", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "parity-wasm", "scale-info", "serde", @@ -10597,9 +10652,9 @@ dependencies = [ [[package]] name = "sp-version-proc-macro" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "proc-macro2", "quote", "syn", @@ -10608,11 +10663,11 @@ dependencies = [ [[package]] name = "sp-wasm-interface" version = "6.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "impl-trait-for-tuples", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-std", "wasmi", "wasmtime", @@ -10621,10 +10676,10 @@ dependencies = [ [[package]] name = "sp-weights" version = "4.0.0" -source = 
"git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "impl-trait-for-tuples", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "serde", "smallvec", @@ -10652,9 +10707,9 @@ dependencies = [ [[package]] name = "ss58-registry" -version = "1.29.0" +version = "1.34.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0837b5d62f42082c9d56cd946495ae273a3c68083b637b9153341d5e465146d" +checksum = "37a9821878e1f13aba383aa40a86fb1b33c7265774ec91e32563cb1dd1577496" dependencies = [ "Inflector", "num-format", @@ -10689,7 +10744,7 @@ dependencies = [ "pallet-election-provider-multi-phase", "pallet-staking", "pallet-transaction-payment", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "paste", "polkadot-core-primitives", "polkadot-runtime", @@ -10708,7 +10763,7 @@ dependencies = [ "sub-tokens", "thiserror", "tokio", - "tracing-subscriber 0.3.11", + "tracing-subscriber 0.3.16", "westend-runtime", ] @@ -10801,9 +10856,9 @@ dependencies = [ [[package]] name = "strum_macros" -version = "0.24.0" +version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6878079b17446e4d3eba6192bb0a2950d5b14f0ed8424b852310e5a94345d0ef" +checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ "heck", "proc-macro2", @@ -10829,14 +10884,14 @@ dependencies = [ "hmac 0.11.0", "pbkdf2 0.8.0", "schnorrkel", - "sha2 0.9.8", + "sha2 0.9.9", "zeroize", ] [[package]] name = "substrate-build-script-utils" version = "3.0.0" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "platforms", ] @@ -10844,13 +10899,13 @@ dependencies = [ [[package]] name = "substrate-frame-rpc-system" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "frame-system-rpc-runtime-api", "futures", "jsonrpsee", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sc-client-api", "sc-rpc-api", "sc-transaction-pool-api", @@ -10865,7 +10920,7 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "futures-util", "hyper", @@ -10878,7 +10933,7 @@ dependencies = [ [[package]] name = "substrate-rpc-client" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "async-trait", "jsonrpsee", @@ -10891,11 +10946,11 @@ dependencies = [ [[package]] name = "substrate-state-trie-migration-rpc" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = 
"git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "jsonrpsee", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sc-client-api", "sc-rpc-api", "scale-info", @@ -10912,12 +10967,12 @@ dependencies = [ [[package]] name = "substrate-test-client" version = "2.0.1" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "array-bytes", "async-trait", "futures", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sc-client-api", "sc-client-db", "sc-consensus", @@ -10938,7 +10993,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" version = "4.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "futures", "substrate-test-utils-derive", @@ -10948,7 +11003,7 @@ dependencies = [ [[package]] name = "substrate-test-utils-derive" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -10959,7 +11014,7 @@ dependencies = [ [[package]] name = "substrate-wasm-builder" version = "5.0.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "ansi_term", "build-helper", @@ -10990,9 +11045,9 @@ dependencies = [ [[package]] name = "symbolic-common" -version = "8.6.0" +version = "8.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e92a52f07eed9afba3d6f883652cde7cd75fcf327dd44e84f210958379158737" +checksum = "f551f902d5642e58039aee6a9021a61037926af96e071816361644983966f540" dependencies = [ "debugid", "memmap2", @@ -11002,9 +11057,9 @@ dependencies = [ [[package]] name = "symbolic-demangle" -version = "8.6.0" +version = "8.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9abc81544d9964975269165bfe5ad198d8b9e2e809c46527323f95588a57693" +checksum = "4564ca7b4e6eb14105aa8bbbce26e080f6b5d9c4373e67167ab31f7b86443750" dependencies = [ "cpp_demangle", "rustc-demangle", @@ -11013,9 +11068,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.98" +version = "1.0.103" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" +checksum = "a864042229133ada95abf3b54fdc62ef5ccabe9515b64717bcb9a1919e59445d" dependencies = [ "proc-macro2", "quote", @@ -11063,9 +11118,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.12.2" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9bffcddbc2458fa3e6058414599e3c838a022abae82e5c67b4f7f80298d5bff" +checksum = "9410d0f6853b1d94f0e519fb95df60f29d2c1eff2d921ffdf01a4c8a3b54f12d" [[package]] name = "tempfile" @@ -11083,25 +11138,25 @@ dependencies = [ [[package]] name = "termcolor" -version = "1.1.2" 
+version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dfed899f0eb03f32ee8c6a0aabdb8a7949659e3466561fc0adf54e26d88c5f4" +checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" dependencies = [ "winapi-util", ] [[package]] name = "termtree" -version = "0.2.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13a4ec180a2de59b57434704ccfad967f789b12737738798fa08798cd5824c16" +checksum = "95059e91184749cb66be6dc994f67f182b6d897cb3df74a5bf66b5e709295fd8" [[package]] name = "test-parachain-adder" version = "0.9.31" dependencies = [ "dlmalloc", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-parachain", "sp-io", "sp-std", @@ -11117,7 +11172,7 @@ dependencies = [ "futures", "futures-timer", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-cli", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -11148,7 +11203,7 @@ version = "0.9.31" dependencies = [ "dlmalloc", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-parachain", "sp-io", "sp-std", @@ -11164,7 +11219,7 @@ dependencies = [ "futures", "futures-timer", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-cli", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -11186,7 +11241,7 @@ dependencies = [ name = "test-parachains" version = "0.9.31" dependencies = [ - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-core", "test-parachain-adder", "test-parachain-halt", @@ -11316,7 +11371,7 @@ dependencies = [ "pbkdf2 0.4.0", "rand 0.7.3", "rustc-hash", - "sha2 0.9.8", + "sha2 0.9.9", "thiserror", "unicode-normalization", "wasm-bindgen", @@ -11334,9 +11389,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] @@ -11349,18 +11404,18 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.19.2" +version = "1.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" +checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" dependencies = [ + "autocfg", "bytes", "libc", "memchr", "mio", "num_cpus", - "once_cell", "parking_lot 0.12.1", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.9", "signal-hook-registry", "socket2", "tokio-macros", @@ -11369,9 +11424,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" dependencies = [ "proc-macro2", "quote", @@ -11390,9 +11445,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.23.2" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27d5f2b839802bd8267fa19b0530f5a08b9c08cd417976be2a65d130fe1c11b" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls", "tokio", @@ -11401,20 +11456,20 @@ dependencies = [ [[package]] name = "tokio-stream" 
-version = "0.1.9" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" +checksum = "d660770404473ccd7bc9f8b28494a811bc18542b915c0855c51e8f419d5223ce" dependencies = [ "futures-core", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.9", "tokio", ] [[package]] name = "tokio-tungstenite" -version = "0.17.1" +version = "0.17.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06cda1232a49558c46f8a504d5b93101d42c0bf7f911f12a105ba48168f821ae" +checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", "log", @@ -11424,64 +11479,51 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" -dependencies = [ - "bytes", - "futures-core", - "futures-sink", - "log", - "pin-project-lite 0.2.7", - "tokio", -] - -[[package]] -name = "tokio-util" -version = "0.7.1" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.9", "tokio", + "tracing", ] [[package]] name = "toml" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ "serde", ] [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.35" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ "cfg-if", - "pin-project-lite 0.2.7", + "pin-project-lite 0.2.9", "tracing-attributes", "tracing-core", ] [[package]] name = "tracing-attributes" -version = "0.1.20" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e65ce065b4b5c53e73bb28912318cb8c9e9ad3921f1d669eb0e68b4c8143a2b" +checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", @@ -11490,9 +11532,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ "once_cell", "valuable", @@ -11543,9 +11585,9 @@ dependencies = [ [[package]] name = "tracing-serde" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" +checksum = 
"bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" dependencies = [ "serde", "tracing-core", @@ -11576,13 +11618,13 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.11" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bc28f93baff38037f64e6f43d34cfa1605f27a49c34e8a04c5e78b0babf2596" +checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" dependencies = [ - "ansi_term", - "lazy_static", "matchers 0.1.0", + "nu-ansi-term", + "once_cell", "regex", "sharded-slab", "smallvec", @@ -11627,7 +11669,7 @@ dependencies = [ "futures-channel", "futures-io", "futures-util", - "idna", + "idna 0.2.3", "ipnet", "lazy_static", "rand 0.8.5", @@ -11666,12 +11708,12 @@ checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" [[package]] name = "try-runtime-cli" version = "0.10.0-dev" -source = "git+https://github.com/paritytech/substrate?branch=master#a6da808575fe403ab2bd7f6cd009896cdc6fd71a" +source = "git+https://github.com/paritytech/substrate?branch=master#e6768a3bd553ddbed12fe1a0e4a2ef8d4f8fdf52" dependencies = [ "clap", "frame-try-runtime", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "remote-externalities", "sc-chain-spec", "sc-cli", @@ -11692,9 +11734,9 @@ dependencies = [ [[package]] name = "trybuild" -version = "1.0.61" +version = "1.0.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fc92f558afb6d1d7c6f175eb8d615b8ef49c227543e68e19c123d4ee43d8a7d" +checksum = "ea496675d71016e9bc76aa42d87f16aefd95447cc5818e671e12b2d7e269075d" dependencies = [ "dissimilar", "glob", @@ -11714,9 +11756,9 @@ checksum = "5e66dcbec4290c69dd03c57e76c2469ea5c7ce109c6dd4351c13055cf71ea055" [[package]] name = "tungstenite" -version = "0.17.2" +version = "0.17.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96a2dea40e7570482f28eb57afbe42d97551905da6a9400acc5c328d24004f5" +checksum = "e27992fd6a8c29ee7eef28fc78349aa244134e10ad447ce3b9f0ac0ed0fa4ce0" dependencies = [ "base64", "byteorder", @@ -11738,28 +11780,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ "cfg-if", - "digest 0.10.3", + "digest 0.10.5", "rand 0.8.5", "static_assertions", ] [[package]] name = "typenum" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "ucd-trie" -version = "0.1.3" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56dee185309b50d1f11bfedef0fe6d036842e3fb77413abef29f8f8d1c5d4c1c" +checksum = "9e79c4d996edb816c91e4308506774452e55e95c3c9de07b6729e17e15a5ef81" [[package]] name = "uint" -version = "0.9.1" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6470ab50f482bde894a037a57064480a246dbfdd5960bd65a44824693f08da5f" +checksum = "a45526d29728d135c2900b0d30573fe3ee79fceb12ef534c7bb30e810a91b601" dependencies = [ "byteorder", "crunchy", @@ -11778,36 +11820,36 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" 
+checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-ident" -version = "1.0.0" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d22af068fba1eb5edcb4aea19d382b2a3deb4c8f9d475c589b6ada9e0fd493ee" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-width" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ed742d4ea2bd1176e236172c8429aaf54486e7ac098db29ffe6529e0ce50973" +checksum = "c0edd1e5b14653f783770bce4a4dabb4a5108a5370a5f5d8cfe8710c361f6c8b" [[package]] name = "unicode-xid" -version = "0.2.2" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] name = "universal-hash" @@ -11815,7 +11857,7 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9f214e8f697e925001e66ec2c6e37a4ef93f0f78c2eed7814394e10c62025b05" dependencies = [ - "generic-array 0.14.4", + "generic-array 0.14.6", "subtle", ] @@ -11839,13 +11881,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", - "idna", - "matches", + "idna 0.3.0", "percent-encoding", ] @@ -11889,9 +11930,9 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "void" @@ -11955,9 +11996,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.78" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ "cfg-if", "serde", @@ -11967,13 +12008,13 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.78" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -11982,9 +12023,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.28" +version = "0.4.33" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d7523cb1f2a4c96c1317ca690031b714a51cc14e05f712446691f413f5d39" +checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" dependencies = [ "cfg-if", "js-sys", @@ -11994,9 +12035,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.78" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -12004,9 +12045,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.78" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", @@ -12017,9 +12058,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.78" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "wasm-instrument" @@ -12088,9 +12129,9 @@ dependencies = [ [[package]] name = "wasmi" -version = "0.13.0" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc13b3c219ca9aafeec59150d80d89851df02e0061bc357b4d66fc55a8d38787" +checksum = "06c326c93fbf86419608361a2c925a31754cf109da1b8b55737070b4d6669422" dependencies = [ "parity-wasm", "wasmi-validation", @@ -12108,9 +12149,9 @@ dependencies = [ [[package]] name = "wasmi_core" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a088e8c4c59c6f2b9eae169bf86328adccc477c00b56d3661e3e9fb397b184" +checksum = "57d20cb3c59b788653d99541c646c561c9dd26506f25c0cebfe810659c54c6d7" dependencies = [ "downcast-rs", "libm", @@ -12130,9 +12171,9 @@ dependencies = [ [[package]] name = "wasmtime" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a10dc9784d8c3a33c970e3939180424955f08af2e7f20368ec02685a0e8f065" +checksum = "4ad5af6ba38311282f2a21670d96e78266e8c8e2f38cbcd52c254df6ccbc7731" dependencies = [ "anyhow", "bincode", @@ -12140,7 +12181,7 @@ dependencies = [ "indexmap", "libc", "log", - "object 0.29.0", + "object", "once_cell", "paste", "psm", @@ -12158,18 +12199,18 @@ dependencies = [ [[package]] name = "wasmtime-asm-macros" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee4dbdc6daf68528cad1275ac91e3f51848ce9824385facc94c759f529decdf8" +checksum = "45de63ddfc8b9223d1adc8f7b2ee5f35d1f6d112833934ad7ea66e4f4339e597" dependencies = [ "cfg-if", ] [[package]] name = "wasmtime-cache" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f507f3fa1ee1b2f9a83644e2514242b1dfe580782c0eb042f1ef70255bc4ffe" +checksum = "bcd849399d17d2270141cfe47fa0d91ee52d5f8ea9b98cf7ddde0d53e5f79882" dependencies = [ "anyhow", "base64", @@ -12177,9 +12218,9 @@ dependencies = [ "directories-next", "file-per-thread-logger", "log", - "rustix", + "rustix 0.35.13", "serde", - "sha2 0.9.8", + "sha2 
0.9.9", "toml", "windows-sys 0.36.1", "zstd", @@ -12187,9 +12228,9 @@ dependencies = [ [[package]] name = "wasmtime-cranelift" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f03cf79d982fc68e94ba0bea6a300a3b94621c4eb9705eece0a4f06b235a3b5" +checksum = "4bd91339b742ff20bfed4532a27b73c86b5bcbfedd6bea2dcdf2d64471e1b5c6" dependencies = [ "anyhow", "cranelift-codegen", @@ -12199,7 +12240,7 @@ dependencies = [ "cranelift-wasm", "gimli", "log", - "object 0.29.0", + "object", "target-lexicon", "thiserror", "wasmparser", @@ -12208,16 +12249,16 @@ dependencies = [ [[package]] name = "wasmtime-environ" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c587c62e91c5499df62012b87b88890d0eb470b2ffecc5964e9da967b70c77c" +checksum = "ebb881c61f4f627b5d45c54e629724974f8a8890d455bcbe634330cc27309644" dependencies = [ "anyhow", "cranelift-entity", "gimli", "indexmap", "log", - "object 0.29.0", + "object", "serde", "target-lexicon", "thiserror", @@ -12227,9 +12268,9 @@ dependencies = [ [[package]] name = "wasmtime-jit" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "047839b5dabeae5424a078c19b8cc897e5943a7fadc69e3d888b9c9a897666b3" +checksum = "1985c628011fe26adf5e23a5301bdc79b245e0e338f14bb58b39e4e25e4d8681" dependencies = [ "addr2line", "anyhow", @@ -12238,9 +12279,9 @@ dependencies = [ "cpp_demangle", "gimli", "log", - "object 0.29.0", + "object", "rustc-demangle", - "rustix", + "rustix 0.35.13", "serde", "target-lexicon", "thiserror", @@ -12252,20 +12293,20 @@ dependencies = [ [[package]] name = "wasmtime-jit-debug" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b299569abf6f99b7b8e020afaf84a700e8636c6a42e242069267322cd5818235" +checksum = "f671b588486f5ccec8c5a3dba6b4c07eac2e66ab8c60e6f4e53717c77f709731" dependencies = [ - "object 0.29.0", + "object", "once_cell", - "rustix", + "rustix 0.35.13", ] [[package]] name = "wasmtime-runtime" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae79e0515160bd5abee5df50a16c4eb8db9f71b530fc988ae1d9ce34dcb8dd01" +checksum = "ee8f92ad4b61736339c29361da85769ebc200f184361959d1792832e592a1afd" dependencies = [ "anyhow", "cc", @@ -12278,7 +12319,7 @@ dependencies = [ "memoffset", "paste", "rand 0.8.5", - "rustix", + "rustix 0.35.13", "thiserror", "wasmtime-asm-macros", "wasmtime-environ", @@ -12288,9 +12329,9 @@ dependencies = [ [[package]] name = "wasmtime-types" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "790cf43ee8e2d5dad1780af30f00d7a972b74725fb1e4f90c28d62733819b185" +checksum = "d23d61cb4c46e837b431196dd06abb11731541021916d03476a178b54dc07aeb" dependencies = [ "cranelift-entity", "serde", @@ -12300,9 +12341,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.55" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" dependencies = [ "js-sys", "wasm-bindgen", @@ -12320,9 +12361,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.22.2" +version = "0.22.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" +checksum = "368bfe657969fb01238bb756d351dcade285e0f6fcbd36dcb23359a5169975be" dependencies = [ "webpki", ] @@ -12392,7 +12433,7 @@ dependencies = [ "pallet-vesting", "pallet-xcm", "pallet-xcm-benchmarks", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -12445,13 +12486,13 @@ dependencies = [ [[package]] name = "which" -version = "4.2.2" +version = "4.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea187a8ef279bc014ec368c27a920da2024d2a711109bfbe3440585d5cf27ad9" +checksum = "1c831fbbee9e129a8cf93e7747a82da9d95ba8e16621cae60ec2cdc849bacb7b" dependencies = [ "either", - "lazy_static", "libc", + "once_cell", ] [[package]] @@ -12504,19 +12545,6 @@ dependencies = [ "windows_x86_64_msvc 0.34.0", ] -[[package]] -name = "windows-sys" -version = "0.32.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3df6e476185f92a12c072be4a189a0210dcdcf512a1891d6dff9edb874deadc6" -dependencies = [ - "windows_aarch64_msvc 0.32.0", - "windows_i686_gnu 0.32.0", - "windows_i686_msvc 0.32.0", - "windows_x86_64_gnu 0.32.0", - "windows_x86_64_msvc 0.32.0", -] - [[package]] name = "windows-sys" version = "0.36.1" @@ -12531,10 +12559,25 @@ dependencies = [ ] [[package]] -name = "windows_aarch64_msvc" -version = "0.32.0" +name = "windows-sys" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a3e1820f08b8513f676f7ab6c1f99ff312fb97b553d30ff4dd86f9f15728aa7" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc 0.42.0", + "windows_i686_gnu 0.42.0", + "windows_i686_msvc 0.42.0", + "windows_x86_64_gnu 0.42.0", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc 0.42.0", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" +checksum = "41d2aa71f6f0cbe00ae5167d90ef3cfe66527d6f613ca78ac8024c3ccab9a19e" [[package]] name = "windows_aarch64_msvc" @@ -12549,10 +12592,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" [[package]] -name = "windows_i686_gnu" -version = "0.32.0" +name = "windows_aarch64_msvc" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" +checksum = "dd0f252f5a35cac83d6311b2e795981f5ee6e67eb1f9a7f64eb4500fbc4dcdb4" [[package]] name = "windows_i686_gnu" @@ -12567,10 +12610,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" [[package]] -name = "windows_i686_msvc" -version = "0.32.0" +name = "windows_i686_gnu" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" +checksum = "fbeae19f6716841636c28d695375df17562ca208b2b7d0dc47635a50ae6c5de7" [[package]] name = "windows_i686_msvc" @@ -12585,10 +12628,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" [[package]] -name = "windows_x86_64_gnu" -version = "0.32.0" +name = "windows_i686_msvc" +version = "0.42.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" +checksum = "84c12f65daa39dd2babe6e442988fc329d6243fdce47d7d2d155b8d874862246" [[package]] name = "windows_x86_64_gnu" @@ -12603,10 +12646,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" [[package]] -name = "windows_x86_64_msvc" -version = "0.32.0" +name = "windows_x86_64_gnu" +version = "0.42.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf7b1b21b5362cbc318f686150e5bcea75ecedc74dd157d874d754a2ca44b0ed" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" +checksum = "09d525d2ba30eeb3297665bd434a54297e4170c7f1a44cad4ef58095b4cd2028" [[package]] name = "windows_x86_64_msvc" @@ -12621,13 +12670,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" [[package]] -name = "winreg" -version = "0.7.0" +name = "windows_x86_64_msvc" +version = "0.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0120db82e8a1e0b9fb3345a539c478767c0048d842860994d96113d5b667bd69" -dependencies = [ - "winapi", -] +checksum = "f40009d85759725a34da6d89a94e63d7bdc50a862acf0dbc7c8e488f1edcb6f5" [[package]] name = "winreg" @@ -12671,7 +12717,7 @@ dependencies = [ "derivative", "impl-trait-for-tuples", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "scale-info", "sp-runtime", "xcm-procedural", @@ -12687,7 +12733,7 @@ dependencies = [ "pallet-balances", "pallet-transaction-payment", "pallet-xcm", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-parachain", "polkadot-runtime-parachains", "scale-info", @@ -12708,7 +12754,7 @@ dependencies = [ "frame-support", "impl-trait-for-tuples", "log", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "sp-arithmetic", "sp-core", "sp-io", @@ -12752,7 +12798,7 @@ name = "xcm-simulator" version = "0.9.31" dependencies = [ "frame-support", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "paste", "polkadot-core-primitives", "polkadot-parachain", @@ -12771,7 +12817,7 @@ dependencies = [ "frame-system", "pallet-balances", "pallet-xcm", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-core-primitives", "polkadot-parachain", "polkadot-runtime-parachains", @@ -12795,7 +12841,7 @@ dependencies = [ "honggfuzz", "pallet-balances", "pallet-xcm", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "polkadot-core-primitives", "polkadot-parachain", "polkadot-runtime-parachains", @@ -12824,6 +12870,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "zeroize" version = "1.5.7" @@ -12851,7 +12903,7 @@ version = "0.9.31" dependencies = [ "futures-util", "lazy_static", - "parity-scale-codec 3.1.5", + "parity-scale-codec 3.2.1", "reqwest", "serde", "serde_json", From 8b1eb1578195733e8f64fda33e66c6c7d4e17fc9 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Sun, 13 Nov 2022 23:38:52 +0400 Subject: [PATCH 21/76] Implement 
`StagingValidityConstraints` Runtime API method (#6258)

* Implement StagingValidityConstraints
* spellcheck
* fix ump params
* Update hrmp comment
* Introduce ump per candidate limit
* hypothetical earliest block
* refactor primitives usage
* hypothetical earliest block number test
* fix build
---
 .../src/fragment_tree.rs                      |  1 +
 .../src/inclusion_emulator/staging.rs         | 45 +++++++++++++
 primitives/src/vstaging/mod.rs                | 19 ++++--
 runtime/parachains/src/hrmp.rs                | 36 +++++++++-
 runtime/parachains/src/paras/mod.rs           |  2 +
 .../src/runtime_api_impl/vstaging.rs          | 66 +++++++++++++++++--
 runtime/parachains/src/shared.rs              | 12 ++++
 runtime/parachains/src/shared/tests.rs        | 28 ++++++++
 runtime/parachains/src/ump.rs                 |  1 +
 9 files changed, 195 insertions(+), 15 deletions(-)

diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs
index ab9d678f77b0..eb803808c7c9 100644
--- a/node/core/prospective-parachains/src/fragment_tree.rs
+++ b/node/core/prospective-parachains/src/fragment_tree.rs
@@ -707,6 +707,7 @@ mod tests {
 			max_code_size: 1_000_000,
 			ump_remaining: 10,
 			ump_remaining_bytes: 1_000,
+			max_ump_num_per_candidate: 10,
 			dmp_remaining_messages: 10,
 			hrmp_inbound: InboundHrmpLimitations { valid_watermarks },
 			hrmp_channels_out: HashMap::new(),
diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs
index 60eecb9b5180..c875d5b65870 100644
--- a/node/subsystem-util/src/inclusion_emulator/staging.rs
+++ b/node/subsystem-util/src/inclusion_emulator/staging.rs
@@ -151,6 +151,8 @@ pub struct Constraints {
 	pub ump_remaining: usize,
 	/// The amount of UMP bytes remaining.
 	pub ump_remaining_bytes: usize,
+	/// The maximum number of UMP messages allowed per candidate.
+	pub max_ump_num_per_candidate: usize,
 	/// The amount of remaining DMP messages.
 	pub dmp_remaining_messages: usize,
 	/// The limitations of all registered inbound HRMP channels.
@@ -178,6 +180,7 @@ impl From for Constraints {
 			max_code_size: c.max_code_size as _,
 			ump_remaining: c.ump_remaining as _,
 			ump_remaining_bytes: c.ump_remaining_bytes as _,
+			max_ump_num_per_candidate: c.max_ump_num_per_candidate as _,
 			dmp_remaining_messages: c.dmp_remaining_messages as _,
 			hrmp_inbound: InboundHrmpLimitations {
 				valid_watermarks: c.hrmp_inbound.valid_watermarks,
@@ -561,6 +564,13 @@ pub enum FragmentValidityError {
 	///
 	/// Min allowed, current.
 	RelayParentTooOld(BlockNumber, BlockNumber),
+	/// Too many upward messages submitted.
+	UmpMessagesPerCandidateOverflow {
+		/// The amount of messages a single candidate can submit.
+		messages_allowed: usize,
+		/// The amount of upward messages submitted.
+		messages_submitted: usize,
+	},
 	/// Too many messages submitted to all HRMP channels.
 	HrmpMessagesPerCandidateOverflow {
 		/// The amount of messages a single candidate can submit.
@@ -757,6 +767,13 @@ fn validate_against_constraints(
 		})
 	}
 
+	if candidate.commitments.upward_messages.len() > constraints.max_ump_num_per_candidate {
+		return Err(FragmentValidityError::UmpMessagesPerCandidateOverflow {
+			messages_allowed: constraints.max_ump_num_per_candidate,
+			messages_submitted: candidate.commitments.upward_messages.len(),
+		})
+	}
+
 	constraints
 		.check_modifications(&modifications)
 		.map_err(FragmentValidityError::OutputsInvalid)
@@ -881,6 +898,7 @@ mod tests {
 			max_code_size: 1000,
 			ump_remaining: 10,
 			ump_remaining_bytes: 1024,
+			max_ump_num_per_candidate: 5,
 			dmp_remaining_messages: 5,
 			hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![6, 8] },
 			hrmp_channels_out: {
@@ -1256,6 +1274,33 @@
 		);
 	}
 
+	#[test]
+	fn fragment_ump_messages_overflow() {
+		let relay_parent = RelayChainBlockInfo {
+			number: 6,
+			hash: Hash::repeat_byte(0x0a),
+			storage_root: Hash::repeat_byte(0xff),
+		};
+
+		let constraints = make_constraints();
+		let mut candidate = make_candidate(&constraints, &relay_parent);
+
+		let max_ump = constraints.max_ump_num_per_candidate;
+
+		candidate
+			.commitments
+			.upward_messages
+			.extend((0..max_ump + 1).map(|i| vec![i as u8]));
+
+		assert_eq!(
+			Fragment::new(relay_parent, constraints, candidate),
+			Err(FragmentValidityError::UmpMessagesPerCandidateOverflow {
+				messages_allowed: max_ump,
+				messages_submitted: max_ump + 1,
+			}),
+		);
+	}
+
 	#[test]
 	fn fragment_code_upgrade_restricted() {
 		let relay_parent = RelayChainBlockInfo {
diff --git a/primitives/src/vstaging/mod.rs b/primitives/src/vstaging/mod.rs
index 87cf8c8ba85c..16bfc085570a 100644
--- a/primitives/src/vstaging/mod.rs
+++ b/primitives/src/vstaging/mod.rs
@@ -33,9 +33,12 @@ pub type ParaId = Id;
 /// Constraints on inbound HRMP channels.
 #[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
 #[cfg_attr(feature = "std", derive(MallocSizeOf))]
-pub struct InboundHrmpLimitations {
-	/// An exhaustive set of all valid watermarks, sorted ascending
-	pub valid_watermarks: Vec<BlockNumber>,
+pub struct InboundHrmpLimitations<N = BlockNumber> {
+	/// An exhaustive set of all valid watermarks, sorted ascending.
+	///
+	/// It's only expected to contain block numbers at which messages were
+	/// previously sent to a para, excluding the most recent head.
+	pub valid_watermarks: Vec<N>,
 }
 
 /// Constraints on outbound HRMP channels.
@@ -53,9 +56,9 @@ pub struct OutboundHrmpChannelLimitations {
 /// parachain, which should be apparent from usage.
 #[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
 #[cfg_attr(feature = "std", derive(MallocSizeOf))]
-pub struct Constraints {
+pub struct Constraints<N = BlockNumber> {
 	/// The minimum relay-parent number accepted under these constraints.
-	pub min_relay_parent_number: BlockNumber,
+	pub min_relay_parent_number: N,
 	/// The maximum Proof-of-Validity size allowed, in bytes.
 	pub max_pov_size: u32,
 	/// The maximum new validation code size allowed, in bytes.
@@ -64,10 +67,12 @@ pub struct Constraints {
 	pub ump_remaining: u32,
 	/// The amount of UMP bytes remaining.
 	pub ump_remaining_bytes: u32,
+	/// The maximum number of UMP messages allowed per candidate.
+	pub max_ump_num_per_candidate: u32,
 	/// The amount of remaining DMP messages.
 	pub dmp_remaining_messages: u32,
 	/// The limitations of all registered inbound HRMP channels.
-	pub hrmp_inbound: InboundHrmpLimitations,
+	pub hrmp_inbound: InboundHrmpLimitations<N>,
 	/// The limitations of all registered outbound HRMP channels.
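+	/// Each entry pairs a recipient para with the remaining number of messages
+	/// and remaining bytes allowed in that channel.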
 	pub hrmp_channels_out: Vec<(ParaId, OutboundHrmpChannelLimitations)>,
 	/// The maximum number of HRMP messages allowed per candidate.
@@ -80,5 +85,5 @@ pub struct Constraints {
 	pub upgrade_restriction: Option<UpgradeRestriction>,
 	/// The future validation code hash, if any, and at what relay-parent
 	/// number the upgrade would be minimally applied.
-	pub future_validation_code: Option<(BlockNumber, ValidationCodeHash)>,
+	pub future_validation_code: Option<(N, ValidationCodeHash)>,
 }
diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs
index 53ad6781048f..f61ffaf0912c 100644
--- a/runtime/parachains/src/hrmp.rs
+++ b/runtime/parachains/src/hrmp.rs
@@ -921,6 +921,14 @@ impl<T: Config> Pallet<T> {
 		}
 	}
 
+	/// Returns the HRMP watermarks of messages previously sent to a given para.
+	pub(crate) fn valid_watermarks(recipient: ParaId) -> Vec<T::BlockNumber> {
+		<Self as Store>::HrmpChannelDigests::get(&recipient)
+			.into_iter()
+			.map(|(block_no, _)| block_no)
+			.collect()
+	}
+
 	pub(crate) fn check_outbound_hrmp(
 		config: &HostConfiguration<T::BlockNumber>,
 		sender: ParaId,
@@ -985,6 +993,28 @@ impl<T: Config> Pallet<T> {
 		Ok(())
 	}
 
+	/// Returns the remaining outbound channel capacity, in messages and in bytes, per recipient para.
+	pub(crate) fn outbound_remaining_capacity(sender: ParaId) -> Vec<(ParaId, (u32, u32))> {
+		let recipients = <Self as Store>::HrmpEgressChannelsIndex::get(&sender);
+		let mut remaining = Vec::with_capacity(recipients.len());
+
+		for recipient in recipients {
+			let Some(channel) =
+				<Self as Store>::HrmpChannels::get(&HrmpChannelId { sender, recipient }) else {
+				continue
+			};
+			remaining.push((
+				recipient,
+				(
+					channel.max_capacity - channel.msg_count,
+					channel.max_total_size - channel.total_size,
+				),
+			));
+		}
+
+		remaining
+	}
+
 	pub(crate) fn prune_hrmp(recipient: ParaId, new_hrmp_watermark: T::BlockNumber) -> Weight {
 		let mut weight = Weight::zero();
 
@@ -1086,12 +1116,12 @@ impl<T: Config> Pallet<T> {
 		<Self as Store>::HrmpChannels::insert(&channel_id, channel);
 		<Self as Store>::HrmpChannelContents::append(&channel_id, inbound);
 
-		// The digests are sorted in ascending by block number order. Assuming absence of
-		// contextual execution, there are only two possible scenarios here:
+		// The digests are sorted in ascending order by block number. There are only two possible
+		// scenarios here ("the current" refers to the block of the candidate's inclusion):
 		//
 		// (a) It's the first time anybody sends a message to this recipient within this block.
 		//     In this case, the digest vector would be empty or the block number of the latest
-		//     entry is smaller than the current.
+		//     entry is smaller than the current.
 		//
 		// (b) Somebody has already sent a message within the current block. That means that
 		//     the block number of the latest entry is equal to the current.
diff --git a/runtime/parachains/src/paras/mod.rs b/runtime/parachains/src/paras/mod.rs
index d85b9f2f3a61..e4c9ed58660d 100644
--- a/runtime/parachains/src/paras/mod.rs
+++ b/runtime/parachains/src/paras/mod.rs
@@ -697,6 +697,7 @@ pub mod pallet {
 	///
 	/// Corresponding code can be retrieved with [`CodeByHash`].
 	#[pallet::storage]
+	#[pallet::getter(fn future_code_hash)]
 	pub(super) type FutureCodeHash<T: Config> =
 		StorageMap<_, Twox64Concat, ParaId, ValidationCodeHash>;
 
@@ -723,6 +724,7 @@ pub mod pallet {
 	/// NOTE that this field is used by parachains via merkle storage proofs, therefore changing
 	/// the format will require migration of parachains.
 	#[pallet::storage]
+	#[pallet::getter(fn upgrade_restriction_signal)]
 	pub(super) type UpgradeRestrictionSignal<T: Config> =
 		StorageMap<_, Twox64Concat, ParaId, UpgradeRestriction>;
diff --git a/runtime/parachains/src/runtime_api_impl/vstaging.rs b/runtime/parachains/src/runtime_api_impl/vstaging.rs
index 23ce8931b2ad..3fe6e2a0b979 100644
--- a/runtime/parachains/src/runtime_api_impl/vstaging.rs
+++ b/runtime/parachains/src/runtime_api_impl/vstaging.rs
@@ -16,8 +16,11 @@
 
 //! Put implementations of functions from staging APIs here.
 
-use crate::disputes;
-use primitives::v2::{CandidateHash, DisputeState, SessionIndex};
+use crate::{configuration, disputes, dmp, hrmp, initializer, paras, shared, ump};
+use primitives::{
+	v2::{CandidateHash, DisputeState, Id as ParaId, SessionIndex},
+	vstaging::{Constraints, InboundHrmpLimitations, OutboundHrmpChannelLimitations},
+};
 use sp_std::prelude::*;
 
 /// Implementation for `get_session_disputes` function from the runtime API
@@ -26,6 +29,59 @@ pub fn get_session_disputes(
 	<disputes::Pallet<T>>::disputes()
 }
 
-// TODO [now]: implicit `validity_constraints`. Ensure that `min_relay_parent`
-// never goes lower than the point at which asynchronous backing was enabled.
-// Also, never cross session boundaries.
+/// Implementation for `StagingValidityConstraints` function from the runtime API
+pub fn validity_constraints<T: initializer::Config>(
+	para_id: ParaId,
+) -> Option<Constraints<T::BlockNumber>> {
+	// Async backing is only expected to be enabled with a tracker capacity of 1.
+	// A subsequent configuration update is applied on a new session, which always
+	// clears the buffer.
+	//
+	// Thus, the minimum relay parent is ensured to have asynchronous backing enabled.
+	let now = <frame_system::Pallet<T>>::block_number();
+	let min_relay_parent_number = <shared::Pallet<T>>::allowed_relay_parents()
+		.hypothetical_earliest_block_number(now, shared::ALLOWED_RELAY_PARENT_LOOKBACK);
+
+	let required_parent = <paras::Pallet<T>>::para_head(para_id)?;
+	let validation_code_hash = <paras::Pallet<T>>::current_code_hash(para_id)?;
+
+	let upgrade_restriction = <paras::Pallet<T>>::upgrade_restriction_signal(para_id);
+	let future_validation_code =
+		<paras::Pallet<T>>::future_code_upgrade_at(para_id).and_then(|block_num| {
+			// Only read the storage if there's a pending upgrade.
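+			// `zip` pairs the upgrade block number with the pending code hash and
+			// yields `None` if the hash is missing, so a partially-set upgrade is
+			// never reported in the constraints.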
+			Some(block_num).zip(<paras::Pallet<T>>::future_code_hash(para_id))
+		});
+
+	let config = <configuration::Pallet<T>>::config();
+	let (ump_msg_count, ump_total_bytes) = <ump::Pallet<T>>::relay_dispatch_queue_size(para_id);
+	let ump_remaining = config.max_upward_queue_count - ump_msg_count;
+	let ump_remaining_bytes = config.max_upward_queue_size - ump_total_bytes;
+
+	let dmp_remaining_messages = <dmp::Pallet<T>>::dmq_length(para_id);
+
+	let valid_watermarks = <hrmp::Pallet<T>>::valid_watermarks(para_id);
+	let hrmp_inbound = InboundHrmpLimitations { valid_watermarks };
+	let hrmp_channels_out = <hrmp::Pallet<T>>::outbound_remaining_capacity(para_id)
+		.into_iter()
+		.map(|(para, (messages_remaining, bytes_remaining))| {
+			(para, OutboundHrmpChannelLimitations { messages_remaining, bytes_remaining })
+		})
+		.collect();
+
+	Some(Constraints {
+		min_relay_parent_number,
+		max_pov_size: config.max_pov_size,
+		max_code_size: config.max_code_size,
+		ump_remaining,
+		ump_remaining_bytes,
+		max_ump_num_per_candidate: config.max_upward_message_num_per_candidate,
+		dmp_remaining_messages,
+		hrmp_inbound,
+		hrmp_channels_out,
+		max_hrmp_num_per_candidate: config.hrmp_max_message_num_per_candidate,
+		required_parent,
+		validation_code_hash,
+		upgrade_restriction,
+		future_validation_code,
+	})
+}
diff --git a/runtime/parachains/src/shared.rs b/runtime/parachains/src/shared.rs
index 5eeceeb4797b..cb125d8ef884 100644
--- a/runtime/parachains/src/shared.rs
+++ b/runtime/parachains/src/shared.rs
@@ -108,6 +108,18 @@ impl
 		Some((self.buffer[pos].1, number))
 	}
+
+	/// Returns the block number of the earliest block the buffer would contain if
+	/// `now` were pushed into it.
+	pub(crate) fn hypothetical_earliest_block_number(
+		&self,
+		now: BlockNumber,
+		max_len: usize,
+	) -> BlockNumber {
+		let allowed_ancestry_len = max_len.saturating_sub(1).min(self.buffer.len());
+
+		now - BlockNumber::from(allowed_ancestry_len as u32)
+	}
 }
 
 #[frame_support::pallet]
diff --git a/runtime/parachains/src/shared/tests.rs b/runtime/parachains/src/shared/tests.rs
index 0113c3539d9a..5594c9420d03 100644
--- a/runtime/parachains/src/shared/tests.rs
+++ b/runtime/parachains/src/shared/tests.rs
@@ -20,11 +20,39 @@ use crate::{
 	mock::{new_test_ext, MockGenesisConfig, ParasShared},
 };
 use keyring::Sr25519Keyring;
+use primitives::v2::Hash;
 
 fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec<ValidatorId> {
 	val_ids.iter().map(|v| v.public().into()).collect()
 }
 
+#[test]
+fn tracker_earliest_block_number() {
+	let mut tracker = AllowedRelayParentsTracker::default();
+
+	// Test it on an empty tracker.
+	let now: u32 = 1;
+	let max_len = 5;
+	assert_eq!(tracker.hypothetical_earliest_block_number(now, max_len), now);
+
+	// Push a single block into the tracker, assuming max capacity is 1.
+	let max_len = 1;
+	tracker.update(Hash::zero(), Hash::zero(), 0, max_len);
+	assert_eq!(tracker.hypothetical_earliest_block_number(now, max_len), now);
+
+	// Test a greater capacity.
+	let max_len = 5;
+	let now = 4;
+	for i in 1..now {
+		tracker.update(Hash::zero(), Hash::zero(), i, max_len);
+		assert_eq!(tracker.hypothetical_earliest_block_number(i + 1, max_len), 0);
+	}
+
+	// Capacity exceeded.
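+	// Pushing block 4 fills the buffer to `max_len = 5` (blocks 0..=4). For a
+	// hypothetical push of block 5, the allowed ancestry is `max_len - 1 = 4`,
+	// so the earliest retained block becomes `5 - 4 = 1`.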
+ tracker.update(Hash::zero(), Hash::zero(), now, max_len); + assert_eq!(tracker.hypothetical_earliest_block_number(now + 1, max_len), 1); +} + #[test] fn sets_and_shuffles_validators() { let validators = vec![ diff --git a/runtime/parachains/src/ump.rs b/runtime/parachains/src/ump.rs index 5aa7b17d923c..73dde607ff17 100644 --- a/runtime/parachains/src/ump.rs +++ b/runtime/parachains/src/ump.rs @@ -304,6 +304,7 @@ pub mod pallet { // NOTE that this field is used by parachains via merkle storage proofs, therefore changing // the format will require migration of parachains. #[pallet::storage] + #[pallet::getter(fn relay_dispatch_queue_size)] pub type RelayDispatchQueueSize = StorageMap<_, Twox64Concat, ParaId, (u32, u32), ValueQuery>; From c41cbbe4bc0e20116327e23a48f2c7f46e391687 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Thu, 1 Dec 2022 00:49:00 +0400 Subject: [PATCH 22/76] Prepare the Runtime for asynchronous backing upgrade (#6287) * Introduce async backing params to runtime config * fix cumulus config * use config * finish runtimes * Introduce new staging API * Update collator protocol * Update provisioner * Update prospective parachains * Update backing * Move async backing params lower in the config * make naming consistent * misc --- node/core/backing/src/lib.rs | 26 ++- node/core/backing/src/tests/mod.rs | 10 +- .../src/tests/prospective_parachains.rs | 12 +- node/core/prospective-parachains/src/lib.rs | 27 +-- node/core/provisioner/src/lib.rs | 2 +- node/core/provisioner/src/tests.rs | 16 +- node/core/runtime-api/src/cache.rs | 69 ++++--- node/core/runtime-api/src/lib.rs | 37 +++- .../src/collator_side/mod.rs | 12 +- .../src/collator_side/tests/mod.rs | 12 +- .../tests/prospective_parachains.rs | 16 +- node/network/collator-protocol/src/lib.rs | 9 - .../src/validator_side/collation.rs | 10 +- .../src/validator_side/mod.rs | 16 +- .../src/validator_side/tests/mod.rs | 52 +++-- .../tests/prospective_parachains.rs | 25 ++- node/subsystem-types/src/messages.rs | 8 +- node/subsystem-types/src/runtime_client.rs | 14 ++ node/subsystem-util/src/lib.rs | 16 +- node/subsystem-util/src/runtime/mod.rs | 66 ++++--- primitives/src/runtime_api.rs | 13 +- primitives/src/vstaging/mod.rs | 17 ++ runtime/kusama/src/lib.rs | 6 +- runtime/parachains/src/configuration.rs | 28 ++- .../parachains/src/configuration/migration.rs | 180 +++++++++--------- runtime/parachains/src/configuration/tests.rs | 4 + runtime/parachains/src/inclusion/tests.rs | 26 ++- runtime/parachains/src/paras_inherent/mod.rs | 12 +- .../src/runtime_api_impl/vstaging.rs | 16 +- runtime/parachains/src/shared.rs | 21 +- runtime/parachains/src/shared/tests.rs | 20 +- runtime/polkadot/src/lib.rs | 6 +- runtime/rococo/src/lib.rs | 12 +- runtime/test-runtime/src/lib.rs | 4 - runtime/westend/src/lib.rs | 12 +- 35 files changed, 500 insertions(+), 332 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 4a70555d08a2..e9a1afd5f8b5 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -772,7 +772,7 @@ async fn handle_active_leaves_update( state: &mut State, ) -> Result<(), Error> { enum LeafHasProspectiveParachains { - Enabled(Result, ImplicitViewFetchError>), + Enabled(Result), Disabled, } @@ -788,8 +788,8 @@ async fn handle_active_leaves_update( leaf, match mode { ProspectiveParachainsMode::Disabled => LeafHasProspectiveParachains::Disabled, - ProspectiveParachainsMode::Enabled => LeafHasProspectiveParachains::Enabled( - 
state.implicit_view.activate_leaf(ctx.sender(), leaf_hash).await, + ProspectiveParachainsMode::Enabled { .. } => LeafHasProspectiveParachains::Enabled( + state.implicit_view.activate_leaf(ctx.sender(), leaf_hash).await.map(|_| mode), ), }, )) @@ -824,7 +824,7 @@ async fn handle_active_leaves_update( // Get relay parents which might be fresh but might be known already // that are explicit or implicit from the new active leaf. - let fresh_relay_parents = match res { + let (fresh_relay_parents, leaf_mode) = match res { None => return Ok(()), Some((leaf, LeafHasProspectiveParachains::Disabled)) => { // defensive in this case - for enabled, this manifests as an error. @@ -844,9 +844,9 @@ async fn handle_active_leaves_update( }, ); - vec![leaf.hash] + (vec![leaf.hash], ProspectiveParachainsMode::Disabled) }, - Some((leaf, LeafHasProspectiveParachains::Enabled(Ok(_)))) => { + Some((leaf, LeafHasProspectiveParachains::Enabled(Ok(prospective_parachains_mode)))) => { let fresh_relay_parents = state.implicit_view.known_allowed_relay_parents_under(&leaf.hash, None); @@ -907,13 +907,10 @@ async fn handle_active_leaves_update( state.per_leaf.insert( leaf.hash, - ActiveLeafState { - prospective_parachains_mode: ProspectiveParachainsMode::Enabled, - seconded_at_depth, - }, + ActiveLeafState { prospective_parachains_mode, seconded_at_depth }, ); - match fresh_relay_parents { + let fresh_relay_parent = match fresh_relay_parents { Some(f) => f.to_vec(), None => { gum::warn!( @@ -924,7 +921,8 @@ async fn handle_active_leaves_update( vec![leaf.hash] }, - } + }; + (fresh_relay_parent, prospective_parachains_mode) }, Some((leaf, LeafHasProspectiveParachains::Enabled(Err(e)))) => { gum::debug!( @@ -951,7 +949,7 @@ async fn handle_active_leaves_update( // subsystem that it is an ancestor of a leaf which // has prospective parachains enabled and that the // block itself did. - ProspectiveParachainsMode::Enabled + leaf_mode }, Some(l) => l.prospective_parachains_mode, }; @@ -1061,7 +1059,7 @@ async fn construct_per_relay_parent_state( let table_context = TableContext { groups, validators, validator }; let table_config = TableConfig { allow_multiple_seconded: match mode { - ProspectiveParachainsMode::Enabled => true, + ProspectiveParachainsMode::Enabled { .. } => true, ProspectiveParachainsMode::Disabled => false, }, }; diff --git a/node/core/backing/src/tests/mod.rs b/node/core/backing/src/tests/mod.rs index dff05c7b76f0..4e6577fa0d7a 100644 --- a/node/core/backing/src/tests/mod.rs +++ b/node/core/backing/src/tests/mod.rs @@ -23,6 +23,7 @@ use assert_matches::assert_matches; use futures::{future, Future}; use polkadot_node_primitives::{BlockData, InvalidCandidate, SignedFullStatement, Statement}; use polkadot_node_subsystem::{ + errors::RuntimeApiError, jaeger, messages::{ AllMessages, CollatorProtocolMessage, RuntimeApiMessage, RuntimeApiRequest, @@ -44,7 +45,8 @@ use std::collections::HashMap; mod prospective_parachains; -const API_VERSION_PROSPECTIVE_DISABLED: u32 = 2; +const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError = + RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" }; fn validator_pubkeys(val_ids: &[Sr25519Keyring]) -> Vec { val_ids.iter().map(|v| v.public().into()).collect() @@ -242,14 +244,12 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS )))) .await; - // Prospective parachains mode is temporarily defined by the Runtime API version. - // Disable it for the test leaf. 
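+	// The mode is now probed via the `StagingAsyncBackingParameters` runtime API;
+	// answering with `NotSupported` emulates a runtime without async backing.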
assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) ) if parent == test_state.relay_parent => { - tx.send(Ok(API_VERSION_PROSPECTIVE_DISABLED)).unwrap(); + tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); } ); diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 86ef1e069977..4d68fd99f798 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -17,11 +17,15 @@ //! Tests for the backing subsystem with enabled prospective parachains. use polkadot_node_subsystem::{messages::ChainApiMessage, TimeoutExt}; -use polkadot_primitives::v2::{BlockNumber, Header, OccupiedCore}; +use polkadot_primitives::{ + v2::{BlockNumber, Header, OccupiedCore}, + vstaging as vstaging_primitives, +}; use super::*; -const API_VERSION_PROSPECTIVE_ENABLED: u32 = RuntimeApiRequest::VALIDITY_CONSTRAINTS; +const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParameters = + vstaging_primitives::AsyncBackingParameters { max_candidate_depth: 4, allowed_ancestry_len: 3 }; struct TestLeaf { activated: ActivatedLeaf, @@ -52,9 +56,9 @@ async fn activate_leaf( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::Version(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) ) if parent == leaf_hash => { - tx.send(Ok(API_VERSION_PROSPECTIVE_ENABLED)).unwrap(); + tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); } ); diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index effa87c3e032..35a9422efc84 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -41,7 +41,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::{ inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}, - runtime::prospective_parachains_mode, + runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, }; use polkadot_primitives::vstaging::{ BlockNumber, CandidateHash, CommittedCandidateReceipt, CoreState, Hash, Id as ParaId, @@ -58,18 +58,6 @@ mod fragment_tree; const LOG_TARGET: &str = "parachain::prospective-parachains"; -// The maximum depth the subsystem will allow. 'depth' is defined as the -// amount of blocks between the para head in a relay-chain block's state -// and a candidate with a particular relay-parent. -// -// This value is chosen mostly for reasons of resource-limitation. -// Without it, a malicious validator group could create arbitrarily long, -// useless prospective parachains and DoS honest nodes. -const MAX_DEPTH: usize = 4; - -// The maximum ancestry we support. -const MAX_ANCESTRY: usize = 5; - struct RelayBlockViewData { // Scheduling info for paras and upcoming paras. fragment_trees: HashMap, @@ -171,7 +159,8 @@ async fn handle_active_leaves_update( let mode = prospective_parachains_mode(ctx.sender(), hash) .await .map_err(JfyiError::Runtime)?; - if !mode.is_enabled() { + + let ProspectiveParachainsMode::Enabled { max_candidate_depth, allowed_ancestry_len } = mode else { gum::trace!( target: LOG_TARGET, block_hash = ?hash, @@ -180,7 +169,7 @@ async fn handle_active_leaves_update( // Not a part of any allowed ancestry. 
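+		// Async backing is disabled for this leaf, so it contributes no fragment trees.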
return Ok(()) - } + }; let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?; @@ -200,7 +189,7 @@ async fn handle_active_leaves_update( Some(info) => info, }; - let ancestry = fetch_ancestry(&mut *ctx, hash, MAX_ANCESTRY).await?; + let ancestry = fetch_ancestry(&mut *ctx, hash, allowed_ancestry_len).await?; // Find constraints. let mut fragment_trees = HashMap::new(); @@ -230,7 +219,7 @@ async fn handle_active_leaves_update( para, block_info.clone(), constraints, - MAX_DEPTH, + max_candidate_depth, ancestry.iter().cloned(), ) .expect("ancestors are provided in reverse order and correctly; qed"); @@ -627,6 +616,10 @@ async fn fetch_ancestry( relay_hash: Hash, ancestors: usize, ) -> JfyiErrorResult> { + if ancestors == 0 { + return Ok(Vec::new()) + } + let (tx, rx) = oneshot::channel(); ctx.send_message(ChainApiMessage::Ancestors { hash: relay_hash, diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index ce32f9a943f4..fbf78416cb70 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -692,7 +692,7 @@ async fn select_candidates( sender: &mut impl overseer::ProvisionerSenderTrait, ) -> Result, Error> { let selected_candidates = match prospective_parachains_mode { - ProspectiveParachainsMode::Enabled => + ProspectiveParachainsMode::Enabled { .. } => request_backable_candidates(availability_cores, bitfields, relay_parent, sender).await?, ProspectiveParachainsMode::Disabled => select_candidate_hashes_from_tracked( diff --git a/node/core/provisioner/src/tests.rs b/node/core/provisioner/src/tests.rs index 4b34905b77ca..b0c69b54f007 100644 --- a/node/core/provisioner/src/tests.rs +++ b/node/core/provisioner/src/tests.rs @@ -350,7 +350,7 @@ mod select_candidates { AllMessages::ProspectiveParachains( ProspectiveParachainsMessage::GetBackableCandidate(.., tx), ) => match prospective_parachains_mode { - ProspectiveParachainsMode::Enabled => { + ProspectiveParachainsMode::Enabled { .. } => { let _ = tx.send(candidates.next()); }, ProspectiveParachainsMode::Disabled => @@ -572,7 +572,8 @@ mod select_candidates { let expected_candidates: Vec<_> = [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); // Expect prospective parachains subsystem requests. 
- let prospective_parachains_mode = ProspectiveParachainsMode::Enabled; + let prospective_parachains_mode = + ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; let expected_backed = expected_candidates .iter() @@ -587,7 +588,16 @@ mod select_candidates { .collect(); test_harness( - |r| mock_overseer(r, expected_backed, ProspectiveParachainsMode::Enabled), + |r| { + mock_overseer( + r, + expected_backed, + ProspectiveParachainsMode::Enabled { + max_candidate_depth: 0, + allowed_ancestry_len: 0, + }, + ) + }, |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, diff --git a/node/core/runtime-api/src/cache.rs b/node/core/runtime-api/src/cache.rs index 651a921e6a5b..c7ac97e9449b 100644 --- a/node/core/runtime-api/src/cache.rs +++ b/node/core/runtime-api/src/cache.rs @@ -53,6 +53,7 @@ const VERSION_CACHE_SIZE: usize = 4 * 1024; const DISPUTES_CACHE_SIZE: usize = 64 * 1024; const STAGING_VALIDITY_CONSTRAINTS_CACHE_SIZE: usize = 10 * 1024; +const STAGING_ASYNC_BACKING_PARAMETERS_CACHE_SIZE: usize = 10 * 1024; struct ResidentSizeOf(T); @@ -120,15 +121,16 @@ pub(crate) struct RequestResultCache { (Hash, ParaId, OccupiedCoreAssumption), ResidentSizeOf>, >, - - staging_validity_constraints: - MemoryLruCache<(Hash, ParaId), ResidentSizeOf>>, - version: MemoryLruCache>, disputes: MemoryLruCache< Hash, ResidentSizeOf)>>, >, + + staging_validity_constraints: + MemoryLruCache<(Hash, ParaId), ResidentSizeOf>>, + staging_async_backing_parameters: + MemoryLruCache>, } impl Default for RequestResultCache { @@ -155,13 +157,15 @@ impl Default for RequestResultCache { on_chain_votes: MemoryLruCache::new(ON_CHAIN_VOTES_CACHE_SIZE), pvfs_require_precheck: MemoryLruCache::new(PVFS_REQUIRE_PRECHECK_SIZE), validation_code_hash: MemoryLruCache::new(VALIDATION_CODE_HASH_CACHE_SIZE), + version: MemoryLruCache::new(VERSION_CACHE_SIZE), + disputes: MemoryLruCache::new(DISPUTES_CACHE_SIZE), staging_validity_constraints: MemoryLruCache::new( STAGING_VALIDITY_CONSTRAINTS_CACHE_SIZE, ), - - version: MemoryLruCache::new(VERSION_CACHE_SIZE), - disputes: MemoryLruCache::new(DISPUTES_CACHE_SIZE), + staging_async_backing_parameters: MemoryLruCache::new( + STAGING_ASYNC_BACKING_PARAMETERS_CACHE_SIZE, + ), } } } @@ -420,21 +424,6 @@ impl RequestResultCache { self.validation_code_hash.insert(key, ResidentSizeOf(value)); } - pub(crate) fn staging_validity_constraints( - &mut self, - key: (Hash, ParaId), - ) -> Option<&Option> { - self.staging_validity_constraints.get(&key).map(|v| &v.0) - } - - pub(crate) fn cache_staging_validity_constraints( - &mut self, - key: (Hash, ParaId), - value: Option, - ) { - self.staging_validity_constraints.insert(key, ResidentSizeOf(value)); - } - pub(crate) fn version(&mut self, relay_parent: &Hash) -> Option<&u32> { self.version.get(&relay_parent).map(|v| &v.0) } @@ -457,6 +446,36 @@ impl RequestResultCache { ) { self.disputes.insert(relay_parent, ResidentSizeOf(value)); } + + pub(crate) fn staging_validity_constraints( + &mut self, + key: (Hash, ParaId), + ) -> Option<&Option> { + self.staging_validity_constraints.get(&key).map(|v| &v.0) + } + + pub(crate) fn cache_staging_validity_constraints( + &mut self, + key: (Hash, ParaId), + value: Option, + ) { + self.staging_validity_constraints.insert(key, ResidentSizeOf(value)); + } + + pub(crate) fn staging_async_backing_parameters( + &mut self, + key: &Hash, + ) -> Option<&vstaging_primitives::AsyncBackingParameters> { + self.staging_async_backing_parameters.get(&key).map(|v| &v.0) + 
} + + pub(crate) fn cache_staging_async_backing_parameters( + &mut self, + key: Hash, + value: vstaging_primitives::AsyncBackingParameters, + ) { + self.staging_async_backing_parameters.insert(key, ResidentSizeOf(value)); + } } pub(crate) enum RequestResult { @@ -491,9 +510,9 @@ pub(crate) enum RequestResult { // This is a request with side-effects and no result, hence (). SubmitPvfCheckStatement(Hash, PvfCheckStatement, ValidatorSignature, ()), ValidationCodeHash(Hash, ParaId, OccupiedCoreAssumption, Option), - - StagingValidityConstraints(Hash, ParaId, Option), - Version(Hash, u32), Disputes(Hash, Vec<(SessionIndex, CandidateHash, DisputeState)>), + + StagingValidityConstraints(Hash, ParaId, Option), + StagingAsyncBackingParameters(Hash, vstaging_primitives::AsyncBackingParameters), } diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index a6d71f351a6a..c354db35fd26 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -151,15 +151,16 @@ where ValidationCodeHash(relay_parent, para_id, assumption, hash) => self .requests_cache .cache_validation_code_hash((relay_parent, para_id, assumption), hash), - - StagingValidityConstraints(relay_parent, para_id, constraints) => self - .requests_cache - .cache_staging_validity_constraints((relay_parent, para_id), constraints), - Version(relay_parent, version) => self.requests_cache.cache_version(relay_parent, version), Disputes(relay_parent, disputes) => self.requests_cache.cache_disputes(relay_parent, disputes), + + StagingValidityConstraints(relay_parent, para_id, constraints) => self + .requests_cache + .cache_staging_validity_constraints((relay_parent, para_id), constraints), + StagingAsyncBackingParameters(relay_parent, params) => + self.requests_cache.cache_staging_async_backing_parameters(relay_parent, params), } } @@ -261,11 +262,14 @@ where Request::ValidationCodeHash(para, assumption, sender) => query!(validation_code_hash(para, assumption), sender) .map(|sender| Request::ValidationCodeHash(para, assumption, sender)), + Request::Disputes(sender) => + query!(disputes(), sender).map(|sender| Request::Disputes(sender)), Request::StagingValidityConstraints(para, sender) => query!(staging_validity_constraints(para), sender) .map(|sender| Request::StagingValidityConstraints(para, sender)), - Request::Disputes(sender) => - query!(disputes(), sender).map(|sender| Request::Disputes(sender)), + Request::StagingAsyncBackingParameters(sender) => + query!(staging_async_backing_parameters(), sender) + .map(|sender| Request::StagingAsyncBackingParameters(sender)), } } @@ -508,10 +512,23 @@ where }, Request::ValidationCodeHash(para, assumption, sender) => query!(ValidationCodeHash, validation_code_hash(para, assumption), ver = 2, sender), - Request::StagingValidityConstraints(para, sender) => { - query!(StagingValidityConstraints, staging_validity_constraints(para), ver = 2, sender) - }, Request::Disputes(sender) => query!(Disputes, disputes(), ver = Request::DISPUTES_RUNTIME_REQUIREMENT, sender), + Request::StagingValidityConstraints(para, sender) => { + query!( + StagingValidityConstraints, + staging_validity_constraints(para), + ver = Request::VALIDITY_CONSTRAINTS, + sender + ) + }, + Request::StagingAsyncBackingParameters(sender) => { + query!( + StagingAsyncBackingParameters, + staging_async_backing_parameters(), + ver = Request::VALIDITY_CONSTRAINTS, + sender + ) + }, } } diff --git a/node/network/collator-protocol/src/collator_side/mod.rs 
b/node/network/collator-protocol/src/collator_side/mod.rs index 3a9a32587bd1..55537628fb8a 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -57,7 +57,7 @@ use polkadot_primitives::v2::{ GroupIndex, Hash, Id as ParaId, SessionIndex, }; -use super::{LOG_TARGET, MAX_CANDIDATE_DEPTH}; +use super::LOG_TARGET; use crate::{ error::{log_error, Error, FatalError, Result}, modify_reputation, @@ -345,7 +345,7 @@ async fn distribute_collation( let collations_limit = match relay_parent_mode { ProspectiveParachainsMode::Disabled => 1, - ProspectiveParachainsMode::Enabled => MAX_CANDIDATE_DEPTH + 1, + ProspectiveParachainsMode::Enabled { max_candidate_depth, .. } => max_candidate_depth + 1, }; if per_relay_parent.collations.len() >= collations_limit { @@ -455,7 +455,7 @@ async fn distribute_collation( .iter() .filter(|(_, PeerData { view: v, .. })| match relay_parent_mode { ProspectiveParachainsMode::Disabled => v.contains(&candidate_relay_parent), - ProspectiveParachainsMode::Enabled => v.iter().any(|block_hash| { + ProspectiveParachainsMode::Enabled { .. } => v.iter().any(|block_hash| { state .implicit_view .known_allowed_relay_parents_under(block_hash, Some(id)) @@ -1063,7 +1063,7 @@ async fn handle_peer_view_change( .map(|per_relay_parent| per_relay_parent.prospective_parachains_mode) { Some(ProspectiveParachainsMode::Disabled) => std::slice::from_ref(&added), - Some(ProspectiveParachainsMode::Enabled) => state + Some(ProspectiveParachainsMode::Enabled { .. }) => state .implicit_view .known_allowed_relay_parents_under(&added, state.collating_on) .unwrap_or_default(), @@ -1209,7 +1209,7 @@ where state .per_relay_parent .entry(*block_hash) - .or_insert_with(|| PerRelayParent::new(ProspectiveParachainsMode::Enabled)); + .or_insert_with(|| PerRelayParent::new(mode)); } } } @@ -1345,7 +1345,7 @@ pub(crate) async fn run( (ProspectiveParachainsMode::Disabled, VersionedCollationRequest::V1(_)) => { per_relay_parent.collations.values().next() }, - (ProspectiveParachainsMode::Enabled, VersionedCollationRequest::VStaging(req)) => { + (ProspectiveParachainsMode::Enabled { .. 
}, VersionedCollationRequest::VStaging(req)) => { per_relay_parent.collations.get(&req.payload.candidate_hash) }, _ => { diff --git a/node/network/collator-protocol/src/collator_side/tests/mod.rs b/node/network/collator-protocol/src/collator_side/tests/mod.rs index 99baea31c016..470b78cd3ab3 100644 --- a/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ b/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -37,6 +37,7 @@ use polkadot_node_network_protocol::{ }; use polkadot_node_primitives::BlockData; use polkadot_node_subsystem::{ + errors::RuntimeApiError, jaeger, messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}, ActivatedLeaf, ActiveLeavesUpdate, LeafStatus, @@ -51,7 +52,8 @@ use polkadot_primitives_test_helpers::TestCandidateBuilder; mod prospective_parachains; -const API_VERSION_PROSPECTIVE_DISABLED: u32 = 2; +const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError = + RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" }; #[derive(Clone)] struct TestState { @@ -193,10 +195,10 @@ impl TestState { overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::Version(tx) + RuntimeApiRequest::StagingAsyncBackingParameters(tx) )) => { assert_eq!(relay_parent, self.relay_parent); - tx.send(Ok(API_VERSION_PROSPECTIVE_DISABLED)).unwrap(); + tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); } ); } @@ -324,10 +326,10 @@ async fn setup_system(virtual_overseer: &mut VirtualOverseer, test_state: &TestS overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::Version(tx) + RuntimeApiRequest::StagingAsyncBackingParameters(tx) )) => { assert_eq!(relay_parent, test_state.relay_parent); - tx.send(Ok(API_VERSION_PROSPECTIVE_DISABLED)).unwrap(); + tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); } ); } diff --git a/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index d98db5b8eb82..8fe75e3c28ca 100644 --- a/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -19,9 +19,13 @@ use super::*; use polkadot_node_subsystem::messages::{ChainApiMessage, ProspectiveParachainsMessage}; -use polkadot_primitives::v2::{Header, OccupiedCore}; +use polkadot_primitives::{ + v2::{Header, OccupiedCore}, + vstaging as vstaging_primitives, +}; -const ALLOWED_ANCESTRY: u32 = 3; +const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParameters = + vstaging_primitives::AsyncBackingParameters { max_candidate_depth: 4, allowed_ancestry_len: 3 }; fn get_parent_hash(hash: Hash) -> Hash { Hash::from_low_u64_be(hash.to_low_u64_be() + 1) @@ -51,14 +55,14 @@ async fn update_view( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( parent, - RuntimeApiRequest::Version(tx), + RuntimeApiRequest::StagingAsyncBackingParameters(tx), )) => { - tx.send(Ok(RuntimeApiRequest::VALIDITY_CONSTRAINTS)).unwrap(); + tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); (parent, new_view.get(&parent).copied().expect("Unknown parent requested")) } ); - let min_number = leaf_number.saturating_sub(ALLOWED_ANCESTRY); + let min_number = leaf_number.saturating_sub(ASYNC_BACKING_PARAMETERS.allowed_ancestry_len); assert_matches!( overseer_recv(virtual_overseer).await, @@ -318,7 +322,7 @@ fn 
distribute_collation_up_to_limit() { // Activated leaf is `a`, but the collation will be based on `b`. update_view(virtual_overseer, &test_state, vec![(head_a, head_a_num)], 1).await; - for i in 0..(MAX_CANDIDATE_DEPTH + 1) { + for i in 0..(ASYNC_BACKING_PARAMETERS.max_candidate_depth + 1) { let pov = PoV { block_data: BlockData(vec![i as u8]) }; let parent_head_data_hash = Hash::repeat_byte(0xAA); let candidate = TestCandidateBuilder { diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index ca5bf5e297c6..b349a15db47a 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -47,15 +47,6 @@ mod validator_side; const LOG_TARGET: &'static str = "parachain::collator-protocol"; -/// The maximum depth a candidate can occupy for any relay parent. -/// 'depth' is defined as the amount of blocks between the para -/// head in a relay-chain block's state and a candidate with a -/// particular relay-parent. -/// -/// This value is only used for limiting the number of candidates -/// we accept and distribute per relay parent. -const MAX_CANDIDATE_DEPTH: usize = 4; - /// A collator eviction policy - how fast to evict collators which are inactive. #[derive(Debug, Clone, Copy)] pub struct CollatorEvictionPolicy { diff --git a/node/network/collator-protocol/src/validator_side/collation.rs b/node/network/collator-protocol/src/validator_side/collation.rs index a18397e09051..a60cd172cf44 100644 --- a/node/network/collator-protocol/src/validator_side/collation.rs +++ b/node/network/collator-protocol/src/validator_side/collation.rs @@ -37,7 +37,7 @@ use polkadot_primitives::v2::{ CandidateHash, CandidateReceipt, CollatorId, Hash, Id as ParaId, PersistedValidationData, }; -use crate::{error::SecondingError, LOG_TARGET, MAX_CANDIDATE_DEPTH}; +use crate::{error::SecondingError, LOG_TARGET}; /// Candidate supplied with a para head it's built on top of. #[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)] @@ -258,7 +258,13 @@ impl Collations { relay_parent_mode: ProspectiveParachainsMode, ) -> bool { let seconded_limit = - if relay_parent_mode.is_enabled() { MAX_CANDIDATE_DEPTH + 1 } else { 1 }; + if let ProspectiveParachainsMode::Enabled { max_candidate_depth, .. } = + relay_parent_mode + { + max_candidate_depth + 1 + } else { + 1 + }; self.seconded_count < seconded_limit } } diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index a6ef8d45bffc..0f2bc2ef670c 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -66,7 +66,7 @@ use polkadot_primitives::v2::{ use crate::error::{Error, FetchError, Result, SecondingError}; -use super::{modify_reputation, tick_stream, LOG_TARGET, MAX_CANDIDATE_DEPTH}; +use super::{modify_reputation, tick_stream, LOG_TARGET}; mod collation; mod metrics; @@ -274,7 +274,10 @@ impl PeerData { .advertisements .insert(on_relay_parent, HashSet::from_iter(candidate_hash)); }, - (ProspectiveParachainsMode::Enabled, Some(candidate_hash)) => { + ( + ProspectiveParachainsMode::Enabled { max_candidate_depth, .. 
}, + Some(candidate_hash), + ) => { if state .advertisements .get(&on_relay_parent) @@ -284,7 +287,7 @@ impl PeerData { } let candidates = state.advertisements.entry(on_relay_parent).or_default(); - if candidates.len() >= MAX_CANDIDATE_DEPTH + 1 { + if candidates.len() >= max_candidate_depth + 1 { return Err(InsertAdvertisementError::PeerLimitReached) } candidates.insert(candidate_hash); @@ -461,7 +464,7 @@ fn is_relay_parent_in_implicit_view( ) -> bool { match relay_parent_mode { ProspectiveParachainsMode::Disabled => active_leaves.contains_key(relay_parent), - ProspectiveParachainsMode::Enabled => active_leaves.iter().any(|(hash, mode)| { + ProspectiveParachainsMode::Enabled { .. } => active_leaves.iter().any(|(hash, mode)| { mode.is_enabled() && implicit_view .known_allowed_relay_parents_under(hash, Some(para_id)) @@ -1277,8 +1280,7 @@ where .unwrap_or_default(); for block_hash in allowed_ancestry { if let Entry::Vacant(entry) = state.per_relay_parent.entry(*block_hash) { - let mut per_relay_parent = - PerRelayParent::new(ProspectiveParachainsMode::Enabled); + let mut per_relay_parent = PerRelayParent::new(mode); assign_incoming( sender, &mut per_relay_parent.assignment, @@ -1763,7 +1765,7 @@ async fn kick_off_seconding( let pvd = match (relay_parent_mode, collation_event.1.prospective_candidate) { ( - ProspectiveParachainsMode::Enabled, + ProspectiveParachainsMode::Enabled { .. }, Some(ProspectiveCandidate { parent_head_data_hash, .. }), ) => request_prospective_validation_data( diff --git a/node/network/collator-protocol/src/validator_side/tests/mod.rs b/node/network/collator-protocol/src/validator_side/tests/mod.rs index 3499c8639a6d..ae728e033fe1 100644 --- a/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -29,7 +29,10 @@ use polkadot_node_network_protocol::{ ObservedRole, }; use polkadot_node_primitives::BlockData; -use polkadot_node_subsystem::messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}; +use polkadot_node_subsystem::{ + errors::RuntimeApiError, + messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}, +}; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::v2::{ @@ -45,7 +48,8 @@ mod prospective_parachains; const ACTIVITY_TIMEOUT: Duration = Duration::from_millis(500); const DECLARE_TIMEOUT: Duration = Duration::from_millis(25); -const API_VERSION_PROSPECTIVE_DISABLED: u32 = 2; +const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError = + RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" }; fn dummy_pvd() -> PersistedValidationData { PersistedValidationData { @@ -280,7 +284,7 @@ async fn assert_candidate_backing_second( tx.send(Ok(Some(pvd.clone()))).unwrap(); } ), - ProspectiveParachainsMode::Enabled => assert_matches!( + ProspectiveParachainsMode::Enabled { .. 
} => assert_matches!( msg, AllMessages::ProspectiveParachains( ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx), @@ -430,15 +434,18 @@ async fn advertise_collation( .await; } -async fn assert_runtime_version_request(virtual_overseer: &mut VirtualOverseer, hash: Hash) { +async fn assert_async_backing_parameters_request( + virtual_overseer: &mut VirtualOverseer, + hash: Hash, +) { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::Version(tx) + RuntimeApiRequest::StagingAsyncBackingParameters(tx) )) => { assert_eq!(relay_parent, hash); - tx.send(Ok(API_VERSION_PROSPECTIVE_DISABLED)).unwrap(); + tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); } ); } @@ -462,7 +469,8 @@ fn act_on_advertisement() { ) .await; - assert_runtime_version_request(&mut virtual_overseer, test_state.relay_parent).await; + assert_async_backing_parameters_request(&mut virtual_overseer, test_state.relay_parent) + .await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -511,7 +519,8 @@ fn act_on_advertisement_vstaging() { ) .await; - assert_runtime_version_request(&mut virtual_overseer, test_state.relay_parent).await; + assert_async_backing_parameters_request(&mut virtual_overseer, test_state.relay_parent) + .await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -564,7 +573,8 @@ fn collator_reporting_works() { ) .await; - assert_runtime_version_request(&mut virtual_overseer, test_state.relay_parent).await; + assert_async_backing_parameters_request(&mut virtual_overseer, test_state.relay_parent) + .await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; @@ -682,7 +692,7 @@ fn fetch_one_collation_at_a_time() { // Iter over view since the order may change due to sorted invariant. 
for hash in our_view.iter() { - assert_runtime_version_request(&mut virtual_overseer, *hash).await; + assert_async_backing_parameters_request(&mut virtual_overseer, *hash).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; } @@ -780,7 +790,7 @@ fn fetches_next_collation() { .await; for hash in our_view.iter() { - assert_runtime_version_request(&mut virtual_overseer, *hash).await; + assert_async_backing_parameters_request(&mut virtual_overseer, *hash).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; } @@ -899,7 +909,8 @@ fn reject_connection_to_next_group() { ) .await; - assert_runtime_version_request(&mut virtual_overseer, test_state.relay_parent).await; + assert_async_backing_parameters_request(&mut virtual_overseer, test_state.relay_parent) + .await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -951,7 +962,7 @@ fn fetch_next_collation_on_invalid_collation() { .await; for hash in our_view.iter() { - assert_runtime_version_request(&mut virtual_overseer, *hash).await; + assert_async_backing_parameters_request(&mut virtual_overseer, *hash).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; } @@ -1062,7 +1073,7 @@ fn inactive_disconnected() { ) .await; - assert_runtime_version_request(&mut virtual_overseer, hash_a).await; + assert_async_backing_parameters_request(&mut virtual_overseer, hash_a).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -1117,7 +1128,7 @@ fn activity_extends_life() { .await; for hash in our_view.iter() { - assert_runtime_version_request(&mut virtual_overseer, *hash).await; + assert_async_backing_parameters_request(&mut virtual_overseer, *hash).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; } @@ -1191,7 +1202,8 @@ fn disconnect_if_no_declare() { ) .await; - assert_runtime_version_request(&mut virtual_overseer, test_state.relay_parent).await; + assert_async_backing_parameters_request(&mut virtual_overseer, test_state.relay_parent) + .await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -1230,7 +1242,8 @@ fn disconnect_if_wrong_declare() { ) .await; - assert_runtime_version_request(&mut virtual_overseer, test_state.relay_parent).await; + assert_async_backing_parameters_request(&mut virtual_overseer, test_state.relay_parent) + .await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -1293,7 +1306,8 @@ fn view_change_clears_old_collators() { ) .await; - assert_runtime_version_request(&mut virtual_overseer, test_state.relay_parent).await; + assert_async_backing_parameters_request(&mut virtual_overseer, test_state.relay_parent) + .await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -1318,7 +1332,7 @@ fn view_change_clears_old_collators() { .await; test_state.group_rotation_info = test_state.group_rotation_info.bump_rotation(); - assert_runtime_version_request(&mut virtual_overseer, hash_b).await; + assert_async_backing_parameters_request(&mut virtual_overseer, hash_b).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; assert_collator_disconnect(&mut virtual_overseer, peer_b.clone()).await; diff --git a/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs 
b/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index 3b25f9203fc9..ebaf18f050e0 100644 --- a/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -19,12 +19,16 @@ use super::*; use polkadot_node_subsystem::messages::ChainApiMessage; -use polkadot_primitives::v2::{ - BlockNumber, CandidateCommitments, CommittedCandidateReceipt, Header, SigningContext, - ValidatorId, +use polkadot_primitives::{ + v2::{ + BlockNumber, CandidateCommitments, CommittedCandidateReceipt, Header, SigningContext, + ValidatorId, + }, + vstaging as vstaging_primitives, }; -const ALLOWED_ANCESTRY: u32 = 3; +const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParameters = + vstaging_primitives::AsyncBackingParameters { max_candidate_depth: 4, allowed_ancestry_len: 3 }; fn get_parent_hash(hash: Hash) -> Hash { Hash::from_low_u64_be(hash.to_low_u64_be() + 1) } @@ -96,9 +100,9 @@ async fn update_view( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( parent, - RuntimeApiRequest::Version(tx), + RuntimeApiRequest::StagingAsyncBackingParameters(tx), )) => { - tx.send(Ok(RuntimeApiRequest::VALIDITY_CONSTRAINTS)).unwrap(); + tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); (parent, new_view.get(&parent).copied().expect("Unknown parent requested")) } ); @@ -112,7 +116,7 @@ async fn update_view( ) .await; - let min_number = leaf_number.saturating_sub(ALLOWED_ANCESTRY); + let min_number = leaf_number.saturating_sub(ASYNC_BACKING_PARAMETERS.allowed_ancestry_len); assert_matches!( overseer_recv(virtual_overseer).await, @@ -416,7 +420,7 @@ fn second_multiple_candidates_per_relay_parent() { ) .await; - for i in 0..(MAX_CANDIDATE_DEPTH + 1) { + for i in 0..(ASYNC_BACKING_PARAMETERS.max_candidate_depth + 1) { let mut candidate = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default())); candidate.descriptor.para_id = test_state.chain_ids[0]; candidate.descriptor.relay_parent = head_c; @@ -476,7 +480,10 @@ fn second_multiple_candidates_per_relay_parent() { head_c, test_state.chain_ids[0], &pov, - ProspectiveParachainsMode::Enabled, + ProspectiveParachainsMode::Enabled { + max_candidate_depth: ASYNC_BACKING_PARAMETERS.max_candidate_depth as _, + allowed_ancestry_len: ASYNC_BACKING_PARAMETERS.allowed_ancestry_len as _, + }, ) .await; diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index e51cd9689c79..e71aff6478a8 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -707,11 +707,15 @@ pub enum RuntimeApiRequest { OccupiedCoreAssumption, RuntimeApiSender>, ), + /// Returns all on-chain disputes at the given block number. Available in `v3`. + Disputes(RuntimeApiSender)>>), /// Get the validity constraints of the given para. /// This is a staging API that will not be available on production runtimes. StagingValidityConstraints(ParaId, RuntimeApiSender>), - /// Returns all on-chain disputes at given block number. Available in `v3`. - Disputes(RuntimeApiSender)>>), + /// Get candidate's acceptance limitations for asynchronous backing for a relay parent. + /// + /// If the runtime does not support this API, async backing is considered disabled.
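+ /// (The node-side `prospective_parachains_mode` helper in `subsystem-util` relies on this: a `NotSupported` error from the request selects the disabled mode.)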
+ StagingAsyncBackingParameters(RuntimeApiSender), } impl RuntimeApiRequest { diff --git a/node/subsystem-types/src/runtime_client.rs b/node/subsystem-types/src/runtime_client.rs index 65f0cff00262..31aacd3b00b8 100644 --- a/node/subsystem-types/src/runtime_client.rs +++ b/node/subsystem-types/src/runtime_client.rs @@ -199,6 +199,12 @@ pub trait RuntimeApiSubsystemClient { para_id: Id, ) -> Result, ApiError>; + /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. + async fn staging_async_backing_parameters( + &self, + at: Hash, + ) -> Result; + // === BABE API === /// Returns information regarding the current epoch. @@ -397,4 +403,12 @@ where ) -> Result, ApiError> { self.runtime_api().staging_validity_constraints(&BlockId::Hash(at), para_id) } + + /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. + async fn staging_async_backing_parameters( + &self, + at: Hash, + ) -> Result { + self.runtime_api().staging_async_backing_parameters(&BlockId::Hash(at)) + } } diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index 68fdad77895d..63353d7e6269 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -40,12 +40,15 @@ pub use polkadot_node_metrics::{metrics, Metronome}; use futures::channel::{mpsc, oneshot}; use parity_scale_codec::Encode; -use polkadot_primitives::v2::{ - AuthorityDiscoveryId, CandidateEvent, CommittedCandidateReceipt, CoreState, EncodeAs, - GroupIndex, GroupRotationInfo, Hash, Id as ParaId, OccupiedCoreAssumption, - PersistedValidationData, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, - SigningContext, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, +use polkadot_primitives::{ + v2::{ + AuthorityDiscoveryId, CandidateEvent, CommittedCandidateReceipt, CoreState, EncodeAs, + GroupIndex, GroupRotationInfo, Hash, Id as ParaId, OccupiedCoreAssumption, + PersistedValidationData, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, + SigningContext, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + ValidatorSignature, + }, + vstaging as vstaging_primitives, }; pub use rand; use sp_application_crypto::AppKey; @@ -211,6 +214,7 @@ specialize_requests! { fn request_validation_code_hash(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option; ValidationCodeHash; fn request_on_chain_votes() -> Option; FetchOnChainVotes; + fn request_staging_async_backing_parameters() -> vstaging_primitives::AsyncBackingParameters; StagingAsyncBackingParameters; } /// From the given set of validators, find the first key we can sign with, if any. 
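The new `request_staging_async_backing_parameters` helper above is what the `runtime/mod.rs` hunk that follows builds on: probing the staging API doubles as the feature check, replacing the old runtime-version comparison. Below is a condensed sketch of that detection pattern, not the exact code; it assumes the same helper names (`recv_runtime`, `request_staging_async_backing_parameters`) and simplifies the sender bound and logging.

async fn async_backing_mode<Sender>(
	relay_parent: Hash,
	sender: &mut Sender,
) -> Result<ProspectiveParachainsMode>
where
	Sender: SubsystemSender<RuntimeApiMessage>, // simplified bound for illustration
{
	match recv_runtime(request_staging_async_backing_parameters(relay_parent, sender).await)
		.await
	{
		// A `NotSupported` error just means the runtime predates the staging
		// API: treat async backing as disabled rather than failing.
		Err(error::Error::RuntimeRequest(RuntimeApiError::NotSupported { .. })) =>
			Ok(ProspectiveParachainsMode::Disabled),
		// Any other failure is propagated: a flaky runtime API must not
		// silently downgrade the node to synchronous backing.
		Err(e) => Err(e),
		// The parameters themselves become the operating mode.
		Ok(params) => Ok(ProspectiveParachainsMode::Enabled {
			max_candidate_depth: params.max_candidate_depth as usize,
			allowed_ancestry_len: params.allowed_ancestry_len as usize,
		}),
	}
}

Note the asymmetry: only `NotSupported` maps to `Disabled`, while other errors surface to the caller, matching the real implementation's use of `result?` in the enabled branch.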
diff --git a/node/subsystem-util/src/runtime/mod.rs b/node/subsystem-util/src/runtime/mod.rs index 4a2e3c0f4224..699efabe2de1 100644 --- a/node/subsystem-util/src/runtime/mod.rs +++ b/node/subsystem-util/src/runtime/mod.rs @@ -26,19 +26,22 @@ use sp_core::crypto::ByteArray; use sp_keystore::{CryptoStore, SyncCryptoStorePtr}; use polkadot_node_subsystem::{ - messages::{RuntimeApiMessage, RuntimeApiRequest}, - overseer, SubsystemSender, + errors::RuntimeApiError, messages::RuntimeApiMessage, overseer, SubsystemSender, }; -use polkadot_primitives::v2::{ - CandidateEvent, CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash, IndexedVec, - OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, - UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, +use polkadot_primitives::{ + v2::{ + CandidateEvent, CoreState, EncodeAs, GroupIndex, GroupRotationInfo, Hash, IndexedVec, + OccupiedCore, ScrapedOnChainVotes, SessionIndex, SessionInfo, Signed, SigningContext, + UncheckedSigned, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, + }, + vstaging as vstaging_primitives, }; use crate::{ request_availability_cores, request_candidate_events, request_on_chain_votes, - request_runtime_api_version, request_session_index_for_child, request_session_info, - request_validation_code_by_hash, request_validator_groups, + request_session_index_for_child, request_session_info, + request_staging_async_backing_parameters, request_validation_code_by_hash, + request_validator_groups, }; /// Errors that can happen on runtime fetches. @@ -358,13 +361,21 @@ pub enum ProspectiveParachainsMode { /// v2 runtime API: no prospective parachains. Disabled, /// vstaging runtime API: prospective parachains. - Enabled, + Enabled { + /// The maximum number of para blocks between the para head in a relay parent + /// and a new candidate. Restricts nodes from building arbitrarily long chains + /// and spamming other validators. + max_candidate_depth: usize, + /// The number of ancestors of a relay parent that candidates are allowed to + /// build on top of. + allowed_ancestry_len: usize, + }, } impl ProspectiveParachainsMode { /// Returns `true` if mode is enabled, `false` otherwise. pub fn is_enabled(&self) -> bool { - matches!(self, ProspectiveParachainsMode::Enabled) + matches!(self, ProspectiveParachainsMode::Enabled { .. }) } } @@ -377,21 +388,28 @@ pub async fn prospective_parachains_mode( where Sender: SubsystemSender, { - let version = recv_runtime(request_runtime_api_version(relay_parent, sender).await).await?; + let result = + recv_runtime(request_staging_async_backing_parameters(relay_parent, sender).await).await; + + if let Err(error::Error::RuntimeRequest(RuntimeApiError::NotSupported { runtime_api_name })) = + &result + { + gum::trace!( + target: LOG_TARGET, + ?relay_parent, + "Prospective parachains are disabled, {} is not supported by the current Runtime API", + runtime_api_name, + ); - if version >= RuntimeApiRequest::VALIDITY_CONSTRAINTS { - Ok(ProspectiveParachainsMode::Enabled) - } else { - if version < 2 { - gum::warn!( - target: LOG_TARGET, - ?relay_parent, - "Runtime API version is {}, it is expected to be at least 2. Prospective parachains are disabled", - version - ); - } Ok(ProspectiveParachainsMode::Disabled) + } else { + let vstaging_primitives::AsyncBackingParameters { + max_candidate_depth, + allowed_ancestry_len, + } = result?; + Ok(ProspectiveParachainsMode::Enabled { + max_candidate_depth: max_candidate_depth as _, + allowed_ancestry_len: allowed_ancestry_len as _, + }) } } - -// TODO [now] : a way of getting all [`ContextLimitations`] from runtime. diff --git a/primitives/src/runtime_api.rs b/primitives/src/runtime_api.rs index d54ec6919d55..ee041438d6b9 100644 --- a/primitives/src/runtime_api.rs +++ b/primitives/src/runtime_api.rs @@ -216,12 +216,19 @@ sp_api::decl_runtime_apis! { #[changed_in(2)] fn session_info(index: sp_staking::SessionIndex) -> Option; + /// Returns all onchain disputes. + #[api_version(3)] + fn disputes() -> Vec<(v2::SessionIndex, v2::CandidateHash, v2::DisputeState)>; + + /***** Asynchronous backing *****/ + /// Returns the base constraints of the given para, if they exist. /// This is a staging method! Do not use on production runtimes! + #[api_version(99)] fn staging_validity_constraints(_: ppp::Id) -> Option; - /// Returns all onchain disputes. - #[api_version(3)] - fn disputes() -> Vec<(v2::SessionIndex, v2::CandidateHash, v2::DisputeState)>; + /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. + #[api_version(99)] + fn staging_async_backing_parameters() -> vstaging::AsyncBackingParameters; } } diff --git a/primitives/src/vstaging/mod.rs b/primitives/src/vstaging/mod.rs index 16bfc085570a..9834eb3eac2c 100644 --- a/primitives/src/vstaging/mod.rs +++ b/primitives/src/vstaging/mod.rs @@ -30,6 +30,23 @@ use parity_util_mem::MallocSizeOf; /// Useful type alias for Para IDs. pub type ParaId = Id; +/// Candidate's acceptance limitations for asynchronous backing per relay parent. +#[derive(RuntimeDebug, Copy, Clone, PartialEq, Encode, Decode, TypeInfo)] +#[cfg_attr(feature = "std", derive(MallocSizeOf, serde::Serialize, serde::Deserialize))] +pub struct AsyncBackingParameters { + /// The maximum number of para blocks between the para head in a relay parent + /// and a new candidate. Restricts nodes from building arbitrarily long chains + /// and spamming other validators. + /// + /// When async backing is disabled, the only valid value is 0. + pub max_candidate_depth: u32, + /// The number of ancestors of a relay parent that candidates are allowed to + /// build on top of. + /// + /// When async backing is disabled, the only valid value is 0. + pub allowed_ancestry_len: u32, +} + /// Constraints on inbound HRMP channels. #[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)] #[cfg_attr(feature = "std", derive(MallocSizeOf))] diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index 902ea7bdcae7..94e6d5110d99 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -1460,7 +1460,7 @@ pub type Executive = frame_executive::Executive< pallet_democracy::migrations::v1::Migration, pallet_multisig::migrations::v1::MigrateToV1, // "Properly migrate weights to v2" - parachains_configuration::migration::v3::MigrateToV3, + parachains_configuration::migration::v4::MigrateToV4, pallet_election_provider_multi_phase::migrations::v1::MigrateToV1, pallet_fast_unstake::migrations::v1::MigrateToV1, ), @@ -1690,10 +1690,6 @@ sp_api::impl_runtime_apis!
{ { parachains_runtime_api_impl::validation_code_hash::(para_id, assumption) } - - fn staging_validity_constraints(_: ParaId) -> Option { - unimplemented!("Staging API not implemented"); - } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs index 2ebaff1b8282..d1e65af4444f 100644 --- a/runtime/parachains/src/configuration.rs +++ b/runtime/parachains/src/configuration.rs @@ -22,7 +22,10 @@ use crate::shared; use frame_support::{pallet_prelude::*, weights::constants::WEIGHT_PER_MILLIS}; use frame_system::pallet_prelude::*; use parity_scale_codec::{Decode, Encode}; -use primitives::v2::{Balance, SessionIndex, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE}; +use primitives::{ + v2::{Balance, SessionIndex, MAX_CODE_SIZE, MAX_HEAD_DATA_SIZE, MAX_POV_SIZE}, + vstaging::AsyncBackingParameters, +}; use sp_runtime::traits::Zero; use sp_std::prelude::*; @@ -117,6 +120,8 @@ pub struct HostConfiguration { * The parameters that are not essential, but still may be of interest for parachains. */ + /// Asynchronous backing parameters. + pub async_backing_parameters: AsyncBackingParameters, /// The maximum POV block size, in bytes. pub max_pov_size: u32, /// The maximum size of a message that can be put in a downward message queue. @@ -246,6 +251,10 @@ pub struct HostConfiguration { impl> Default for HostConfiguration { fn default() -> Self { Self { + async_backing_parameters: AsyncBackingParameters { + max_candidate_depth: 0, + allowed_ancestry_len: 0, + }, group_rotation_frequency: 1u32.into(), chain_availability_period: 1u32.into(), thread_availability_period: 1u32.into(), @@ -518,6 +527,23 @@ pub mod pallet { #[pallet::call] impl Pallet { + /// Set the asynchronous backing parameters. + #[pallet::weight(( + T::WeightInfo::set_config_with_option_u32(), // The same size in bytes. + DispatchClass::Operational, + ))] + pub fn set_async_backing_parameters( + origin: OriginFor, + max_candidate_depth: u32, + allowed_ancestry_len: u32, + ) -> DispatchResult { + ensure_root(origin)?; + Self::schedule_config_update(|config| { + config.async_backing_parameters = + AsyncBackingParameters { max_candidate_depth, allowed_ancestry_len }; + }) + } + /// Set the validation upgrade cooldown. #[pallet::weight(( T::WeightInfo::set_config_with_block_number(), diff --git a/runtime/parachains/src/configuration/migration.rs b/runtime/parachains/src/configuration/migration.rs index d87c98f6ae78..d3b50181266c 100644 --- a/runtime/parachains/src/configuration/migration.rs +++ b/runtime/parachains/src/configuration/migration.rs @@ -17,27 +17,23 @@ //! A module that is responsible for migration of storage. use crate::configuration::{self, Config, Pallet, Store, MAX_POV_SIZE}; -use frame_support::{ - pallet_prelude::*, - traits::StorageVersion, - weights::{OldWeight, Weight}, -}; +use frame_support::{pallet_prelude::*, traits::StorageVersion, weights::Weight}; use frame_system::pallet_prelude::BlockNumberFor; +use primitives::vstaging::AsyncBackingParameters; /// The current storage version. /// /// v0-v1: /// v1-v2: /// v2-v3: -pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(3); +/// v3-v4: ... 
+pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(4); -pub mod v3 { +pub mod v4 { use super::*; - use frame_support::traits::OnRuntimeUpgrade; + use frame_support::{traits::OnRuntimeUpgrade, weights::constants::WEIGHT_PER_MILLIS}; use primitives::v2::{Balance, SessionIndex}; - // Copied over from configuration.rs @ de9e147695b9f1be8bd44e07861a31e483c8343a and removed - // all the comments, and changed the Weight struct to OldWeight #[derive(parity_scale_codec::Encode, parity_scale_codec::Decode, Debug)] pub struct OldHostConfiguration { pub max_code_size: u32, @@ -51,7 +47,7 @@ pub mod v3 { pub validation_upgrade_delay: BlockNumber, pub max_pov_size: u32, pub max_downward_message_size: u32, - pub ump_service_total_weight: OldWeight, + pub ump_service_total_weight: Weight, pub hrmp_max_parachain_outbound_channels: u32, pub hrmp_max_parathread_outbound_channels: u32, pub hrmp_sender_deposit: Balance, @@ -79,7 +75,7 @@ pub mod v3 { pub zeroth_delay_tranche_width: u32, pub needed_approvals: u32, pub relay_vrf_modulo_samples: u32, - pub ump_max_individual_weight: OldWeight, + pub ump_max_individual_weight: Weight, pub pvf_checking_enabled: bool, pub pvf_voting_ttl: SessionIndex, pub minimum_validation_upgrade_delay: BlockNumber, @@ -93,7 +89,7 @@ pub mod v3 { thread_availability_period: 1u32.into(), no_show_slots: 1u32.into(), validation_upgrade_cooldown: Default::default(), - validation_upgrade_delay: Default::default(), + validation_upgrade_delay: 2u32.into(), code_retention_period: Default::default(), max_code_size: Default::default(), max_pov_size: Default::default(), @@ -114,7 +110,7 @@ pub mod v3 { max_upward_queue_count: Default::default(), max_upward_queue_size: Default::default(), max_downward_message_size: Default::default(), - ump_service_total_weight: OldWeight(Default::default()), + ump_service_total_weight: Default::default(), max_upward_message_size: Default::default(), max_upward_message_num_per_candidate: Default::default(), hrmp_sender_deposit: Default::default(), @@ -127,9 +123,8 @@ pub mod v3 { hrmp_max_parachain_outbound_channels: Default::default(), hrmp_max_parathread_outbound_channels: Default::default(), hrmp_max_message_num_per_candidate: Default::default(), - ump_max_individual_weight: OldWeight( - frame_support::weights::constants::WEIGHT_PER_MILLIS.ref_time() * 20, - ), + ump_max_individual_weight: (20u64 * WEIGHT_PER_MILLIS) + .set_proof_size(MAX_POV_SIZE as u64), pvf_checking_enabled: false, pvf_voting_ttl: 2u32.into(), minimum_validation_upgrade_delay: 2.into(), @@ -137,35 +132,39 @@ pub mod v3 { } } - pub struct MigrateToV3(sp_std::marker::PhantomData); - impl OnRuntimeUpgrade for MigrateToV3 { + pub struct MigrateToV4(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV4 { fn on_runtime_upgrade() -> Weight { - if StorageVersion::get::>() == 2 { - let weight_consumed = migrate_to_v3::(); + if StorageVersion::get::>() == 3 { + let mut weight_consumed = migrate_to_v4::(); - log::info!(target: configuration::LOG_TARGET, "MigrateToV3 executed successfully"); + log::info!(target: configuration::LOG_TARGET, "MigrateToV4 executed successfully"); STORAGE_VERSION.put::>(); + weight_consumed += T::DbWeight::get().reads_writes(1, 1); weight_consumed } else { - log::warn!(target: configuration::LOG_TARGET, "MigrateToV3 should be removed."); + log::warn!(target: configuration::LOG_TARGET, "MigrateToV4 should be removed."); T::DbWeight::get().reads(1) } } } } -fn migrate_to_v3() -> Weight { +fn migrate_to_v4() -> Weight { // Unusual formatting 
is justified: // - make it easier to verify that fields assign what they are supposed to assign. // - this code is transient and will be removed after all migrations are done. // - this code is important enough to optimize for legibility, sacrificing consistency. #[rustfmt::skip] let translate = - |pre: v3::OldHostConfiguration>| -> + |pre: v4::OldHostConfiguration>| -> configuration::HostConfiguration> { super::HostConfiguration { +// Default values are zeroes, which ensures the allowed ancestry never crosses the upgrade block. +async_backing_parameters : AsyncBackingParameters { max_candidate_depth: 0, allowed_ancestry_len: 0 }, + max_code_size : pre.max_code_size, max_head_data_size : pre.max_head_data_size, max_upward_queue_count : pre.max_upward_queue_count, @@ -207,9 +206,8 @@ relay_vrf_modulo_samples : pre.relay_vrf_modulo_samples, pvf_checking_enabled : pre.pvf_checking_enabled, pvf_voting_ttl : pre.pvf_voting_ttl, minimum_validation_upgrade_delay : pre.minimum_validation_upgrade_delay, - -ump_service_total_weight: Weight::from_ref_time(pre.ump_service_total_weight.0).set_proof_size(MAX_POV_SIZE as u64), -ump_max_individual_weight: Weight::from_ref_time(pre.ump_max_individual_weight.0).set_proof_size(MAX_POV_SIZE as u64), +ump_service_total_weight : pre.ump_service_total_weight, +ump_max_individual_weight : pre.ump_max_individual_weight, } }; @@ -234,31 +232,31 @@ mod tests { use crate::mock::{new_test_ext, Test}; #[test] - fn v2_deserialized_from_actual_data() { - // Fetched at Kusama 14,703,780 (0x3b2c305d01bd4adf1973d32a2d55ca1260a55eea8dfb3168e317c57f2841fdf1) + fn v3_deserialized_from_actual_data() { + // Fetched at Westend 13,327,763 (0x3b2c305d01bd4adf1973d32a2d55ca1260a55eea8dfb3168e317c57f2841fdf1) // // This exceeds the maximal line width length, but that's fine, since this is not code and // doesn't need to be read and also leaving it as one line allows to easily copy it. - let raw_config = hex_literal::hex!["0000a000005000000a00000000c8000000c800000a0000000a000000100e0000580200000000500000c8000000e87648170000001e00000000000000005039278c0400000000000000000000005039278c0400000000000000000000e8030000009001001e00000000000000009001008070000000000000000000000a0000000a0000000a00000001000000010500000001c8000000060000005802000002000000580200000200000059000000000000001e0000002800000000c817a804000000000200000014000000"]; + let raw_config = hex_literal::hex!["00005000005000000a00000000c8000000c800000a0000000a000000c8000000640000000000500000c800000700e8764817020040010a0000000000000000c0220fca950300000000000000000000c0220fca9503000000000000000000e8030000009001000a00000000000000009001008070000000000000000000000a000000050000000500000001000000010500000001c80000000600000058020000020000005802000002000000280000000000000002000000010000000700c817a8040200400100020000000f000000"]; - let v2 = - v3::OldHostConfiguration::::decode(&mut &raw_config[..]) + let v3 = + v4::OldHostConfiguration::::decode(&mut &raw_config[..]) .unwrap(); // We check only a sample of the values here. If we missed any fields or messed up data types // that would skew all the fields coming after.
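// For example, a field decoded with the wrong width would shift every subsequent field in the SCALE stream, so spot-checking a handful of late fields catches layout mismatches.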
- assert_eq!(v2.max_code_size, 10_485_760); - assert_eq!(v2.validation_upgrade_cooldown, 3600); - assert_eq!(v2.max_pov_size, 5_242_880); - assert_eq!(v2.hrmp_channel_max_message_size, 102_400); - assert_eq!(v2.dispute_max_spam_slots, 2); - assert_eq!(v2.n_delay_tranches, 89); - assert_eq!(v2.ump_max_individual_weight, OldWeight(20_000_000_000)); - assert_eq!(v2.minimum_validation_upgrade_delay, 20); + assert_eq!(v3.max_code_size, 5_242_880); + assert_eq!(v3.validation_upgrade_cooldown, 200); + assert_eq!(v3.max_pov_size, 5_242_880); + assert_eq!(v3.hrmp_channel_max_message_size, 102_400); + assert_eq!(v3.dispute_max_spam_slots, 2); + assert_eq!(v3.n_delay_tranches, 40); + assert_eq!(v3.ump_max_individual_weight, Weight::from_parts(20_000_000_000, 5_242_880)); + assert_eq!(v3.minimum_validation_upgrade_delay, 15); } #[test] - fn test_migrate_to_v3() { + fn test_migrate_to_v4() { // Host configuration has lots of fields. However, in this migration we add only a couple of // fields. The most important part to check are a couple of the last fields. We also pick // extra fields to check arbitrarily, e.g. depending on their position (i.e. the middle) and @@ -267,8 +265,7 @@ mod tests { // We specify only the picked fields and the rest should be provided by the `Default` // implementation. That implementation is copied over between the two types and should work // fine. - let v2 = v3::OldHostConfiguration:: { - ump_max_individual_weight: OldWeight(0x71616e6f6e0au64), + let v3 = v4::OldHostConfiguration:: { needed_approvals: 69, thread_availability_period: 55, hrmp_recipient_deposit: 1337, @@ -282,61 +279,58 @@ mod tests { // Implant the v2 version in the state. frame_support::storage::unhashed::put_raw( &configuration::ActiveConfig::::hashed_key(), - &v2.encode(), + &v3.encode(), ); - migrate_to_v3::(); + migrate_to_v4::(); - let v3 = configuration::ActiveConfig::::get(); + let v4 = configuration::ActiveConfig::::get(); #[rustfmt::skip] { - assert_eq!(v2.max_code_size , v3.max_code_size); - assert_eq!(v2.max_head_data_size , v3.max_head_data_size); - assert_eq!(v2.max_upward_queue_count , v3.max_upward_queue_count); - assert_eq!(v2.max_upward_queue_size , v3.max_upward_queue_size); - assert_eq!(v2.max_upward_message_size , v3.max_upward_message_size); - assert_eq!(v2.max_upward_message_num_per_candidate , v3.max_upward_message_num_per_candidate); - assert_eq!(v2.hrmp_max_message_num_per_candidate , v3.hrmp_max_message_num_per_candidate); - assert_eq!(v2.validation_upgrade_cooldown , v3.validation_upgrade_cooldown); - assert_eq!(v2.validation_upgrade_delay , v3.validation_upgrade_delay); - assert_eq!(v2.max_pov_size , v3.max_pov_size); - assert_eq!(v2.max_downward_message_size , v3.max_downward_message_size); - assert_eq!(v2.hrmp_max_parachain_outbound_channels , v3.hrmp_max_parachain_outbound_channels); - assert_eq!(v2.hrmp_max_parathread_outbound_channels , v3.hrmp_max_parathread_outbound_channels); - assert_eq!(v2.hrmp_sender_deposit , v3.hrmp_sender_deposit); - assert_eq!(v2.hrmp_recipient_deposit , v3.hrmp_recipient_deposit); - assert_eq!(v2.hrmp_channel_max_capacity , v3.hrmp_channel_max_capacity); - assert_eq!(v2.hrmp_channel_max_total_size , v3.hrmp_channel_max_total_size); - assert_eq!(v2.hrmp_max_parachain_inbound_channels , v3.hrmp_max_parachain_inbound_channels); - assert_eq!(v2.hrmp_max_parathread_inbound_channels , v3.hrmp_max_parathread_inbound_channels); - assert_eq!(v2.hrmp_channel_max_message_size , v3.hrmp_channel_max_message_size); - assert_eq!(v2.code_retention_period , 
v3.code_retention_period); - assert_eq!(v2.parathread_cores , v3.parathread_cores); - assert_eq!(v2.parathread_retries , v3.parathread_retries); - assert_eq!(v2.group_rotation_frequency , v3.group_rotation_frequency); - assert_eq!(v2.chain_availability_period , v3.chain_availability_period); - assert_eq!(v2.thread_availability_period , v3.thread_availability_period); - assert_eq!(v2.scheduling_lookahead , v3.scheduling_lookahead); - assert_eq!(v2.max_validators_per_core , v3.max_validators_per_core); - assert_eq!(v2.max_validators , v3.max_validators); - assert_eq!(v2.dispute_period , v3.dispute_period); - assert_eq!(v2.dispute_post_conclusion_acceptance_period, v3.dispute_post_conclusion_acceptance_period); - assert_eq!(v2.dispute_max_spam_slots , v3.dispute_max_spam_slots); - assert_eq!(v2.dispute_conclusion_by_time_out_period , v3.dispute_conclusion_by_time_out_period); - assert_eq!(v2.no_show_slots , v3.no_show_slots); - assert_eq!(v2.n_delay_tranches , v3.n_delay_tranches); - assert_eq!(v2.zeroth_delay_tranche_width , v3.zeroth_delay_tranche_width); - assert_eq!(v2.needed_approvals , v3.needed_approvals); - assert_eq!(v2.relay_vrf_modulo_samples , v3.relay_vrf_modulo_samples); - assert_eq!(v2.pvf_checking_enabled , v3.pvf_checking_enabled); - assert_eq!(v2.pvf_voting_ttl , v3.pvf_voting_ttl); - assert_eq!(v2.minimum_validation_upgrade_delay , v3.minimum_validation_upgrade_delay); - - assert_eq!(v2.ump_service_total_weight, OldWeight(v3.ump_service_total_weight.ref_time())); - assert_eq!(v2.ump_max_individual_weight, OldWeight(v3.ump_max_individual_weight.ref_time())); - assert_eq!(v3.ump_service_total_weight.proof_size(), MAX_POV_SIZE as u64); - assert_eq!(v3.ump_max_individual_weight.proof_size(), MAX_POV_SIZE as u64); + assert_eq!(v3.max_code_size , v4.max_code_size); + assert_eq!(v3.max_head_data_size , v4.max_head_data_size); + assert_eq!(v3.max_upward_queue_count , v4.max_upward_queue_count); + assert_eq!(v3.max_upward_queue_size , v4.max_upward_queue_size); + assert_eq!(v3.max_upward_message_size , v4.max_upward_message_size); + assert_eq!(v3.max_upward_message_num_per_candidate , v4.max_upward_message_num_per_candidate); + assert_eq!(v3.hrmp_max_message_num_per_candidate , v4.hrmp_max_message_num_per_candidate); + assert_eq!(v3.validation_upgrade_cooldown , v4.validation_upgrade_cooldown); + assert_eq!(v3.validation_upgrade_delay , v4.validation_upgrade_delay); + assert_eq!(v3.max_pov_size , v4.max_pov_size); + assert_eq!(v3.max_downward_message_size , v4.max_downward_message_size); + assert_eq!(v3.hrmp_max_parachain_outbound_channels , v4.hrmp_max_parachain_outbound_channels); + assert_eq!(v3.hrmp_max_parathread_outbound_channels , v4.hrmp_max_parathread_outbound_channels); + assert_eq!(v3.hrmp_sender_deposit , v4.hrmp_sender_deposit); + assert_eq!(v3.hrmp_recipient_deposit , v4.hrmp_recipient_deposit); + assert_eq!(v3.hrmp_channel_max_capacity , v4.hrmp_channel_max_capacity); + assert_eq!(v3.hrmp_channel_max_total_size , v4.hrmp_channel_max_total_size); + assert_eq!(v3.hrmp_max_parachain_inbound_channels , v4.hrmp_max_parachain_inbound_channels); + assert_eq!(v3.hrmp_max_parathread_inbound_channels , v4.hrmp_max_parathread_inbound_channels); + assert_eq!(v3.hrmp_channel_max_message_size , v4.hrmp_channel_max_message_size); + assert_eq!(v3.code_retention_period , v4.code_retention_period); + assert_eq!(v3.parathread_cores , v4.parathread_cores); + assert_eq!(v3.parathread_retries , v4.parathread_retries); + assert_eq!(v3.group_rotation_frequency , v4.group_rotation_frequency); 
+ assert_eq!(v3.chain_availability_period , v4.chain_availability_period); + assert_eq!(v3.thread_availability_period , v4.thread_availability_period); + assert_eq!(v3.scheduling_lookahead , v4.scheduling_lookahead); + assert_eq!(v3.max_validators_per_core , v4.max_validators_per_core); + assert_eq!(v3.max_validators , v4.max_validators); + assert_eq!(v3.dispute_period , v4.dispute_period); + assert_eq!(v3.dispute_post_conclusion_acceptance_period, v4.dispute_post_conclusion_acceptance_period); + assert_eq!(v3.dispute_max_spam_slots , v4.dispute_max_spam_slots); + assert_eq!(v3.dispute_conclusion_by_time_out_period , v4.dispute_conclusion_by_time_out_period); + assert_eq!(v3.no_show_slots , v4.no_show_slots); + assert_eq!(v3.n_delay_tranches , v4.n_delay_tranches); + assert_eq!(v3.zeroth_delay_tranche_width , v4.zeroth_delay_tranche_width); + assert_eq!(v3.needed_approvals , v4.needed_approvals); + assert_eq!(v3.relay_vrf_modulo_samples , v4.relay_vrf_modulo_samples); + assert_eq!(v3.pvf_checking_enabled , v4.pvf_checking_enabled); + assert_eq!(v3.pvf_voting_ttl , v4.pvf_voting_ttl); + assert_eq!(v3.minimum_validation_upgrade_delay , v4.minimum_validation_upgrade_delay); + assert_eq!(v3.ump_service_total_weight , v4.ump_service_total_weight); + assert_eq!(v3.ump_max_individual_weight , v4.ump_max_individual_weight); }; // ; makes this a statement. `rustfmt::skip` cannot be put on an expression. }); } diff --git a/runtime/parachains/src/configuration/tests.rs b/runtime/parachains/src/configuration/tests.rs index 6f2faf6cb204..e9e6c59f2c27 100644 --- a/runtime/parachains/src/configuration/tests.rs +++ b/runtime/parachains/src/configuration/tests.rs @@ -293,6 +293,10 @@ fn consistency_bypass_works() { fn setting_pending_config_members() { new_test_ext(Default::default()).execute_with(|| { let new_config = HostConfiguration { + async_backing_parameters: AsyncBackingParameters { + max_candidate_depth: 0, + allowed_ancestry_len: 0, + }, validation_upgrade_cooldown: 100, validation_upgrade_delay: 10, code_retention_period: 5, diff --git a/runtime/parachains/src/inclusion/tests.rs b/runtime/parachains/src/inclusion/tests.rs index fe8d96f7d945..550e52a72cb6 100644 --- a/runtime/parachains/src/inclusion/tests.rs +++ b/runtime/parachains/src/inclusion/tests.rs @@ -1913,9 +1913,6 @@ fn check_allowed_relay_parents() { let chain_b = ParaId::from(2); let thread_a = ParaId::from(3); - // The block number of the relay-parent for testing. 
- const RELAY_PARENT_NUM: BlockNumber = 4; - let paras = vec![ (chain_a, ParaKind::Parachain), (chain_b, ParaKind::Parachain), @@ -1984,10 +1981,25 @@ fn check_allowed_relay_parents() { let relay_parent_c = (3, Hash::repeat_byte(0x3)); let mut allowed_relay_parents = AllowedRelayParentsTracker::default(); - let max_len = RELAY_PARENT_NUM as usize; - allowed_relay_parents.update(relay_parent_a.1, Hash::zero(), relay_parent_a.0, max_len); - allowed_relay_parents.update(relay_parent_b.1, Hash::zero(), relay_parent_b.0, max_len); - allowed_relay_parents.update(relay_parent_c.1, Hash::zero(), relay_parent_c.0, max_len); + let max_ancestry_len = 3; + allowed_relay_parents.update( + relay_parent_a.1, + Hash::zero(), + relay_parent_a.0, + max_ancestry_len, + ); + allowed_relay_parents.update( + relay_parent_b.1, + Hash::zero(), + relay_parent_b.0, + max_ancestry_len, + ); + allowed_relay_parents.update( + relay_parent_c.1, + Hash::zero(), + relay_parent_c.0, + max_ancestry_len, + ); let chain_a_assignment = CoreAssignment { core: CoreIndex::from(0), diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs index 86e116a2c060..62573a44e8c6 100644 --- a/runtime/parachains/src/paras_inherent/mod.rs +++ b/runtime/parachains/src/paras_inherent/mod.rs @@ -30,8 +30,7 @@ use crate::{ metrics::METRICS, paras, scheduler::{self, CoreAssignment, FreedReason}, - shared::{self, ALLOWED_RELAY_PARENT_LOOKBACK}, - ump, ParaId, + shared, ump, ParaId, }; use bitvec::prelude::BitVec; use frame_support::{ @@ -332,6 +331,7 @@ impl Pallet { ); let now = >::block_number(); + let config = >::config(); // Before anything else, update the allowed relay-parents. { @@ -343,7 +343,7 @@ impl Pallet { parent_hash, parent_storage_root, parent_number, - ALLOWED_RELAY_PARENT_LOOKBACK, + config.async_backing_parameters.allowed_ancestry_len, ); }); } @@ -364,8 +364,6 @@ impl Pallet { .map_err(|_e| Error::::DisputeStatementsUnsortedOrDuplicates)?; let (checked_disputes, total_consumed_weight) = { - // Obtain config params.. 
- let config = >::config(); let max_spam_slots = config.dispute_max_spam_slots; let post_conclusion_acceptance_period = config.dispute_post_conclusion_acceptance_period; @@ -586,6 +584,7 @@ impl Pallet { disputes.len() ); + let config = >::config(); let parent_hash = >::parent_hash(); let now = >::block_number(); @@ -615,7 +614,7 @@ impl Pallet { parent_hash, parent_storage_root, parent_number, - ALLOWED_RELAY_PARENT_LOOKBACK, + config.async_backing_parameters.allowed_ancestry_len, ); tracker @@ -626,7 +625,6 @@ impl Pallet { log::debug!(target: LOG_TARGET, "Found duplicate statement sets, retaining the first"); } - let config = >::config(); let max_spam_slots = config.dispute_max_spam_slots; let post_conclusion_acceptance_period = config.dispute_post_conclusion_acceptance_period; diff --git a/runtime/parachains/src/runtime_api_impl/vstaging.rs b/runtime/parachains/src/runtime_api_impl/vstaging.rs index 3fe6e2a0b979..eb6c1cf95abc 100644 --- a/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -19,7 +19,9 @@ use crate::{configuration, disputes, dmp, hrmp, initializer, paras, shared, ump}; use primitives::{ v2::{CandidateHash, DisputeState, Id as ParaId, SessionIndex}, - vstaging::{Constraints, InboundHrmpLimitations, OutboundHrmpChannelLimitations}, + vstaging::{ + AsyncBackingParameters, Constraints, InboundHrmpLimitations, OutboundHrmpChannelLimitations, + }, }; use sp_std::prelude::*; @@ -33,6 +35,7 @@ pub fn get_session_disputes( pub fn validity_constraints( para_id: ParaId, ) -> Option> { + let config = >::config(); // Async backing is only expected to be enabled with a tracker capacity of 1. // Subsequent configuration update gets applied on new session, which always // clears the buffer. @@ -40,7 +43,10 @@ pub fn validity_constraints( // Thus, minimum relay parent is ensured to have asynchronous backing enabled. let now = >::block_number(); let min_relay_parent_number = >::allowed_relay_parents() - .hypothetical_earliest_block_number(now, shared::ALLOWED_RELAY_PARENT_LOOKBACK); + .hypothetical_earliest_block_number( + now, + config.async_backing_parameters.allowed_ancestry_len, + ); let required_parent = >::para_head(para_id)?; let validation_code_hash = >::current_code_hash(para_id)?; @@ -52,7 +58,6 @@ pub fn validity_constraints( Some(block_num).zip(>::future_code_hash(para_id)) }); - let config = >::config(); let (ump_msg_count, ump_total_bytes) = >::relay_dispatch_queue_size(para_id); let ump_remaining = config.max_upward_queue_count - ump_msg_count; let ump_remaining_bytes = config.max_upward_queue_size - ump_total_bytes; @@ -85,3 +90,8 @@ pub fn validity_constraints( future_validation_code, }) } + +/// Implementation for `StagingAsyncBackingParameters` function from the runtime API +pub fn async_backing_parameters() -> AsyncBackingParameters { + >::config().async_backing_parameters +} diff --git a/runtime/parachains/src/shared.rs b/runtime/parachains/src/shared.rs index cb125d8ef884..28012369ad27 100644 --- a/runtime/parachains/src/shared.rs +++ b/runtime/parachains/src/shared.rs @@ -39,10 +39,6 @@ pub(crate) const SESSION_DELAY: SessionIndex = 2; #[cfg(test)] mod tests; -/// The maximum amount of relay-parent lookback. -// TODO: put this in the configuration module (https://github.com/paritytech/polkadot/issues/4841). -pub const ALLOWED_RELAY_PARENT_LOOKBACK: usize = 4; - /// Information about past relay-parents. 
#[derive(Encode, Decode, Default, TypeInfo)] pub struct AllowedRelayParentsTracker { @@ -64,22 +60,23 @@ impl AllowedRelayParentsTracker { /// Add a new relay-parent to the allowed relay parents, along with info about the header. - /// Provide a maximum length for the buffer, which will cause old relay-parents to be pruned. + /// Provide a maximum ancestry length for the buffer, which will cause old relay-parents to be pruned. pub(crate) fn update( &mut self, relay_parent: Hash, state_root: Hash, number: BlockNumber, - max_len: usize, + max_ancestry_len: u32, ) { + // + 1 for the most recent block, which is always allowed. + let buffer_size_limit = max_ancestry_len as usize + 1; + self.buffer.push_back((relay_parent, state_root)); self.latest_number = number; - while self.buffer.len() > max_len { + while self.buffer.len() > buffer_size_limit { let _ = self.buffer.pop_front(); } - // if max_len == 0, then latest_number is nonsensical. Otherwise, it's fine. - // We only allow relay parents within the same sessions, the buffer // gets cleared on session changes. } @@ -114,11 +111,11 @@ impl pub(crate) fn hypothetical_earliest_block_number( &self, now: BlockNumber, - max_len: usize, + max_ancestry_len: u32, ) -> BlockNumber { - let allowed_ancestry_len = max_len.saturating_sub(1).min(self.buffer.len()); + let allowed_ancestry_len = max_ancestry_len.min(self.buffer.len() as u32); - now - BlockNumber::from(allowed_ancestry_len as u32) + now - allowed_ancestry_len.into() } } diff --git a/runtime/parachains/src/shared/tests.rs b/runtime/parachains/src/shared/tests.rs index 5594c9420d03..e30dc6a33d07 100644 --- a/runtime/parachains/src/shared/tests.rs +++ b/runtime/parachains/src/shared/tests.rs @@ -32,25 +32,25 @@ fn tracker_earliest_block_number() { // Test it on an empty tracker. let now: u32 = 1; - let max_len = 5; - assert_eq!(tracker.hypothetical_earliest_block_number(now, max_len), now); + let max_ancestry_len = 5; + assert_eq!(tracker.hypothetical_earliest_block_number(now, max_ancestry_len), now); // Push a single block into the tracker, suppose max capacity is 1. - let max_len = 1; - tracker.update(Hash::zero(), Hash::zero(), 0, max_len); - assert_eq!(tracker.hypothetical_earliest_block_number(now, max_len), now); + let max_ancestry_len = 0; + tracker.update(Hash::zero(), Hash::zero(), 0, max_ancestry_len); + assert_eq!(tracker.hypothetical_earliest_block_number(now, max_ancestry_len), now); // Test a greater capacity. - let max_len = 5; + let max_ancestry_len = 4; let now = 4; for i in 1..now { - tracker.update(Hash::zero(), Hash::zero(), i, max_len); - assert_eq!(tracker.hypothetical_earliest_block_number(i + 1, max_len), 0); + tracker.update(Hash::zero(), Hash::zero(), i, max_ancestry_len); + assert_eq!(tracker.hypothetical_earliest_block_number(i + 1, max_ancestry_len), 0); } // Capacity exceeded. 
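// After this update the buffer holds blocks 0 through 4, but only `max_ancestry_len` of them count as ancestors, so the hypothetical earliest block number advances from 0 to 1.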
- tracker.update(Hash::zero(), Hash::zero(), now, max_len); - assert_eq!(tracker.hypothetical_earliest_block_number(now + 1, max_len), 1); + tracker.update(Hash::zero(), Hash::zero(), now, max_ancestry_len); + assert_eq!(tracker.hypothetical_earliest_block_number(now + 1, max_ancestry_len), 1); } #[test] diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index c62d781a9f5b..11fd85c1a573 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -1616,7 +1616,7 @@ pub type Executive = frame_executive::Executive< pallet_democracy::migrations::v1::Migration, pallet_multisig::migrations::v1::MigrateToV1, // "Properly migrate weights to v2" - parachains_configuration::migration::v3::MigrateToV3, + parachains_configuration::migration::v4::MigrateToV4, pallet_election_provider_multi_phase::migrations::v1::MigrateToV1, pallet_fast_unstake::migrations::v1::MigrateToV1, ), @@ -1847,10 +1847,6 @@ sp_api::impl_runtime_apis! { { parachains_runtime_api_impl::validation_code_hash::(para_id, assumption) } - - fn staging_validity_constraints(_: ParaId) -> Option { - unimplemented!("Staging API not implemented"); - } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 7c9542d45ceb..d08d3a8bbd45 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -1463,7 +1463,7 @@ pub type Executive = frame_executive::Executive< pallet_democracy::migrations::v1::Migration, pallet_multisig::migrations::v1::MigrateToV1, // "Properly migrate weights to v2" - parachains_configuration::migration::v3::MigrateToV3, + parachains_configuration::migration::v4::MigrateToV4, ), >; /// The payload being signed in transactions. @@ -1608,7 +1608,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(3)] + #[api_version(99)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1711,8 +1711,12 @@ sp_api::impl_runtime_apis! { runtime_parachains::runtime_api_impl::vstaging::get_session_disputes::() } - fn staging_validity_constraints(_: ParaId) -> Option { - unimplemented!("Staging API not implemented"); + fn staging_validity_constraints(para_id: ParaId) -> Option { + runtime_parachains::runtime_api_impl::vstaging::validity_constraints::(para_id) + } + + fn staging_async_backing_parameters() -> primitives::vstaging::AsyncBackingParameters { + runtime_parachains::runtime_api_impl::vstaging::async_backing_parameters::() } } diff --git a/runtime/test-runtime/src/lib.rs b/runtime/test-runtime/src/lib.rs index 08935a1270ac..0210f1ecd8b7 100644 --- a/runtime/test-runtime/src/lib.rs +++ b/runtime/test-runtime/src/lib.rs @@ -916,10 +916,6 @@ sp_api::impl_runtime_apis! 
{ { runtime_impl::validation_code_hash::(para_id, assumption) } - - fn staging_validity_constraints(_: ParaId) -> Option { - unimplemented!("Staging API not implemented"); - } } impl beefy_primitives::BeefyApi for Runtime { diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index d35d94e0f5ab..d69bdd88b2ea 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -1228,7 +1228,7 @@ pub type Executive = frame_executive::Executive< pallet_scheduler::migration::v3::MigrateToV4, pallet_multisig::migrations::v1::MigrateToV1, // "Properly migrate weights to v2" - parachains_configuration::migration::v3::MigrateToV3, + parachains_configuration::migration::v4::MigrateToV4, pallet_election_provider_multi_phase::migrations::v1::MigrateToV1, pallet_fast_unstake::migrations::v1::MigrateToV1, ), @@ -1346,7 +1346,7 @@ sp_api::impl_runtime_apis! { } } - #[api_version(3)] + #[api_version(99)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1449,8 +1449,12 @@ sp_api::impl_runtime_apis! { runtime_parachains::runtime_api_impl::vstaging::get_session_disputes::() } - fn staging_validity_constraints(_: ParaId) -> Option { - unimplemented!("Staging API not implemented"); + fn staging_validity_constraints(para_id: ParaId) -> Option { + runtime_parachains::runtime_api_impl::vstaging::validity_constraints::(para_id) + } + + fn staging_async_backing_parameters() -> primitives::vstaging::AsyncBackingParameters { + runtime_parachains::runtime_api_impl::vstaging::async_backing_parameters::() } } From 9f27a2f0e82ab1860e36e2ec45c88afa0209d1f2 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Wed, 7 Dec 2022 22:20:21 -0500 Subject: [PATCH 23/76] Use real prospective parachains subsystem (#6407) --- Cargo.lock | 1 + node/core/prospective-parachains/src/lib.rs | 7 +++++++ node/service/Cargo.toml | 2 ++ node/service/src/overseer.rs | 5 +++-- 4 files changed, 13 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d2619143ddc1..826fe6be0512 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7213,6 +7213,7 @@ dependencies = [ "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", + "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf-checker", "polkadot-node-core-runtime-api", diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 35a9422efc84..dc56136c5588 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -79,6 +79,13 @@ impl View { #[derive(Default)] pub struct ProspectiveParachainsSubsystem; +impl ProspectiveParachainsSubsystem { + /// Create a new instance of the `ProspectiveParachainsSubsystem`. 
+ pub fn new() -> Self { + Self + } +} + #[overseer::subsystem(ProspectiveParachains, error = SubsystemError, prefix = self::overseer)] impl ProspectiveParachainsSubsystem where diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index e6e073546a13..8f9a80c6f0ac 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -120,6 +120,7 @@ polkadot-node-core-candidate-validation = { path = "../core/candidate-validation polkadot-node-core-chain-api = { path = "../core/chain-api", optional = true } polkadot-node-core-chain-selection = { path = "../core/chain-selection", optional = true } polkadot-node-core-dispute-coordinator = { path = "../core/dispute-coordinator", optional = true } +polkadot-node-core-prospective-parachains = { path = "../core/prospective-parachains", optional = true } polkadot-node-core-provisioner = { path = "../core/provisioner", optional = true } polkadot-node-core-pvf-checker = { path = "../core/pvf-checker", optional = true } polkadot-node-core-runtime-api = { path = "../core/runtime-api", optional = true } @@ -158,6 +159,7 @@ full-node = [ "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", + "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-runtime-api", "polkadot-statement-distribution", diff --git a/node/service/src/overseer.rs b/node/service/src/overseer.rs index ef7c8de74e9c..a628494d2017 100644 --- a/node/service/src/overseer.rs +++ b/node/service/src/overseer.rs @@ -70,6 +70,7 @@ pub use polkadot_node_core_candidate_validation::CandidateValidationSubsystem; pub use polkadot_node_core_chain_api::ChainApiSubsystem; pub use polkadot_node_core_chain_selection::ChainSelectionSubsystem; pub use polkadot_node_core_dispute_coordinator::DisputeCoordinatorSubsystem; +pub use polkadot_node_core_prospective_parachains::ProspectiveParachainsSubsystem; pub use polkadot_node_core_provisioner::ProvisionerSubsystem; pub use polkadot_node_core_pvf_checker::PvfCheckerSubsystem; pub use polkadot_node_core_runtime_api::RuntimeApiSubsystem; @@ -199,7 +200,7 @@ pub fn prepared_overseer_builder<'a, Spawner, RuntimeClient>( DisputeCoordinatorSubsystem, DisputeDistributionSubsystem, ChainSelectionSubsystem, - polkadot_overseer::DummySubsystem, // TODO [now]: use real prospective parachains + ProspectiveParachainsSubsystem, >, Error, > @@ -321,7 +322,7 @@ where Metrics::register(registry)?, )) .chain_selection(ChainSelectionSubsystem::new(chain_selection_config, parachains_db)) - .prospective_parachains(polkadot_overseer::DummySubsystem) + .prospective_parachains(ProspectiveParachainsSubsystem::new()) .leaves(Vec::from_iter( leaves .into_iter() From cb193fbf5e5d7d58e189cd8f86d1fb57d3e600d0 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Tue, 24 Jan 2023 20:03:01 +0400 Subject: [PATCH 24/76] Backport `HypotheticalFrontier` into the feature branch (#6605) * implement more general HypotheticalFrontier * fmt * drop unneeded request Co-authored-by: Robert Habermeier --- .../src/fragment_tree.rs | 261 +++++++++++++++--- node/core/prospective-parachains/src/lib.rs | 96 ++++++- .../approval-distribution/src/tests.rs | 3 +- node/subsystem-types/src/messages.rs | 69 ++++- .../src/inclusion_emulator/staging.rs | 74 +++-- 5 files changed, 431 insertions(+), 72 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index eb803808c7c9..30b8801971cd 
100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -54,7 +54,13 @@ //! bounded and in practice will not exceed a few thousand at any time. This naive implementation //! will still perform fairly well under these conditions, despite being somewhat wasteful of memory. -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::{ + borrow::Cow, + collections::{ + hash_map::{Entry, HashMap}, + BTreeMap, HashSet, + }, +}; use super::LOG_TARGET; use bitvec::prelude::*; @@ -90,8 +96,7 @@ impl CandidateStorage { CandidateStorage { by_parent_head: HashMap::new(), by_candidate_hash: HashMap::new() } } - /// Introduce a new candidate. The candidate passed to this function - /// should have been seconded before introduction. + /// Introduce a new candidate. pub fn add_candidate( &mut self, candidate: CommittedCandidateReceipt, @@ -112,9 +117,9 @@ impl CandidateStorage { let entry = CandidateEntry { candidate_hash, relay_parent: candidate.descriptor.relay_parent, - state: CandidateState::Seconded, + state: CandidateState::Introduced, candidate: ProspectiveCandidate { - commitments: candidate.commitments, + commitments: Cow::Owned(candidate.commitments), collator: candidate.descriptor.collator, collator_signature: candidate.descriptor.signature, persisted_validation_data, @@ -130,6 +135,28 @@ impl CandidateStorage { Ok(candidate_hash) } + /// Remove a candidate from the store. + pub fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { + if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) { + let parent_head_hash = entry.candidate.persisted_validation_data.parent_head.hash(); + if let Entry::Occupied(mut e) = self.by_parent_head.entry(parent_head_hash) { + e.get_mut().remove(&candidate_hash); + if e.get().is_empty() { + e.remove(); + } + } + } + } + + /// Note that an existing candidate has been seconded. + pub fn mark_seconded(&mut self, candidate_hash: &CandidateHash) { + if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { + if entry.state != CandidateState::Backed { + entry.state = CandidateState::Seconded; + } + } + } + /// Note that an existing candidate has been backed. pub fn mark_backed(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { @@ -191,6 +218,9 @@ impl CandidateStorage { /// Candidates aren't even considered until they've at least been seconded. #[derive(Debug, PartialEq)] enum CandidateState { + /// The candidate has been introduced in a spam-protected way but + /// is not necessarily backed. + Introduced, /// The candidate has been seconded. Seconded, /// The candidate has been completely backed by the group. @@ -200,7 +230,7 @@ enum CandidateState { struct CandidateEntry { candidate_hash: CandidateHash, relay_parent: Hash, - candidate: ProspectiveCandidate, + candidate: ProspectiveCandidate<'static>, state: CandidateState, } @@ -305,6 +335,38 @@ enum NodePointer { Storage(usize), } +/// A hypothetical candidate, which may or may not exist in +/// the fragment tree already. +pub(crate) enum HypotheticalCandidate<'a> { + Complete { + receipt: Cow<'a, CommittedCandidateReceipt>, + persisted_validation_data: Cow<'a, PersistedValidationData>, + }, + Incomplete { + relay_parent: Hash, + parent_head_data_hash: Hash, + }, +} + +impl<'a> HypotheticalCandidate<'a> { + fn parent_head_data_hash(&self) -> Hash { + match *self { + HypotheticalCandidate::Complete { ref persisted_validation_data, .. 
} => + persisted_validation_data.as_ref().parent_head.hash(), + HypotheticalCandidate::Incomplete { ref parent_head_data_hash, .. } => + *parent_head_data_hash, + } + } + + fn relay_parent(&self) -> Hash { + match *self { + HypotheticalCandidate::Complete { ref receipt, .. } => + receipt.descriptor().relay_parent, + HypotheticalCandidate::Incomplete { ref relay_parent, .. } => *relay_parent, + } + } +} + /// This is a tree of candidates based on some underlying storage of candidates /// and a scope. pub(crate) struct FragmentTree { @@ -449,11 +511,10 @@ impl FragmentTree { /// /// If the candidate is already known, this returns the actual depths where this /// candidate is part of the tree. - pub(crate) fn hypothetical_depths( + pub(crate) fn hypothetical_depths<'a>( &self, hash: CandidateHash, - parent_head_data_hash: Hash, - candidate_relay_parent: Hash, + candidate: HypotheticalCandidate<'a>, ) -> Vec { // if known. if let Some(depths) = self.candidates.get(&hash) { @@ -461,35 +522,89 @@ impl FragmentTree { } // if out of scope. - let candidate_relay_parent_number = - if self.scope.relay_parent.hash == candidate_relay_parent { - self.scope.relay_parent.number - } else if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) { - info.number - } else { - return Vec::new() - }; + let candidate_relay_parent = candidate.relay_parent(); + let candidate_relay_parent = if self.scope.relay_parent.hash == candidate_relay_parent { + self.scope.relay_parent.clone() + } else if let Some(info) = self.scope.ancestors_by_hash.get(&candidate_relay_parent) { + info.clone() + } else { + return Vec::new() + }; let max_depth = self.scope.max_depth; let mut depths = bitvec![u16, Msb0; 0; max_depth + 1]; // iterate over all nodes < max_depth where parent head-data matches, // relay-parent number is <= candidate, and depth < max_depth. - for node in &self.nodes { - if node.depth == max_depth { + let node_pointers = (0..self.nodes.len()).map(NodePointer::Storage); + for parent_pointer in std::iter::once(NodePointer::Root).chain(node_pointers) { + let (modifications, child_depth, earliest_rp) = match parent_pointer { + NodePointer::Root => + (ConstraintModifications::identity(), 0, self.scope.earliest_relay_parent()), + NodePointer::Storage(ptr) => { + let node = &self.nodes[ptr]; + let parent_rp = self + .scope + .ancestor_by_hash(&node.relay_parent()) + .expect("nodes in tree can only contain ancestors within scope; qed"); + + (node.cumulative_modifications.clone(), node.depth + 1, parent_rp) + }, + }; + + if child_depth > max_depth { continue } - if node.fragment.relay_parent().number > candidate_relay_parent_number { + + if earliest_rp.number > candidate_relay_parent.number { continue } - if node.head_data_hash == parent_head_data_hash { - depths.set(node.depth + 1, true); - } - } - // compare against root as well. - if self.scope.base_constraints.required_parent.hash() == parent_head_data_hash { - depths.set(0, true); + let child_constraints = + match self.scope.base_constraints.apply_modifications(&modifications) { + Err(e) => { + gum::debug!( + target: LOG_TARGET, + new_parent_head = ?modifications.required_parent, + err = ?e, + "Failed to apply modifications", + ); + + continue + }, + Ok(c) => c, + }; + + let parent_head_hash = candidate.parent_head_data_hash(); + if parent_head_hash == child_constraints.required_parent.hash() { + // We do additional checks for complete candidates. 
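+ // An `Incomplete` candidate can only be matched on parent head-data
+ // and relay-parent. For a `Complete` one we can go further:
+ // constructing a `Fragment` below runs the same commitment checks
+ // (e.g. code-upgrade and HRMP limits) that apply when a node is
+ // actually added to the tree, so an invalid candidate reports no depths.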
+ if let HypotheticalCandidate::Complete { + ref receipt, + ref persisted_validation_data, + } = candidate + { + let prospective_candidate = ProspectiveCandidate { + commitments: Cow::Borrowed(&receipt.commitments), + collator: receipt.descriptor().collator.clone(), + collator_signature: receipt.descriptor().signature.clone(), + persisted_validation_data: persisted_validation_data.as_ref().clone(), + pov_hash: receipt.descriptor().pov_hash, + validation_code_hash: receipt.descriptor().validation_code_hash, + }; + + if Fragment::new( + candidate_relay_parent.clone(), + child_constraints, + prospective_candidate, + ) + .is_err() + { + continue + } + } + + depths.set(child_depth, true); + } } depths.iter_ones().collect() @@ -623,11 +738,11 @@ impl FragmentTree { let f = Fragment::new( relay_parent.clone(), child_constraints.clone(), - candidate.candidate.clone(), + candidate.candidate.partial_clone(), ); match f { - Ok(f) => f, + Ok(f) => f.into_owned(), Err(e) => { gum::debug!( target: LOG_TARGET, @@ -645,7 +760,6 @@ impl FragmentTree { let mut cumulative_modifications = modifications.clone(); cumulative_modifications.stack(fragment.constraint_modifications()); - let head_data_hash = fragment.candidate().commitments.head_data.hash(); let node = FragmentNode { parent: parent_pointer, fragment, @@ -653,7 +767,6 @@ impl FragmentTree { depth: child_depth, cumulative_modifications, children: Vec::new(), - head_data_hash, }; self.insert_node(node); @@ -668,11 +781,10 @@ impl FragmentTree { struct FragmentNode { // A pointer to the parent node. parent: NodePointer, - fragment: Fragment, + fragment: Fragment<'static>, candidate_hash: CandidateHash, depth: usize, cumulative_modifications: ConstraintModifications, - head_data_hash: Hash, children: Vec<(NodePointer, CandidateHash)>, } @@ -1294,8 +1406,10 @@ mod tests { assert_eq!( tree.hypothetical_depths( candidate_a_hash, - HeadData::from(vec![0x0a]).hash(), - relay_parent_a, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, ), vec![0, 2, 4], ); @@ -1303,8 +1417,10 @@ mod tests { assert_eq!( tree.hypothetical_depths( candidate_b_hash, - HeadData::from(vec![0x0b]).hash(), - relay_parent_a, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + relay_parent: relay_parent_a, + }, ), vec![1, 3], ); @@ -1312,8 +1428,10 @@ mod tests { assert_eq!( tree.hypothetical_depths( CandidateHash(Hash::repeat_byte(21)), - HeadData::from(vec![0x0a]).hash(), - relay_parent_a, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, ), vec![0, 2, 4], ); @@ -1321,10 +1439,71 @@ mod tests { assert_eq!( tree.hypothetical_depths( CandidateHash(Hash::repeat_byte(22)), - HeadData::from(vec![0x0b]).hash(), - relay_parent_a, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + relay_parent: relay_parent_a, + }, ), vec![1, 3] ); } + + #[test] + fn hypothetical_depths_stricter_on_complete() { + let storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 1000, // watermark is illegal + ); + + let candidate_a_hash = candidate_a.hash(); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let relay_parent_a_info 
= RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + assert_eq!( + tree.hypothetical_depths( + candidate_a_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, + ), + vec![0], + ); + + assert!(tree + .hypothetical_depths( + candidate_a_hash, + HypotheticalCandidate::Complete { + receipt: Cow::Owned(candidate_a), + persisted_validation_data: Cow::Owned(pvd_a), + }, + ) + .is_empty()); + } } diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index dc56136c5588..b4b3867fb734 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -27,15 +27,18 @@ //! This also handles concerns such as the relay-chain being forkful, //! session changes, predicting validator group assignments. -use std::collections::{HashMap, HashSet}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, +}; use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ - ChainApiMessage, FragmentTreeMembership, HypotheticalDepthRequest, - ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage, - RuntimeApiRequest, + ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, HypotheticalDepthRequest, + HypotheticalFrontierRequest, ProspectiveParachainsMessage, + ProspectiveValidationDataRequest, RuntimeApiMessage, RuntimeApiRequest, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; @@ -134,6 +137,8 @@ async fn run_iteration(ctx: &mut Context, view: &mut View) -> Result<() ) => answer_get_backable_candidate(&view, relay_parent, para, required_path, tx), ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx) => answer_hypothetical_depths_request(&view, request, tx), + ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx) => + answer_hypothetical_frontier_request(&view, request, tx), ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) => answer_tree_membership_request(&view, para, candidate, tx), ProspectiveParachainsMessage::GetMinimumRelayParents(relay_parent, tx) => @@ -450,8 +455,10 @@ fn answer_hypothetical_depths_request( Some(fragment_tree) => { let depths = fragment_tree.hypothetical_depths( request.candidate_hash, - request.parent_head_data_hash, - request.candidate_relay_parent, + crate::fragment_tree::HypotheticalCandidate::Incomplete { + relay_parent: request.candidate_relay_parent, + parent_head_data_hash: request.parent_head_data_hash, + }, ); let _ = tx.send(depths); }, @@ -461,21 +468,88 @@ fn answer_hypothetical_depths_request( } } -fn answer_tree_membership_request( +fn answer_hypothetical_frontier_request( view: &View, + request: HypotheticalFrontierRequest, + tx: oneshot::Sender>, +) { + let mut response = Vec::with_capacity(request.candidates.len()); + for candidate in request.candidates { + response.push((candidate, Vec::new())); + } + + let required_active_leaf = request.fragment_tree_relay_parent; + for (active_leaf, leaf_view) in view + .active_leaves + .iter() + .filter(|(h, _)| required_active_leaf.as_ref().map_or(true, |x| h == &x)) + { + for &mut (ref c, ref mut 
membership) in &mut response { + let fragment_tree = match leaf_view.fragment_trees.get(&c.candidate_para()) { + None => continue, + Some(f) => f, + }; + + let (c_hash, hypothetical) = match c { + HypotheticalCandidate::Complete { + candidate_hash, + receipt, + persisted_validation_data, + } => ( + *candidate_hash, + fragment_tree::HypotheticalCandidate::Complete { + receipt: Cow::Borrowed(&*receipt), + persisted_validation_data: Cow::Borrowed(&*persisted_validation_data), + }, + ), + HypotheticalCandidate::Incomplete { + candidate_hash, + parent_head_data_hash, + candidate_relay_parent, + .. + } => ( + *candidate_hash, + fragment_tree::HypotheticalCandidate::Incomplete { + relay_parent: *candidate_relay_parent, + parent_head_data_hash: *parent_head_data_hash, + }, + ), + }; + + let depths = fragment_tree.hypothetical_depths(c_hash, hypothetical); + + if !depths.is_empty() { + membership.push((*active_leaf, depths)); + } + } + } + + let _ = tx.send(response); +} + +fn fragment_tree_membership( + active_leaves: &HashMap, para: ParaId, candidate: CandidateHash, - tx: oneshot::Sender, -) { +) -> FragmentTreeMembership { let mut membership = Vec::new(); - for (relay_parent, view_data) in &view.active_leaves { + for (relay_parent, view_data) in active_leaves { if let Some(tree) = view_data.fragment_trees.get(¶) { if let Some(depths) = tree.candidate(&candidate) { membership.push((*relay_parent, depths)); } } } - let _ = tx.send(membership); + membership +} + +fn answer_tree_membership_request( + view: &View, + para: ParaId, + candidate: CandidateHash, + tx: oneshot::Sender, +) { + let _ = tx.send(fragment_tree_membership(&view.active_leaves, para, candidate)); } fn answer_minimum_relay_parents_request( diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index e9d0f74cd098..62a3d71ae02e 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -2429,7 +2429,8 @@ fn batch_test_round(message_count: usize) { .collect(); let peer = PeerId::random(); - send_assignments_batched(&mut sender, assignments.clone(), peer, ValidationVersion::V1).await; + send_assignments_batched(&mut sender, assignments.clone(), peer, ValidationVersion::V1) + .await; send_approvals_batched(&mut sender, approvals.clone(), peer, ValidationVersion::V1).await; // Check expected assignments batches. diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 731078471caa..6326f0d3d35e 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -875,8 +875,66 @@ pub enum GossipSupportMessage { NetworkBridgeUpdate(NetworkBridgeEvent), } +/// A hypothetical candidate to be evaluated for frontier membership +/// in the prospective parachains subsystem. +/// +/// Hypothetical candidates are either complete or incomplete. +/// Complete candidates have already had their (potentially heavy) +/// candidate receipt fetched, while incomplete candidates are simply +/// claims about properties that a fetched candidate would have. +/// +/// Complete candidates can be evaluated more strictly than incomplete candidates. +#[derive(Debug, PartialEq, Eq, Clone)] +pub enum HypotheticalCandidate { + /// A complete candidate. + Complete { + /// The hash of the candidate. + candidate_hash: CandidateHash, + /// The receipt of the candidate. + receipt: Arc, + /// The persisted validation data of the candidate. 
+ persisted_validation_data: PersistedValidationData, + }, + /// An incomplete candidate. + Incomplete { + /// The claimed hash of the candidate. + candidate_hash: CandidateHash, + /// The claimed para-ID of the candidate. + candidate_para: ParaId, + /// The claimed head-data hash of the candidate. + parent_head_data_hash: Hash, + /// The claimed relay parent of the candidate. + candidate_relay_parent: Hash, + }, +} + +impl HypotheticalCandidate { + /// Get the `ParaId` of the hypothetical candidate. + pub fn candidate_para(&self) -> ParaId { + match *self { + HypotheticalCandidate::Complete { ref receipt, .. } => receipt.descriptor().para_id, + HypotheticalCandidate::Incomplete { candidate_para, .. } => candidate_para, + } + } +} + +/// Request specifying which candidates are either already included +/// or might be included in the hypothetical frontier of fragment trees +/// under a given active leaf. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct HypotheticalFrontierRequest { + /// Candidates, in arbitrary order, which should be checked for + /// possible membership in fragment trees. + pub candidates: Vec, + /// Either a specific fragment tree to check, otherwise all. + pub fragment_tree_relay_parent: Option, +} + /// A request for the depths a hypothetical candidate would occupy within -/// some fragment tree. +/// some fragment tree. Note that this is not an absolute indication of whether +/// a candidate can be added to a fragment tree, as the commitments are not +/// considered in this request. +// TODO [now]: file issue making this obsolete in favor of `HypotheticalFrontierRequest` #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub struct HypotheticalDepthRequest { /// The hash of the potential candidate. @@ -937,6 +995,15 @@ pub enum ProspectiveParachainsMessage { /// Returns an empty vector either if there is no such depth or the fragment tree relay-parent /// is unknown. GetHypotheticalDepth(HypotheticalDepthRequest, oneshot::Sender>), + /// Get the hypothetical frontier membership of candidates with the given properties + /// under the specified active leaves' fragment trees. + /// + /// For any candidate which is already known, this returns the depths the candidate + /// occupies. + GetHypotheticalFrontier( + HypotheticalFrontierRequest, + oneshot::Sender>, + ), /// Get the membership of the candidate in all fragment trees. GetTreeMembership(ParaId, CandidateHash, oneshot::Sender), /// Get the minimum accepted relay-parent number for each para in the fragment tree diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index c875d5b65870..928bc434022f 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -118,7 +118,10 @@ use polkadot_primitives::vstaging::{ Constraints as PrimitiveConstraints, Hash, HeadData, Id as ParaId, PersistedValidationData, UpgradeRestriction, ValidationCodeHash, }; -use std::collections::HashMap; +use std::{ + borrow::{Borrow, Cow}, + collections::HashMap, +}; /// Constraints on inbound HRMP channels. #[derive(Debug, Clone, PartialEq)] @@ -526,9 +529,9 @@ impl ConstraintModifications { /// here. But the erasure-root is not. This means that prospective candidates /// are not correlated to any session in particular. #[derive(Debug, Clone, PartialEq)] -pub struct ProspectiveCandidate { +pub struct ProspectiveCandidate<'a> { /// The commitments to the output of the execution. 
- pub commitments: CandidateCommitments, + pub commitments: Cow<'a, CandidateCommitments>, /// The collator that created the candidate. pub collator: CollatorId, /// The signature of the collator on the payload. @@ -541,6 +544,32 @@ pub struct ProspectiveCandidate { pub validation_code_hash: ValidationCodeHash, } +impl<'a> ProspectiveCandidate<'a> { + fn into_owned(self) -> ProspectiveCandidate<'static> { + ProspectiveCandidate { commitments: Cow::Owned(self.commitments.into_owned()), ..self } + } + + /// Partially clone the prospective candidate, but borrow the + /// parts which are potentially heavy. + pub fn partial_clone<'b>(&'b self) -> ProspectiveCandidate<'b> { + ProspectiveCandidate { + commitments: Cow::Borrowed(self.commitments.borrow()), + collator: self.collator.clone(), + collator_signature: self.collator_signature.clone(), + persisted_validation_data: self.persisted_validation_data.clone(), + pov_hash: self.pov_hash.clone(), + validation_code_hash: self.validation_code_hash.clone(), + } + } +} + +#[cfg(test)] +impl ProspectiveCandidate<'static> { + fn commitments_mut(&mut self) -> &mut CandidateCommitments { + self.commitments.to_mut() + } +} + /// Kinds of errors with the validity of a fragment. #[derive(Debug, Clone, PartialEq)] pub enum FragmentValidityError { @@ -592,19 +621,19 @@ pub enum FragmentValidityError { /// This is a type which guarantees that the candidate is valid under the /// operating constraints. #[derive(Debug, Clone, PartialEq)] -pub struct Fragment { +pub struct Fragment<'a> { /// The new relay-parent. relay_parent: RelayChainBlockInfo, /// The constraints this fragment is operating under. operating_constraints: Constraints, /// The core information about the prospective candidate. - candidate: ProspectiveCandidate, + candidate: ProspectiveCandidate<'a>, /// Modifications to the constraints based on the outputs of /// the candidate. modifications: ConstraintModifications, } -impl Fragment { +impl<'a> Fragment<'a> { /// Create a new fragment. /// /// This fails if the fragment isn't in line with the operating @@ -616,7 +645,7 @@ impl Fragment { pub fn new( relay_parent: RelayChainBlockInfo, operating_constraints: Constraints, - candidate: ProspectiveCandidate, + candidate: ProspectiveCandidate<'a>, ) -> Result { let modifications = { let commitments = &candidate.commitments; @@ -681,7 +710,7 @@ impl Fragment { } /// Access the underlying prospective candidate. - pub fn candidate(&self) -> &ProspectiveCandidate { + pub fn candidate(&self) -> &ProspectiveCandidate<'a> { &self.candidate } @@ -690,6 +719,11 @@ impl Fragment { &self.modifications } + /// Convert the fragment into an owned variant. + pub fn into_owned(self) -> Fragment<'static> { + Fragment { candidate: self.candidate.into_owned(), ..self } + } + /// Validate this fragment against some set of constraints /// instead of the operating constraints. 
pub fn validate_against_constraints( @@ -1129,21 +1163,21 @@ mod tests { fn make_candidate( constraints: &Constraints, relay_parent: &RelayChainBlockInfo, - ) -> ProspectiveCandidate { + ) -> ProspectiveCandidate<'static> { let collator_pair = CollatorPair::generate().0; let collator = collator_pair.public(); let sig = collator_pair.sign(b"blabla".as_slice()); ProspectiveCandidate { - commitments: CandidateCommitments { + commitments: Cow::Owned(CandidateCommitments { upward_messages: Vec::new(), horizontal_messages: Vec::new(), new_validation_code: None, head_data: HeadData::from(vec![1, 2, 3, 4, 5]), processed_downward_messages: 0, hrmp_watermark: relay_parent.number, - }, + }), collator, collator_signature: sig, persisted_validation_data: PersistedValidationData { @@ -1223,7 +1257,7 @@ mod tests { let mut candidate = make_candidate(&constraints, &relay_parent); let max_code_size = constraints.max_code_size; - candidate.commitments.new_validation_code = Some(vec![0; max_code_size + 1].into()); + candidate.commitments_mut().new_validation_code = Some(vec![0; max_code_size + 1].into()); assert_eq!( Fragment::new(relay_parent, constraints, candidate), @@ -1261,9 +1295,13 @@ mod tests { let max_hrmp = constraints.max_hrmp_num_per_candidate; - candidate.commitments.horizontal_messages.extend((0..max_hrmp + 1).map(|i| { - OutboundHrmpMessage { recipient: ParaId::from(i as u32), data: vec![1, 2, 3] } - })); + candidate + .commitments_mut() + .horizontal_messages + .extend((0..max_hrmp + 1).map(|i| OutboundHrmpMessage { + recipient: ParaId::from(i as u32), + data: vec![1, 2, 3], + })); assert_eq!( Fragment::new(relay_parent, constraints, candidate), @@ -1313,7 +1351,7 @@ mod tests { let mut candidate = make_candidate(&constraints, &relay_parent); constraints.upgrade_restriction = Some(UpgradeRestriction::Present); - candidate.commitments.new_validation_code = Some(ValidationCode(vec![1, 2, 3])); + candidate.commitments_mut().new_validation_code = Some(ValidationCode(vec![1, 2, 3])); assert_eq!( Fragment::new(relay_parent, constraints, candidate), @@ -1332,7 +1370,7 @@ mod tests { let constraints = make_constraints(); let mut candidate = make_candidate(&constraints, &relay_parent); - candidate.commitments.horizontal_messages = vec![ + candidate.commitments_mut().horizontal_messages = vec![ OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![1, 2, 3] }, OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, ]; @@ -1342,7 +1380,7 @@ mod tests { Err(FragmentValidityError::HrmpMessagesDescendingOrDuplicate(1)), ); - candidate.commitments.horizontal_messages = vec![ + candidate.commitments_mut().horizontal_messages = vec![ OutboundHrmpMessage { recipient: ParaId::from(1 as u32), data: vec![1, 2, 3] }, OutboundHrmpMessage { recipient: ParaId::from(0 as u32), data: vec![4, 5, 6] }, ]; From fbd5999ee7aa85ef2239577d9a1a41f2f3db0abc Mon Sep 17 00:00:00 2001 From: Marcin S Date: Tue, 24 Jan 2023 18:32:01 +0100 Subject: [PATCH 25/76] Resolve todo about legacy leaf activation (#6447) --- .../network/statement-distribution/src/lib.rs | 28 +++++++++++-------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index ff3d42aa65ba..751eb7c6cc3f 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -33,7 +33,10 @@ use polkadot_node_subsystem::{ messages::{NetworkBridgeEvent, StatementDistributionMessage}, 
overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; -use polkadot_node_subsystem_util::rand; +use polkadot_node_subsystem_util::{ + rand, + runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, +}; use futures::{channel::mpsc, prelude::*}; use sp_keystore::SyncCryptoStorePtr; @@ -208,9 +211,12 @@ impl StatementDistributionSubsystem { } if let Some(activated) = activated { - // TODO [now]: legacy, activate only if no prospective parachains support. - crate::legacy_v1::handle_activated_leaf(ctx, legacy_v1_state, activated) - .await?; + // Legacy, activate only if no prospective parachains support. + let mode = prospective_parachains_mode(ctx.sender(), activated.hash).await?; + if let ProspectiveParachainsMode::Disabled = mode { + crate::legacy_v1::handle_activated_leaf(ctx, legacy_v1_state, activated) + .await?; + } } }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => { @@ -239,13 +245,13 @@ impl StatementDistributionSubsystem { // pass to legacy, but not if the message isn't // v1. let legacy = match &event { - &NetworkBridgeEvent::PeerMessage(_, ref message) => match message { - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility(_)) => true, - Versioned::V1(_) => true, - // TODO [now]: _ => false, - }, - _ => true, - }; + &NetworkBridgeEvent::PeerMessage(_, ref message) => match message { + Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility(_)) => true, + Versioned::V1(_) => true, + // TODO [now]: _ => false, + }, + _ => true, + }; if legacy { crate::legacy_v1::handle_network_update( From d7cca3f97a8a0894be5b2a9a4422704f7b8db898 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Wed, 25 Jan 2023 11:34:39 -0600 Subject: [PATCH 26/76] fix bug/warning in handling membership answers --- node/core/backing/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 25f6ae8ed333..82d1d886ec85 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -876,7 +876,7 @@ async fn handle_active_leaves_update( } let mut seconded_at_depth = HashMap::new(); - for response in membership_answers.next().await { + while let Some(response) = membership_answers.next().await { match response { Err(oneshot::Canceled) => { gum::warn!( From 7dde6d70502873d9ae9ed479c8682adc1de541b0 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Fri, 27 Jan 2023 16:44:12 +0100 Subject: [PATCH 27/76] Remove `HypotheticalDepthRequest` in favor of `HypotheticalFrontierRequest` (#6521) * Remove `HypotheticalDepthRequest` for `HypotheticalFrontierRequest` * Update tests * Fix (removed wrong docstring) * Fix can_second request * Patch some dead_code errors --------- Co-authored-by: Chris Sosnin --- node/core/backing/src/lib.rs | 95 ++++---- .../src/tests/prospective_parachains.rs | 219 +++++++++++------- .../src/fragment_tree.rs | 3 + node/core/prospective-parachains/src/lib.rs | 30 +-- node/subsystem-types/src/messages.rs | 56 ++--- 5 files changed, 227 insertions(+), 176 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 82d1d886ec85..b96bb5752c8f 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -87,8 +87,9 @@ use polkadot_node_subsystem::{ messages::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CanSecondRequest, CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, - 
HypotheticalDepthRequest, ProspectiveParachainsMessage, ProvisionableData, - ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, StatementDistributionMessage, + HypotheticalCandidate, HypotheticalFrontierRequest, ProspectiveParachainsMessage, + ProvisionableData, ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, + StatementDistributionMessage, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; @@ -231,7 +232,7 @@ struct ActiveLeafState { prospective_parachains_mode: ProspectiveParachainsMode, /// The candidates seconded at various depths under this active /// leaf with respect to parachain id. A candidate can only be - /// seconded when its hypothetical depth under every active leaf + /// seconded when its hypothetical frontier under every active leaf /// has an empty entry in this map. /// /// When prospective parachains are disabled, the only depth @@ -876,15 +877,13 @@ async fn handle_active_leaves_update( } let mut seconded_at_depth = HashMap::new(); - while let Some(response) = membership_answers.next().await { + if let Some(response) = membership_answers.next().await { match response { Err(oneshot::Canceled) => { gum::warn!( target: LOG_TARGET, "Prospective parachains subsystem unreachable for membership request", ); - - continue }, Ok((para_id, candidate_hash, membership)) => { // This request gives membership in all fragment trees. We have some @@ -1080,22 +1079,22 @@ enum SecondingAllowed { Yes(Vec<(Hash, Vec)>), } -/// Checks whether a candidate can be seconded based on its hypothetical -/// depths in the fragment tree and what we've already seconded in all -/// active leaves. +/// Checks whether a candidate can be seconded based on its hypothetical frontiers in the fragment +/// tree and what we've already seconded in all active leaves. 
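+///
+/// A sketch of the request issued in the loop below for each active leaf with
+/// prospective parachains enabled; this mirrors the code that follows (the
+/// names `hypothetical_candidate`, `head` and `tx` are the loop's own
+/// bindings), it is not an additional API:
+///
+/// ```ignore
+/// let request = HypotheticalFrontierRequest {
+///     candidates: vec![hypothetical_candidate.clone()],
+///     fragment_tree_relay_parent: Some(*head),
+/// };
+/// ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx))
+///     .await;
+/// ```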
#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] async fn seconding_sanity_check( ctx: &mut Context, active_leaves: &HashMap, implicit_view: &ImplicitView, - candidate_hash: CandidateHash, - candidate_para: ParaId, - parent_head_data_hash: Hash, - candidate_relay_parent: Hash, + hypothetical_candidate: HypotheticalCandidate, ) -> SecondingAllowed { let mut membership = Vec::new(); let mut responses = FuturesOrdered::>>::new(); + let candidate_para = hypothetical_candidate.candidate_para(); + let candidate_relay_parent = hypothetical_candidate.relay_parent(); + let candidate_hash = hypothetical_candidate.candidate_hash(); + for (head, leaf_state) in active_leaves { if leaf_state.prospective_parachains_mode.is_enabled() { // Check that the candidate relay parent is allowed for para, skip the @@ -1107,20 +1106,30 @@ async fn seconding_sanity_check( } let (tx, rx) = oneshot::channel(); - ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalDepth( - HypotheticalDepthRequest { - candidate_hash, - candidate_para, - parent_head_data_hash, - candidate_relay_parent, - fragment_tree_relay_parent: *head, + ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalFrontier( + HypotheticalFrontierRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_tree_relay_parent: Some(*head), }, tx, )) .await; - responses.push_back(rx.map_ok(move |depths| (depths, head, leaf_state)).boxed()); + let response = rx.map_ok(move |frontiers| { + let depths: Vec = frontiers + .into_iter() + .flat_map(|(candidate, memberships)| { + debug_assert_eq!(candidate.candidate_hash(), candidate_hash); + memberships.into_iter().flat_map(|(relay_parent, depths)| { + debug_assert_eq!(relay_parent, *head); + depths + }) + }) + .collect(); + (depths, head, leaf_state) + }); + responses.push_back(response.boxed()); } else { - if head == &candidate_relay_parent { + if *head == candidate_relay_parent { if leaf_state .seconded_at_depth .get(&candidate_para) @@ -1143,7 +1152,7 @@ async fn seconding_sanity_check( Err(oneshot::Canceled) => { gum::warn!( target: LOG_TARGET, - "Failed to reach prospective parachains subsystem for hypothetical depths", + "Failed to reach prospective parachains subsystem for hypothetical frontiers", ); return SecondingAllowed::No @@ -1191,14 +1200,18 @@ async fn handle_can_second_request( .get(&relay_parent) .map_or(false, |pr_state| pr_state.prospective_parachains_mode.is_enabled()) { + let hypothetical_candidate = HypotheticalCandidate::Incomplete { + candidate_hash: request.candidate_hash, + candidate_para: request.candidate_para_id, + parent_head_data_hash: request.parent_head_data_hash, + candidate_relay_parent: relay_parent, + }; + let result = seconding_sanity_check( ctx, &state.per_leaf, &state.implicit_view, - request.candidate_hash, - request.candidate_para_id, - request.parent_head_data_hash, - relay_parent, + hypothetical_candidate, ) .await; @@ -1243,8 +1256,13 @@ async fn handle_validated_candidate_command( return Ok(()) } + let receipt = CommittedCandidateReceipt { + descriptor: candidate.descriptor.clone(), + commitments, + }; + let parent_head_data_hash = persisted_validation_data.parent_head.hash(); - // Note that `GetHypotheticalDepths` doesn't account for recursion, + // Note that `GetHypotheticalFrontier` doesn't account for recursion, // i.e. 
candidates can appear at multiple depths in the tree and in fact // at all depths, and we don't know what depths a candidate will ultimately occupy // because that's dependent on other candidates we haven't yet received. @@ -1253,9 +1271,14 @@ async fn handle_validated_candidate_command( // directly commit to the parachain block number or some other incrementing // counter. That requires a major primitives format upgrade, so for now // we just rule out trivial cycles. - if parent_head_data_hash == commitments.head_data.hash() { + if parent_head_data_hash == receipt.commitments.head_data.hash() { return Ok(()) } + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash, + receipt: Arc::new(receipt.clone()), + persisted_validation_data: persisted_validation_data.clone(), + }; // sanity check that we're allowed to second the candidate // and that it doesn't conflict with other candidates we've // seconded. @@ -1263,10 +1286,7 @@ async fn handle_validated_candidate_command( ctx, &state.per_leaf, &state.implicit_view, - candidate_hash, - candidate.descriptor().para_id, - persisted_validation_data.parent_head.hash(), - candidate.descriptor().relay_parent, + hypothetical_candidate, ) .await { @@ -1274,13 +1294,8 @@ async fn handle_validated_candidate_command( SecondingAllowed::Yes(membership) => membership, }; - let statement = StatementWithPVD::Seconded( - CommittedCandidateReceipt { - descriptor: candidate.descriptor.clone(), - commitments, - }, - persisted_validation_data, - ); + let statement = + StatementWithPVD::Seconded(receipt, persisted_validation_data); // If we get an Error::RejectedByProspectiveParachains, // then the statement has not been distributed or imported into diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 4d68fd99f798..ee948d81ef35 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -16,11 +16,11 @@ //! Tests for the backing subsystem with enabled prospective parachains. -use polkadot_node_subsystem::{messages::ChainApiMessage, TimeoutExt}; -use polkadot_primitives::{ - v2::{BlockNumber, Header, OccupiedCore}, - vstaging as vstaging_primitives, +use polkadot_node_subsystem::{ + messages::{ChainApiMessage, FragmentTreeMembership}, + TimeoutExt, }; +use polkadot_primitives::{vstaging as vstaging_primitives, BlockNumber, Header, OccupiedCore}; use super::*; @@ -266,9 +266,12 @@ async fn assert_validate_seconded_candidate( ); } -async fn assert_hypothetical_depth_requests( +async fn assert_hypothetical_frontier_requests( virtual_overseer: &mut VirtualOverseer, - mut expected_requests: Vec<(HypotheticalDepthRequest, Vec)>, + mut expected_requests: Vec<( + HypotheticalFrontierRequest, + Vec<(HypotheticalCandidate, FragmentTreeMembership)>, + )>, ) { // Requests come with no particular order. 
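+ // (The backing subsystem iterates a `HashMap` of active leaves when
+ // issuing these requests, so arrival order is nondeterministic; match
+ // each incoming request against the expected set by content instead.)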
let requests_num = expected_requests.len(); @@ -277,12 +280,12 @@ async fn assert_hypothetical_depth_requests( assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx), + ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx), ) => { let idx = match expected_requests.iter().position(|r| r.0 == request) { Some(idx) => idx, None => panic!( - "unexpected hypothetical depth request, no match found for {:?}", + "unexpected hypothetical frontier request, no match found for {:?}", request ), }; @@ -295,6 +298,14 @@ async fn assert_hypothetical_depth_requests( } } +fn make_hypothetical_frontier_response( + depths: Vec, + hypothetical_candidate: HypotheticalCandidate, + relay_parent_hash: Hash, +) -> Vec<(HypotheticalCandidate, FragmentTreeMembership)> { + vec![(hypothetical_candidate, vec![(relay_parent_hash, depths)])] +} + // Test that `seconding_sanity_check` works when a candidate is allowed // for all leaves. #[test] @@ -375,23 +386,32 @@ fn seconding_sanity_check_allowed() { .await; // `seconding_sanity_check` - let expected_request_a = HypotheticalDepthRequest { + let hypothetical_candidate = HypotheticalCandidate::Complete { candidate_hash: candidate.hash(), - candidate_para: para_id, - parent_head_data_hash: pvd.parent_head.hash(), - candidate_relay_parent: leaf_a_parent, - fragment_tree_relay_parent: leaf_a_hash, + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), }; - let expected_request_b = HypotheticalDepthRequest { - candidate_hash: candidate.hash(), - candidate_para: para_id, - parent_head_data_hash: pvd.parent_head.hash(), - candidate_relay_parent: leaf_a_parent, - fragment_tree_relay_parent: leaf_b_hash, + let expected_request_a = HypotheticalFrontierRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_tree_relay_parent: Some(leaf_a_hash), + }; + let expected_response_a = make_hypothetical_frontier_response( + vec![0, 1, 2, 3], + hypothetical_candidate.clone(), + leaf_a_hash, + ); + let expected_request_b = HypotheticalFrontierRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_tree_relay_parent: Some(leaf_b_hash), }; - assert_hypothetical_depth_requests( + let expected_response_b = + make_hypothetical_frontier_response(vec![3], hypothetical_candidate, leaf_b_hash); + assert_hypothetical_frontier_requests( &mut virtual_overseer, - vec![(expected_request_a, vec![0, 1, 2, 3]), (expected_request_b, vec![3])], + vec![ + (expected_request_a, expected_response_a), + (expected_request_b, expected_response_b), + ], ) .await; // Prospective parachains are notified. 
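For orientation in the assertions above and below: with the
`FragmentTreeMembership` alias from `subsystem-types`, each entry of a
hypothetical-frontier response pairs a candidate with its per-leaf
membership. A sketch of the shape only, reusing names from the test above:

    // Vec<(HypotheticalCandidate, FragmentTreeMembership)>
    let response = vec![(
        hypothetical_candidate,
        vec![(leaf_a_hash, vec![0, 1, 2, 3])], // (active leaf, occupied depths)
    )];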
@@ -511,16 +531,23 @@ fn seconding_sanity_check_disallowed() { .await; // `seconding_sanity_check` - let expected_request_a = HypotheticalDepthRequest { + let hypothetical_candidate = HypotheticalCandidate::Complete { candidate_hash: candidate.hash(), - candidate_para: para_id, - parent_head_data_hash: pvd.parent_head.hash(), - candidate_relay_parent: leaf_a_parent, - fragment_tree_relay_parent: leaf_a_hash, + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let expected_request_a = HypotheticalFrontierRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_tree_relay_parent: Some(leaf_a_hash), }; - assert_hypothetical_depth_requests( + let expected_response_a = make_hypothetical_frontier_response( + vec![0, 1, 2, 3], + hypothetical_candidate, + leaf_a_hash, + ); + assert_hypothetical_frontier_requests( &mut virtual_overseer, - vec![(expected_request_a, vec![0, 1, 2, 3])], + vec![(expected_request_a, expected_response_a)], ) .await; // Prospective parachains are notified. @@ -596,25 +623,32 @@ fn seconding_sanity_check_disallowed() { .await; // `seconding_sanity_check` - let expected_request_a = HypotheticalDepthRequest { + + let hypothetical_candidate = HypotheticalCandidate::Complete { candidate_hash: candidate.hash(), - candidate_para: para_id, - parent_head_data_hash: pvd.parent_head.hash(), - candidate_relay_parent: leaf_a_grandparent, - fragment_tree_relay_parent: leaf_a_hash, + receipt: Arc::new(candidate), + persisted_validation_data: pvd, }; - let expected_request_b = HypotheticalDepthRequest { - candidate_hash: candidate.hash(), - candidate_para: para_id, - parent_head_data_hash: pvd.parent_head.hash(), - candidate_relay_parent: leaf_a_grandparent, - fragment_tree_relay_parent: leaf_b_hash, + let expected_request_a = HypotheticalFrontierRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_tree_relay_parent: Some(leaf_a_hash), }; - assert_hypothetical_depth_requests( + let expected_response_a = make_hypothetical_frontier_response( + vec![3], + hypothetical_candidate.clone(), + leaf_a_hash, + ); + let expected_request_b = HypotheticalFrontierRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_tree_relay_parent: Some(leaf_b_hash), + }; + let expected_response_b = + make_hypothetical_frontier_response(vec![1], hypothetical_candidate, leaf_b_hash); + assert_hypothetical_frontier_requests( &mut virtual_overseer, vec![ - (expected_request_a, vec![3]), // All depths are occupied. - (expected_request_b, vec![1]), + (expected_request_a, expected_response_a), // All depths are occupied. 
+ (expected_request_b, expected_response_b), ], ) .await; @@ -694,17 +728,24 @@ fn prospective_parachains_reject_candidate() { .await; // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; let expected_request_a = vec![( - HypotheticalDepthRequest { - candidate_hash: candidate.hash(), - candidate_para: para_id, - parent_head_data_hash: pvd.parent_head.hash(), - candidate_relay_parent: leaf_a_parent, - fragment_tree_relay_parent: leaf_a_hash, + HypotheticalFrontierRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_tree_relay_parent: Some(leaf_a_hash), }, - vec![0, 1, 2, 3], + make_hypothetical_frontier_response( + vec![0, 1, 2, 3], + hypothetical_candidate, + leaf_a_hash, + ), )]; - assert_hypothetical_depth_requests(&mut virtual_overseer, expected_request_a.clone()).await; + assert_hypothetical_frontier_requests(&mut virtual_overseer, expected_request_a.clone()) + .await; // Prospective parachains are notified. assert_matches!( @@ -756,7 +797,7 @@ fn prospective_parachains_reject_candidate() { .await; // `seconding_sanity_check` - assert_hypothetical_depth_requests(&mut virtual_overseer, expected_request_a).await; + assert_hypothetical_frontier_requests(&mut virtual_overseer, expected_request_a).await; // Prospective parachains are notified. assert_matches!( virtual_overseer.recv().await, @@ -867,18 +908,27 @@ fn second_multiple_candidates_per_relay_parent() { .await; // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; let expected_request_a = vec![( - HypotheticalDepthRequest { - candidate_hash: candidate.hash(), - candidate_para: para_id, - parent_head_data_hash: pvd.parent_head.hash(), - candidate_relay_parent: candidate.descriptor().relay_parent, - fragment_tree_relay_parent: leaf_hash, + HypotheticalFrontierRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_tree_relay_parent: Some(leaf_hash), }, - vec![*depth], + make_hypothetical_frontier_response( + vec![*depth], + hypothetical_candidate, + leaf_hash, + ), )]; - assert_hypothetical_depth_requests(&mut virtual_overseer, expected_request_a.clone()) - .await; + assert_hypothetical_frontier_requests( + &mut virtual_overseer, + expected_request_a.clone(), + ) + .await; // Prospective parachains are notified. assert_matches!( @@ -1381,19 +1431,25 @@ fn seconding_sanity_check_occupy_same_depth() { .await; // `seconding_sanity_check` + let hypothetical_candidate = HypotheticalCandidate::Complete { + candidate_hash: candidate.hash(), + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; let expected_request_a = vec![( - HypotheticalDepthRequest { - candidate_hash: candidate.hash(), - candidate_para: *para_id, - parent_head_data_hash: pvd.parent_head.hash(), - candidate_relay_parent: candidate.descriptor().relay_parent, - fragment_tree_relay_parent: leaf_hash, + HypotheticalFrontierRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_tree_relay_parent: Some(leaf_hash), }, - vec![0, 1], // Send the same membership for both candidates. + // Send the same membership for both candidates. 
+ make_hypothetical_frontier_response(vec![0, 1], hypothetical_candidate, leaf_hash), )]; - assert_hypothetical_depth_requests(&mut virtual_overseer, expected_request_a.clone()) - .await; + assert_hypothetical_frontier_requests( + &mut virtual_overseer, + expected_request_a.clone(), + ) + .await; // Prospective parachains are notified. assert_matches!( @@ -1512,18 +1568,23 @@ fn occupied_core_assignment() { .await; // `seconding_sanity_check` - let expected_request = HypotheticalDepthRequest { + let hypothetical_candidate = HypotheticalCandidate::Complete { candidate_hash: candidate.hash(), - candidate_para: para_id, - parent_head_data_hash: pvd.parent_head.hash(), - candidate_relay_parent: leaf_a_parent, - fragment_tree_relay_parent: leaf_a_hash, + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), }; - assert_hypothetical_depth_requests( - &mut virtual_overseer, - vec![(expected_request, vec![0, 1, 2, 3])], - ) - .await; + let expected_request = vec![( + HypotheticalFrontierRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_tree_relay_parent: Some(leaf_a_hash), + }, + make_hypothetical_frontier_response( + vec![0, 1, 2, 3], + hypothetical_candidate, + leaf_a_hash, + ), + )]; + assert_hypothetical_frontier_requests(&mut virtual_overseer, expected_request).await; // Prospective parachains are notified. assert_matches!( virtual_overseer.recv().await, diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 30b8801971cd..0cc3935369ec 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -136,6 +136,7 @@ impl CandidateStorage { } /// Remove a candidate from the store. + #[allow(dead_code)] pub fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) { let parent_head_hash = entry.candidate.persisted_validation_data.parent_head.hash(); @@ -149,6 +150,7 @@ impl CandidateStorage { } /// Note that an existing candidate has been seconded. + #[allow(dead_code)] pub fn mark_seconded(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { if entry.state != CandidateState::Backed { @@ -222,6 +224,7 @@ enum CandidateState { /// is not necessarily backed. Introduced, /// The candidate has been seconded. + #[allow(dead_code)] Seconded, /// The candidate has been completely backed by the group. 
Backed, diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index b4b3867fb734..ddfd9145f806 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -36,7 +36,7 @@ use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ - ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, HypotheticalDepthRequest, + ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, HypotheticalFrontierRequest, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage, RuntimeApiRequest, }, @@ -135,8 +135,6 @@ async fn run_iteration(ctx: &mut Context, view: &mut View) -> Result<() required_path, tx, ) => answer_get_backable_candidate(&view, relay_parent, para, required_path, tx), - ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx) => - answer_hypothetical_depths_request(&view, request, tx), ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx) => answer_hypothetical_frontier_request(&view, request, tx), ProspectiveParachainsMessage::GetTreeMembership(para, candidate, tx) => @@ -442,32 +440,6 @@ fn answer_get_backable_candidate( let _ = tx.send(tree.select_child(&required_path, |candidate| storage.is_backed(candidate))); } -fn answer_hypothetical_depths_request( - view: &View, - request: HypotheticalDepthRequest, - tx: oneshot::Sender>, -) { - match view - .active_leaves - .get(&request.fragment_tree_relay_parent) - .and_then(|l| l.fragment_trees.get(&request.candidate_para)) - { - Some(fragment_tree) => { - let depths = fragment_tree.hypothetical_depths( - request.candidate_hash, - crate::fragment_tree::HypotheticalCandidate::Incomplete { - relay_parent: request.candidate_relay_parent, - parent_head_data_hash: request.parent_head_data_hash, - }, - ); - let _ = tx.send(depths); - }, - None => { - let _ = tx.send(Vec::new()); - }, - } -} - fn answer_hypothetical_frontier_request( view: &View, request: HypotheticalFrontierRequest, diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 6326f0d3d35e..41261a3403e4 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -909,6 +909,14 @@ pub enum HypotheticalCandidate { } impl HypotheticalCandidate { + /// Get the `CandidateHash` of the hypothetical candidate. + pub fn candidate_hash(&self) -> CandidateHash { + match *self { + HypotheticalCandidate::Complete { candidate_hash, .. } => candidate_hash, + HypotheticalCandidate::Incomplete { candidate_hash, .. } => candidate_hash, + } + } + /// Get the `ParaId` of the hypothetical candidate. pub fn candidate_para(&self) -> ParaId { match *self { @@ -916,6 +924,26 @@ impl HypotheticalCandidate { HypotheticalCandidate::Incomplete { candidate_para, .. } => candidate_para, } } + + /// Get parent head data hash of the hypothetical candidate. + pub fn parent_head_data_hash(&self) -> Hash { + match *self { + HypotheticalCandidate::Complete { ref persisted_validation_data, .. } => + persisted_validation_data.parent_head.hash(), + HypotheticalCandidate::Incomplete { parent_head_data_hash, .. } => + parent_head_data_hash, + } + } + + /// Get candidate's relay parent. + pub fn relay_parent(&self) -> Hash { + match *self { + HypotheticalCandidate::Complete { ref receipt, .. } => + receipt.descriptor().relay_parent, + HypotheticalCandidate::Incomplete { candidate_relay_parent, .. 
} => + candidate_relay_parent, + } + } } /// Request specifying which candidates are either already included @@ -930,25 +958,6 @@ pub struct HypotheticalFrontierRequest { pub fragment_tree_relay_parent: Option, } -/// A request for the depths a hypothetical candidate would occupy within -/// some fragment tree. Note that this is not an absolute indication of whether -/// a candidate can be added to a fragment tree, as the commitments are not -/// considered in this request. -// TODO [now]: file issue making this obsolete in favor of `HypotheticalFrontierRequest` -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub struct HypotheticalDepthRequest { - /// The hash of the potential candidate. - pub candidate_hash: CandidateHash, - /// The para of the candidate. - pub candidate_para: ParaId, - /// The hash of the parent head-data of the candidate. - pub parent_head_data_hash: Hash, - /// The relay-parent of the candidate. - pub candidate_relay_parent: Hash, - /// The relay-parent of the fragment tree we are comparing to. - pub fragment_tree_relay_parent: Hash, -} - /// A request for the persisted validation data stored in the prospective /// parachains subsystem. #[derive(Debug)] @@ -986,15 +995,6 @@ pub enum ProspectiveParachainsMessage { /// which is a descendant of the given candidate hashes. Returns `None` on the channel /// if no such candidate exists. GetBackableCandidate(Hash, ParaId, Vec, oneshot::Sender>), - /// Get the hypothetical depths that a candidate with the given properties would - /// occupy in the fragment tree for the given relay-parent. - /// - /// If the candidate is already known, this returns the depths the candidate - /// occupies. - /// - /// Returns an empty vector either if there is no such depth or the fragment tree relay-parent - /// is unknown. - GetHypotheticalDepth(HypotheticalDepthRequest, oneshot::Sender>), /// Get the hypothetical frontier membership of candidates with the given properties /// under the specified active leaves' fragment trees. /// From c760b76fb7be5bc6d2de5f86f301c33ff6ec2a68 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Fri, 27 Jan 2023 16:44:35 +0100 Subject: [PATCH 28/76] Async Backing: Send Statement Distribution "Backed" messages (#6634) * Backing: Send Statement Distribution "Backed" messages Closes #6590. **TODO:** - [ ] Adjust tests * Fix compile errors * (Mostly) fix tests * Fix comment * Fix test and compile error * Test that `StatementDistributionMessage::Backed` is sent --- node/core/backing/src/lib.rs | 28 +++++++++++-- node/core/backing/src/tests/mod.rs | 42 +++++++++---------- .../src/tests/prospective_parachains.rs | 34 ++++++++++----- .../network/statement-distribution/src/lib.rs | 1 + node/subsystem-types/src/messages.rs | 7 ++++ 5 files changed, 76 insertions(+), 36 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index b96bb5752c8f..f0baf935faae 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -1550,12 +1550,23 @@ async fn import_statement( let stmt = primitive_statement_to_table(statement); - let summary = rp_state.table.import_statement(&rp_state.table_context, stmt); + Ok(rp_state.table.import_statement(&rp_state.table_context, stmt)) +} +/// Handles a summary received from [`import_statement`] and dispatches `Backed` notifications and +/// misbehaviors as a result of importing a statement. 
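+///
+/// A sketch of the intended call pattern, mirroring the call sites updated
+/// below (not a new API):
+///
+/// ```ignore
+/// let summary = import_statement(ctx, rp_state, per_candidate, &statement).await?;
+/// // for local statements, `Share` goes out between these two calls
+/// post_import_statement_actions(ctx, rp_state, summary.as_ref()).await?;
+/// ```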
+#[overseer::contextbounds(CandidateBacking, prefix = self::overseer)] +async fn post_import_statement_actions( + ctx: &mut Context, + rp_state: &mut PerRelayParentState, + summary: Option<&TableSummary>, +) -> Result<(), Error> { if let Some(attested) = summary .as_ref() .and_then(|s| rp_state.table.attested_candidate(&s.candidate, &rp_state.table_context)) { + let candidate_hash = attested.candidate.hash(); + // `HashSet::insert` returns true if the thing wasn't in there already. if rp_state.backed.insert(candidate_hash) { if let Some(backed) = table_attested_to_backed(attested, &rp_state.table_context) { @@ -1583,6 +1594,8 @@ async fn import_statement( para_head: backed.candidate.descriptor.para_head, }) .await; + // Notify statement distribution of backed candidate. + ctx.send_message(StatementDistributionMessage::Backed(candidate_hash)).await; } else { // The provisioner waits on candidate-backing, which means // that we need to send unbounded messages to avoid cycles. @@ -1601,7 +1614,7 @@ async fn import_statement( issue_new_misbehaviors(ctx, rp_state.parent, &mut rp_state.table); - Ok(summary) + Ok(()) } /// Check if there have happened any new misbehaviors and issue necessary messages. @@ -1637,11 +1650,15 @@ async fn sign_import_and_distribute_statement( metrics: &Metrics, ) -> Result<Option<SignedFullStatementWithPVD>, Error> { if let Some(signed_statement) = sign_statement(&*rp_state, statement, keystore, metrics).await { - import_statement(ctx, rp_state, per_candidate, &signed_statement).await?; + let summary = import_statement(ctx, rp_state, per_candidate, &signed_statement).await?; + // `Share` must always be sent before `Backed`. We send the latter in + // `post_import_statement_actions` below. let smsg = StatementDistributionMessage::Share(rp_state.parent, signed_statement.clone()); ctx.send_unbounded_message(smsg); + post_import_statement_actions(ctx, rp_state, summary.as_ref()).await?; + Ok(Some(signed_statement)) } else { Ok(None) } @@ -1765,7 +1782,10 @@ async fn maybe_validate_and_import( return Ok(()) } - if let Some(summary) = res? { + let summary = res?; + post_import_statement_actions(ctx, rp_state, summary.as_ref()).await?; + + if let Some(summary) = summary { // import_statement already takes care of communicating with the // prospective parachains subsystem. At this point, the candidate // has already been accepted into the fragment trees.
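The net effect for a statement that makes a candidate backed locally (for
example with a backing group of size one) is the following sequence as seen
by statement distribution. This is a sketch of the ordering guarantee only;
`relay_parent`, `signed_statement` and `candidate_hash` stand in for the
values used above:

    // 1. the full signed statement, carrying the candidate itself:
    StatementDistributionMessage::Share(relay_parent, signed_statement)
    // 2. only afterwards, the bare backed notification:
    StatementDistributionMessage::Backed(candidate_hash)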
diff --git a/node/core/backing/src/tests/mod.rs b/node/core/backing/src/tests/mod.rs index 4884e86bd569..e165b6579acc 100644 --- a/node/core/backing/src/tests/mod.rs +++ b/node/core/backing/src/tests/mod.rs @@ -547,22 +547,22 @@ fn backing_works() { assert_matches!( virtual_overseer.recv().await, - AllMessages::Provisioner( - ProvisionerMessage::ProvisionableData( - _, - ProvisionableData::BackedCandidate(candidate_receipt) - ) + AllMessages::StatementDistribution( + StatementDistributionMessage::Share(hash, _stmt) ) => { - assert_eq!(candidate_receipt, candidate_a.to_plain()); + assert_eq!(test_state.relay_parent, hash); } ); assert_matches!( virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share(hash, _stmt) + AllMessages::Provisioner( + ProvisionerMessage::ProvisionableData( + _, + ProvisionableData::BackedCandidate(candidate_receipt) + ) ) => { - assert_eq!(test_state.relay_parent, hash); + assert_eq!(candidate_receipt, candidate_a.to_plain()); } ); @@ -907,6 +907,18 @@ fn backing_misbehavior_works() { } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share( + relay_parent, + signed_statement, + ) + ) if relay_parent == test_state.relay_parent => { + assert_eq!(*signed_statement.payload(), StatementWithPVD::Valid(candidate_a_hash)); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::Provisioner( @@ -920,18 +932,6 @@ fn backing_misbehavior_works() { ) if descriptor == candidate_a.descriptor ); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share( - relay_parent, - signed_statement, - ) - ) if relay_parent == test_state.relay_parent => { - assert_eq!(*signed_statement.payload(), StatementWithPVD::Valid(candidate_a_hash)); - } - ); - // This `Valid` statement is redundant after the `Seconded` statement already sent. let statement = CandidateBackingMessage::Statement(test_state.relay_parent, valid_2.clone()); diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index ee948d81ef35..8f7724cc8dc3 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -1089,6 +1089,15 @@ fn backing_works() { ) .await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::StatementDistribution( + StatementDistributionMessage::Share(hash, _stmt) + ) => { + assert_eq!(leaf_parent, hash); + } + ); + // Prospective parachains and collator protocol are notified about candidate backed. 
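+ // (The `Share` assertion above deliberately comes first: the subsystem
+ // shares a local statement before running the post-import actions that
+ // emit the provisioner notification and `Backed`.)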
assert_matches!( virtual_overseer.recv().await, @@ -1105,14 +1114,11 @@ fn backing_works() { para_head, }) if para_id == _para_id && candidate_a_para_head == para_head ); - assert_matches!( virtual_overseer.recv().await, - AllMessages::StatementDistribution( - StatementDistributionMessage::Share(hash, _stmt) - ) => { - assert_eq!(leaf_parent, hash); - } + AllMessages::StatementDistribution(StatementDistributionMessage::Backed ( + candidate_hash + )) if candidate_a_hash == candidate_hash ); let statement = CandidateBackingMessage::Statement(leaf_parent, signed_b.clone()); @@ -1251,6 +1257,7 @@ fn concurrent_dependent_candidates() { .start_send_unpin(FromOrchestra::Communication { msg: statement_b }); let mut valid_statements = HashSet::new(); + let mut backed_statements = HashSet::new(); loop { let msg = virtual_overseer @@ -1332,8 +1339,13 @@ fn concurrent_dependent_candidates() { assert!(valid_statements.insert(hash)); } ); + }, + AllMessages::StatementDistribution(StatementDistributionMessage::Backed(hash)) => { + // Ensure that `Share` was received first for the candidate. + assert!(valid_statements.contains(&hash)); + backed_statements.insert(hash); - if valid_statements.len() == 2 { + if backed_statements.len() == 2 { break } }, @@ -1341,10 +1353,10 @@ fn concurrent_dependent_candidates() { } } - assert!( - valid_statements.contains(&candidate_a_hash) && - valid_statements.contains(&candidate_b_hash) - ); + assert!(valid_statements.contains(&candidate_a_hash)); + assert!(valid_statements.contains(&candidate_b_hash)); + assert!(backed_statements.contains(&candidate_a_hash)); + assert!(backed_statements.contains(&candidate_b_hash)); virtual_overseer }); diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 751eb7c6cc3f..98dba823519e 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -268,6 +268,7 @@ impl StatementDistributionSubsystem { // TODO [now]: pass to vstaging, but not if the message is // v1 or the connecting peer is v1. }, + StatementDistributionMessage::Backed(_candidate_hash) => {}, }, } Ok(false) diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 41261a3403e4..f887a83de2a9 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -669,6 +669,13 @@ pub enum StatementDistributionMessage { /// We have originated a signed statement in the context of /// given relay-parent hash and it should be distributed to other validators. Share(Hash, SignedFullStatementWithPVD), + /// The candidate received enough validity votes from the backing group. + /// + /// If the candidate is backed as a result of a local statement, this message MUST + /// be preceded by a `Share` message for that statement. This ensures that Statement Distribution + /// is always aware of full candidates prior to receiving the `Backed` notification, even + /// when the group size is 1 and the candidate is seconded locally. + Backed(CandidateHash), /// Event from the network bridge. 
#[from] NetworkBridgeUpdate(NetworkBridgeEvent<net_protocol::StatementDistributionMessage>), From 3ae0f24da76a1c887363e2aecec8a706dbf3a870 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Sat, 28 Jan 2023 20:39:59 +0100 Subject: [PATCH 29/76] Fix compile error --- node/subsystem-util/src/inclusion_emulator/staging.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 928bc434022f..c07e05e4c097 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -1327,6 +1327,7 @@ mod tests { candidate .commitments + .to_mut() .upward_messages .extend((0..max_ump + 1).map(|i| vec![i as u8])); From d2e608128117dba7fa2844211d47e3396258c5d9 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Sun, 29 Jan 2023 12:39:10 +0100 Subject: [PATCH 30/76] Fix some clippy errors --- node/core/approval-voting/src/criteria.rs | 4 +- node/core/approval-voting/src/lib.rs | 2 +- node/core/approval-voting/src/ops.rs | 4 +- node/core/bitfield-signing/src/lib.rs | 2 +- node/core/dispute-coordinator/src/import.rs | 2 +- .../src/fragment_tree.rs | 14 +++--- node/core/prospective-parachains/src/lib.rs | 4 +- node/network/approval-distribution/src/lib.rs | 2 +- node/network/bridge/src/rx/mod.rs | 10 ++--- .../src/collator_side/mod.rs | 4 +- .../src/validator_side/collation.rs | 2 +- .../src/validator_side/mod.rs | 12 ++--- .../src/legacy_v1/mod.rs | 44 ++++++++++--------- .../network/statement-distribution/src/lib.rs | 2 +- .../src/backing_implicit_view.rs | 4 +- .../src/determine_new_blocks.rs | 2 +- .../src/inclusion_emulator/staging.rs | 18 +++----- runtime/parachains/src/configuration.rs | 2 +- runtime/parachains/src/inclusion/mod.rs | 2 +- runtime/parachains/src/paras/mod.rs | 4 +- runtime/parachains/src/scheduler.rs | 2 +- statement-table/src/generic.rs | 2 +- 22 files changed, 66 insertions(+), 78 deletions(-) diff --git a/node/core/approval-voting/src/criteria.rs b/node/core/approval-voting/src/criteria.rs index 6707fc5672aa..202c238f3bbb 100644 --- a/node/core/approval-voting/src/criteria.rs +++ b/node/core/approval-voting/src/criteria.rs @@ -274,7 +274,7 @@ pub(crate) fn compute_assignments( // Ignore any cores where the assigned group is our own.
let leaving_cores = leaving_cores .into_iter() - .filter(|&(_, _, ref g)| !is_in_backing_group(&config.validator_groups, index, *g)) + .filter(|(_, _, g)| !is_in_backing_group(&config.validator_groups, index, *g)) .map(|(c_hash, core, _)| (c_hash, core)) .collect::<Vec<_>>(); @@ -496,7 +496,7 @@ pub(crate) fn check_assignment_cert( return Err(InvalidAssignment(Reason::IsInBackingGroup)) } - let &(ref vrf_output, ref vrf_proof) = &assignment.vrf; + let (vrf_output, vrf_proof) = &assignment.vrf; match assignment.kind { AssignmentCertKind::RelayVRFModulo { sample } => { if sample >= config.relay_vrf_modulo_samples { diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs index 900d3107b034..5443f7a1aa78 100644 --- a/node/core/approval-voting/src/lib.rs +++ b/node/core/approval-voting/src/lib.rs @@ -486,7 +486,7 @@ impl Wakeups { .collect(); let mut pruned_wakeups = BTreeMap::new(); - self.reverse_wakeups.retain(|&(ref h, ref c_h), tick| { + self.reverse_wakeups.retain(|(h, c_h), tick| { let live = !pruned_blocks.contains(h); if !live { pruned_wakeups.entry(*tick).or_insert_with(HashSet::new).insert((*h, *c_h)); diff --git a/node/core/approval-voting/src/ops.rs b/node/core/approval-voting/src/ops.rs index 37f564c34f71..c0d6ce0e6054 100644 --- a/node/core/approval-voting/src/ops.rs +++ b/node/core/approval-voting/src/ops.rs @@ -62,7 +62,7 @@ fn visit_and_remove_block_entry( }; overlayed_db.delete_block_entry(&block_hash); - for &(_, ref candidate_hash) in block_entry.candidates() { + for (_, candidate_hash) in block_entry.candidates() { let candidate = match visited_candidates.entry(*candidate_hash) { Entry::Occupied(e) => e.into_mut(), Entry::Vacant(e) => { @@ -227,7 +227,7 @@ pub fn add_block_entry( // read and write all updated entries. { - for &(_, ref candidate_hash) in entry.candidates() { + for (_, candidate_hash) in entry.candidates() { let NewCandidateInfo { candidate, backing_group, our_assignment } = match candidate_info(candidate_hash) { None => return Ok(Vec::new()), diff --git a/node/core/bitfield-signing/src/lib.rs b/node/core/bitfield-signing/src/lib.rs index 5de75cadf12e..2ceb8f6b7728 100644 --- a/node/core/bitfield-signing/src/lib.rs +++ b/node/core/bitfield-signing/src/lib.rs @@ -83,7 +83,7 @@ async fn get_core_availability( sender: &Mutex<&mut impl SubsystemSender>, span: &jaeger::Span, ) -> Result<bool, Error> { - if let &CoreState::Occupied(ref core) = core { + if let CoreState::Occupied(core) = core { let _span = span.child("query-chunk-availability"); let (tx, rx) = oneshot::channel(); diff --git a/node/core/dispute-coordinator/src/import.rs b/node/core/dispute-coordinator/src/import.rs index 4f6edc5fcef0..3caca8e02cac 100644 --- a/node/core/dispute-coordinator/src/import.rs +++ b/node/core/dispute-coordinator/src/import.rs @@ -97,7 +97,7 @@ pub enum OwnVoteState { } impl OwnVoteState { - fn new<'a>(votes: &CandidateVotes, env: &CandidateEnvironment<'a>) -> Self { + fn new(votes: &CandidateVotes, env: &CandidateEnvironment) -> Self { let controlled_indices = env.controlled_indices(); if controlled_indices.is_empty() { return Self::CannotVote diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 0cc3935369ec..03a86edfeabf 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -468,7 +468,7 @@ impl FragmentTree { /// Returns an O(n) iterator over the hashes of candidates contained in the /// tree.
- pub(crate) fn candidates<'a>(&'a self) -> impl Iterator<Item = CandidateHash> + 'a { + pub(crate) fn candidates(&self) -> impl Iterator<Item = CandidateHash> + '_ { self.candidates.keys().cloned() } @@ -514,10 +514,10 @@ impl FragmentTree { /// /// If the candidate is already known, this returns the actual depths where this /// candidate is part of the tree. - pub(crate) fn hypothetical_depths<'a>( + pub(crate) fn hypothetical_depths( &self, hash: CandidateHash, - candidate: HypotheticalCandidate<'a>, + candidate: HypotheticalCandidate, ) -> Vec<usize> { // if known. if let Some(depths) = self.candidates.get(&hash) { @@ -656,11 +656,7 @@ impl FragmentTree { } } - fn populate_from_bases<'a>( - &mut self, - storage: &'a CandidateStorage, - initial_bases: Vec<NodePointer>, - ) { + fn populate_from_bases(&mut self, storage: &CandidateStorage, initial_bases: Vec<NodePointer>) { // Populate the tree breadth-first. let mut last_sweep_start = None; @@ -766,7 +762,7 @@ impl FragmentTree { let node = FragmentNode { parent: parent_pointer, fragment, - candidate_hash: candidate.candidate_hash.clone(), + candidate_hash: candidate.candidate_hash, depth: child_depth, cumulative_modifications, children: Vec::new(), diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index ddfd9145f806..cc6293832661 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -470,8 +470,8 @@ fn answer_hypothetical_frontier_request( } => ( *candidate_hash, fragment_tree::HypotheticalCandidate::Complete { - receipt: Cow::Borrowed(&*receipt), - persisted_validation_data: Cow::Borrowed(&*persisted_validation_data), + receipt: Cow::Borrowed(receipt), + persisted_validation_data: Cow::Borrowed(persisted_validation_data), }, ), HypotheticalCandidate::Incomplete { diff --git a/node/network/approval-distribution/src/lib.rs b/node/network/approval-distribution/src/lib.rs index 7422e0aa14b6..f967c566f9a9 100644 --- a/node/network/approval-distribution/src/lib.rs +++ b/node/network/approval-distribution/src/lib.rs @@ -1256,7 +1256,7 @@ impl State { .iter() .filter_map(|(p, k)| peer_data.get(&p).map(|pd| (p, k, pd.version))) .filter(|(p, k, _)| peer_filter(p, k)) - .map(|(p, _, v)| (p.clone(), v)) + .map(|(p, _, v)| (*p, v)) .collect::<Vec<_>>(); // Add the metadata of the assignment to the knowledge of each peer.
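The known-candidate short-circuit described in the `hypothetical_depths` doc above is worth a small illustration. The following is a minimal sketch, not the real `FragmentTree`: a candidate already present in the tree maps to the exact set of depths at which it occurs, and the query returns those depths directly (the fallback placement logic for unknown candidates is elided). All names here are hypothetical:

use std::collections::{BTreeSet, HashMap};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct CandidateHash(u64);

#[derive(Default)]
struct DepthIndex {
    // Each known candidate maps to every depth at which it appears.
    depths_by_candidate: HashMap<CandidateHash, BTreeSet<usize>>,
}

impl DepthIndex {
    fn add(&mut self, hash: CandidateHash, depth: usize) {
        self.depths_by_candidate.entry(hash).or_default().insert(depth);
    }

    // Mirrors the documented behavior: a known candidate returns its actual
    // depths; an unknown one would fall through to hypothetical placement,
    // which is elided in this sketch.
    fn hypothetical_depths(&self, hash: CandidateHash) -> Vec<usize> {
        if let Some(depths) = self.depths_by_candidate.get(&hash) {
            return depths.iter().copied().collect()
        }
        Vec::new()
    }
}

fn main() {
    let mut index = DepthIndex::default();
    index.add(CandidateHash(7), 0);
    index.add(CandidateHash(7), 2);
    assert_eq!(index.hypothetical_depths(CandidateHash(7)), vec![0, 2]);
    assert_eq!(index.hypothetical_depths(CandidateHash(9)), Vec::<usize>::new());
}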
diff --git a/node/network/bridge/src/rx/mod.rs b/node/network/bridge/src/rx/mod.rs index 03f3e9ca8965..c81fce6ad70a 100644 --- a/node/network/bridge/src/rx/mod.rs +++ b/node/network/bridge/src/rx/mod.rs @@ -772,22 +772,18 @@ fn update_our_view( shared .validation_peers .iter() - .map(|(peer_id, data)| (peer_id.clone(), data.version)) + .map(|(peer_id, data)| (*peer_id, data.version)) .collect::<Vec<_>>(), shared .collation_peers .iter() - .map(|(peer_id, data)| (peer_id.clone(), data.version)) + .map(|(peer_id, data)| (*peer_id, data.version)) .collect::<Vec<_>>(), ) }; let filter_by_version = |peers: &[(PeerId, ProtocolVersion)], version| { - peers - .iter() - .filter(|(_, v)| v == &version) - .map(|(p, _)| p.clone()) - .collect::<Vec<_>>() + peers.iter().filter(|(_, v)| v == &version).map(|(p, _)| *p).collect::<Vec<_>>() }; let v1_validation_peers = filter_by_version(&validation_peers, ValidationVersion::V1.into()); diff --git a/node/network/collator-protocol/src/collator_side/mod.rs b/node/network/collator-protocol/src/collator_side/mod.rs index d4f670dccf67..4d7b29067f26 100644 --- a/node/network/collator-protocol/src/collator_side/mod.rs +++ b/node/network/collator-protocol/src/collator_side/mod.rs @@ -713,7 +713,7 @@ async fn advertise_collation( }; ctx.send_message(NetworkBridgeTxMessage::SendCollationMessage( - vec![peer.clone()], + vec![*peer], collation_message, )) .await; @@ -1180,7 +1180,7 @@ where { let current_leaves = state.active_leaves.clone(); - let removed = current_leaves.iter().filter(|(h, _)| !view.contains(*h)); + let removed = current_leaves.iter().filter(|(h, _)| !view.contains(h)); let added = view.iter().filter(|h| !current_leaves.contains_key(h)); for leaf in added { diff --git a/node/network/collator-protocol/src/validator_side/collation.rs b/node/network/collator-protocol/src/validator_side/collation.rs index a60cd172cf44..d3a537999c4d 100644 --- a/node/network/collator-protocol/src/validator_side/collation.rs +++ b/node/network/collator-protocol/src/validator_side/collation.rs @@ -105,7 +105,7 @@ impl PendingCollation { Self { relay_parent, para_id, - peer_id: peer_id.clone(), + peer_id: *peer_id, prospective_candidate, commitments_hash: None, } diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index 36ac8d67704f..e8faaf97cf79 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -287,7 +287,7 @@ impl PeerData { } let candidates = state.advertisements.entry(on_relay_parent).or_default(); - if candidates.len() >= max_candidate_depth + 1 { + if candidates.len() > max_candidate_depth { return Err(InsertAdvertisementError::PeerLimitReached) } candidates.insert(candidate_hash); @@ -867,7 +867,7 @@ async fn process_incoming_peer_message( ); if let Some(rep) = err.reputation_changes() { - modify_reputation(ctx.sender(), origin.clone(), rep).await; + modify_reputation(ctx.sender(), origin, rep).await; } }, Versioned::VStaging(VStaging::AdvertiseCollation { @@ -894,7 +894,7 @@ async fn process_incoming_peer_message( ); if let Some(rep) = err.reputation_changes() { - modify_reputation(ctx.sender(), origin.clone(), rep).await; + modify_reputation(ctx.sender(), origin, rep).await; } }, Versioned::V1(V1::CollationSeconded(..)) | @@ -1241,7 +1241,7 @@ where { let current_leaves = state.active_leaves.clone(); - let removed = current_leaves.iter().filter(|(h, _)| !view.contains(*h)); + let removed = current_leaves.iter().filter(|(h, _)|
!view.contains(h)); let added = view.iter().filter(|h| !current_leaves.contains_key(h)); for leaf in added { @@ -1352,7 +1352,7 @@ where ?para_id, "Disconnecting peer on view change (not current parachain id)" ); - disconnect_peer(sender, peer_id.clone()).await; + disconnect_peer(sender, *peer_id).await; } } } @@ -1645,7 +1645,7 @@ async fn poll_requests( .await; if !result.is_ready() { - retained_requested.insert(pending_collation.clone()); + retained_requested.insert(*pending_collation); } if let CollationFetchResult::Error(Some(rep)) = result { reputation_changes.push((pending_collation.peer_id, rep)); diff --git a/node/network/statement-distribution/src/legacy_v1/mod.rs b/node/network/statement-distribution/src/legacy_v1/mod.rs index db97c57ef935..752385e1aea6 100644 --- a/node/network/statement-distribution/src/legacy_v1/mod.rs +++ b/node/network/statement-distribution/src/legacy_v1/mod.rs @@ -265,10 +265,10 @@ impl PeerRelayParentKnowledge { let new_known = match fingerprint.0 { CompactStatement::Seconded(ref h) => { - self.seconded_counts.entry(fingerprint.1).or_default().note_local(h.clone()); + self.seconded_counts.entry(fingerprint.1).or_default().note_local(*h); let was_known = self.is_known_candidate(h); - self.sent_candidates.insert(h.clone()); + self.sent_candidates.insert(*h); !was_known }, CompactStatement::Valid(_) => false, @@ -332,7 +332,7 @@ impl PeerRelayParentKnowledge { .seconded_counts .entry(fingerprint.1) .or_insert_with(Default::default) - .note_remote(h.clone()); + .note_remote(*h); if !allowed_remote { return Err(COST_UNEXPECTED_STATEMENT_REMOTE) } @@ -361,7 +361,7 @@ impl PeerRelayParentKnowledge { } self.received_statements.insert(fingerprint.clone()); - self.received_candidates.insert(candidate_hash.clone()); + self.received_candidates.insert(*candidate_hash); Ok(fresh) } @@ -1016,13 +1016,15 @@ async fn circulate_statement<'a, Context>( let mut peers_to_send: Vec<PeerId> = peers .iter() - .filter_map(|(peer, data)| { - if data.can_send(&relay_parent, &fingerprint) { - Some(peer.clone()) - } else { - None - } - }) + .filter_map( + |(peer, data)| { + if data.can_send(&relay_parent, &fingerprint) { + Some(*peer) + } else { + None + } + }, + ) .collect(); let good_peers: HashSet<&PeerId> = peers_to_send.iter().collect(); @@ -1060,13 +1062,13 @@ async fn circulate_statement<'a, Context>( let (v1_peers_to_send, vstaging_peers_to_send) = peers_to_send .into_iter() - .filter_map(|peer_id| { + .map(|peer_id| { let peer_data = peers.get_mut(&peer_id).expect("a subset is taken above, so it exists; qed"); let new = peer_data.send(&relay_parent, &fingerprint); - Some((peer_id, new, peer_data.protocol_version)) + (peer_id, new, peer_data.protocol_version) }) .partition::<Vec<_>, _>(|(_, _, version)| match version { ValidationVersion::V1 => true, @@ -1085,7 +1087,7 @@ "Sending statement to v1 peers", ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - v1_peers_to_send.iter().map(|(p, _, _)| p.clone()).collect(), + v1_peers_to_send.iter().map(|(p, _, _)| *p).collect(), compatible_v1_message(ValidationVersion::V1, payload.clone()).into(), )) .await; @@ -1099,7 +1101,7 @@ "Sending statement to vstaging peers", ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vstaging_peers_to_send.iter().map(|(p, _, _)| p.clone()).collect(), + vstaging_peers_to_send.iter().map(|(p, _, _)| *p).collect(), compatible_v1_message(ValidationVersion::VStaging, payload.clone()).into(), ))
.await; @@ -1140,7 +1142,7 @@ async fn send_statements_about( "Sending statement", ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vec![peer.clone()], + vec![peer], compatible_v1_message(peer_data.protocol_version, payload).into(), )) .await; @@ -1175,7 +1177,7 @@ async fn send_statements( "Sending statement" ); ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( - vec![peer.clone()], + vec![peer], compatible_v1_message(peer_data.protocol_version, payload).into(), )) .await; @@ -1467,7 +1469,7 @@ async fn handle_incoming_message<'a, Context>( } let fingerprint = message.get_fingerprint(); - let candidate_hash = fingerprint.0.candidate_hash().clone(); + let candidate_hash = *fingerprint.0.candidate_hash(); let handle_incoming_span = active_head .span .child("handle-incoming") @@ -1594,7 +1596,7 @@ async fn handle_incoming_message<'a, Context>( // Send the peer all statements concerning the candidate that we have, // since it appears to have just learned about the candidate. send_statements_about( - peer.clone(), + peer, peer_data, ctx, relay_parent, @@ -1704,7 +1706,7 @@ async fn update_peer_view_and_maybe_send_unlocked( continue } if let Some(active_head) = active_heads.get(&new) { - send_statements(peer.clone(), peer_data, ctx, new, active_head, metrics).await; + send_statements(peer, peer_data, ctx, new, active_head, metrics).await; } } } @@ -1805,7 +1807,7 @@ pub(crate) async fn handle_network_update( topology_storage, peers, active_heads, - &*recent_outdated_heads, + recent_outdated_heads, ctx, message, req_sender, diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 98dba823519e..c1ab9bd50821 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -245,7 +245,7 @@ impl StatementDistributionSubsystem { // pass to legacy, but not if the message isn't // v1. let legacy = match &event { - &NetworkBridgeEvent::PeerMessage(_, ref message) => match message { + NetworkBridgeEvent::PeerMessage(_, message) => match message { Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility(_)) => true, Versioned::V1(_) => true, // TODO [now]: _ => false, diff --git a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs index 4d38657f5bbf..6fd273b1b212 100644 --- a/node/subsystem-util/src/backing_implicit_view.rs +++ b/node/subsystem-util/src/backing_implicit_view.rs @@ -101,7 +101,7 @@ struct BlockInfo { impl View { /// Get an iterator over active leaves in the view. - pub fn leaves<'a>(&'a self) -> impl Iterator<Item = &Hash> + 'a { + pub fn leaves(&self) -> impl Iterator<Item = &Hash> { self.leaves.keys() } @@ -193,7 +193,7 @@ impl View { /// /// For getting relay-parents that are valid for parachain candidates use /// [`View::known_allowed_relay_parents_under`].
- pub fn all_allowed_relay_parents<'a>(&'a self) -> impl Iterator<Item = &Hash> + 'a { + pub fn all_allowed_relay_parents(&self) -> impl Iterator<Item = &Hash> { self.block_info_storage.keys() } diff --git a/node/subsystem-util/src/determine_new_blocks.rs b/node/subsystem-util/src/determine_new_blocks.rs index 5a032c38aa1e..8bebe969cb78 100644 --- a/node/subsystem-util/src/determine_new_blocks.rs +++ b/node/subsystem-util/src/determine_new_blocks.rs @@ -65,7 +65,7 @@ where } 'outer: loop { - let &(ref last_hash, ref last_header) = ancestry + let (last_hash, last_header) = ancestry .last() .expect("ancestry has length 1 at initialization and is only added to; qed"); diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index c07e05e4c097..827e708bebc5 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -268,13 +268,7 @@ impl Constraints { ) -> Result<(), ModificationError> { if let Some(HrmpWatermarkUpdate::Trunk(hrmp_watermark)) = modifications.hrmp_watermark { // head updates are always valid. - if self .hrmp_inbound .valid_watermarks .iter() .position(|w| w == &hrmp_watermark) .is_none() - { + if !self.hrmp_inbound.valid_watermarks.iter().any(|w| w == &hrmp_watermark) { return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)) } } @@ -509,7 +503,7 @@ impl ConstraintModifications { } for (id, mods) in &other.outbound_hrmp { - let record = self.outbound_hrmp.entry(id.clone()).or_default(); + let record = self.outbound_hrmp.entry(*id).or_default(); record.messages_submitted += mods.messages_submitted; record.bytes_submitted += mods.bytes_submitted; } @@ -551,14 +545,14 @@ impl<'a> ProspectiveCandidate<'a> { /// Partially clone the prospective candidate, but borrow the /// parts which are potentially heavy. - pub fn partial_clone<'b>(&'b self) -> ProspectiveCandidate<'b> { + pub fn partial_clone(&self) -> ProspectiveCandidate { ProspectiveCandidate { commitments: Cow::Borrowed(self.commitments.borrow()), collator: self.collator.clone(), collator_signature: self.collator_signature.clone(), persisted_validation_data: self.persisted_validation_data.clone(), - pov_hash: self.pov_hash.clone(), - validation_code_hash: self.validation_code_hash.clone(), + pov_hash: self.pov_hash, + validation_code_hash: self.validation_code_hash, } } } @@ -672,7 +666,7 @@ impl<'a> Fragment<'a> { } last_recipient = Some(message.recipient); - let record = outbound_hrmp.entry(message.recipient.clone()).or_default(); + let record = outbound_hrmp.entry(message.recipient).or_default(); record.bytes_submitted += message.data.len(); record.messages_submitted += 1; diff --git a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs index b8a7013ac4b9..a73a96ced6f6 100644 --- a/runtime/parachains/src/configuration.rs +++ b/runtime/parachains/src/configuration.rs @@ -1302,7 +1302,7 @@ impl<T: Config> Pallet<T> { // First, we need to decide what we should use as the base configuration.
let mut base_config = pending_configs .last() - .map(|&(_, ref config)| config.clone()) + .map(|(_, config)| config.clone()) .unwrap_or_else(Self::config); let base_config_consistent = base_config.check_consistency().is_ok(); diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs index 2c7435cad1db..4f5a7d24635b 100644 --- a/runtime/parachains/src/inclusion/mod.rs +++ b/runtime/parachains/src/inclusion/mod.rs @@ -669,7 +669,7 @@ impl<T: Config> Pallet<T> { }; // one more sweep for actually writing to storage. - let core_indices = core_indices_and_backers.iter().map(|&(ref c, ..)| *c).collect(); + let core_indices = core_indices_and_backers.iter().map(|(c, ..)| *c).collect(); for (candidate, (core, backers, group, relay_parent_number)) in candidates.into_iter().zip(core_indices_and_backers) { diff --git a/runtime/parachains/src/paras/mod.rs b/runtime/parachains/src/paras/mod.rs index 5ec3b261e462..18ef1e0bf904 100644 --- a/runtime/parachains/src/paras/mod.rs +++ b/runtime/parachains/src/paras/mod.rs @@ -1287,13 +1287,13 @@ impl<T: Config> Pallet<T> { <Self as Store>::UpcomingUpgrades::mutate(|upcoming_upgrades| { *upcoming_upgrades = mem::take(upcoming_upgrades) .into_iter() - .filter(|&(ref para, _)| !outgoing.contains(para)) + .filter(|(para, _)| !outgoing.contains(para)) .collect(); }); <Self as Store>::UpgradeCooldowns::mutate(|upgrade_cooldowns| { *upgrade_cooldowns = mem::take(upgrade_cooldowns) .into_iter() - .filter(|&(ref para, _)| !outgoing.contains(para)) + .filter(|(para, _)| !outgoing.contains(para)) .collect(); }); } diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index a317b8b1ccb2..e34ee8b47ed1 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -245,7 +245,7 @@ impl<T: Config> Pallet<T> { pub(crate) fn initializer_on_new_session( notification: &SessionChangeNotification<T::BlockNumber>, ) { - let &SessionChangeNotification { ref validators, ref new_config, .. } = notification; + let SessionChangeNotification { validators, new_config, ..
} = notification; let config = new_config; let mut thread_queue = ParathreadQueue::<T>::get(); diff --git a/statement-table/src/generic.rs b/statement-table/src/generic.rs index 4c3986468176..720ee0acb030 100644 --- a/statement-table/src/generic.rs +++ b/statement-table/src/generic.rs @@ -403,7 +403,7 @@ impl<Ctx: Context> Table<Ctx> { let existing = occ.get_mut(); if !self.config.allow_multiple_seconded && existing.proposals.len() == 1 { - let &(ref old_digest, ref old_sig) = &existing.proposals[0]; + let (old_digest, old_sig) = &existing.proposals[0]; if old_digest != &digest { const EXISTENCE_PROOF: &str = From 11556a6b9b5deeb3f87145c1a7462e48c59a8040 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Mon, 30 Jan 2023 12:03:44 +0100 Subject: [PATCH 31/76] Add prospective parachains subsystem tests (#6454) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add prospective parachains subsystem test * Add `should_do_no_work_if_async_backing_disabled_for_leaf` test * Implement `activate_leaf` helper, up to getting ancestry * Finish implementing `activate_leaf` * Small refactor in `activate_leaf` * Get `CandidateSeconded` working * Finish `send_candidate_and_check_if_found` test * Refactor; send more leaves & candidates * Refactor test * Implement `check_candidate_parent_leaving_view` test * Start work on `check_candidate_on_multiple_forks` test * Don’t associate specific parachains with leaf * Finish `correctly_updates_leaves` test * Fix cycle due to reused head data * Fix `check_backable_query` test * Fix `check_candidate_on_multiple_forks` test * Add `check_depth_and_pvd_queries` test * Address review comments * Remove TODO * add a new index for output head data to candidate storage * Resolve test TODOs * Fix compile errors * test candidate storage pruning, make sure new index is cleaned up --------- Co-authored-by: Robert Habermeier --- Cargo.lock | 7 + .../src/tests/prospective_parachains.rs | 2 +- .../dispute-coordinator/src/scraping/tests.rs | 3 +- node/core/prospective-parachains/Cargo.toml | 7 + .../src/fragment_tree.rs | 70 +- node/core/prospective-parachains/src/lib.rs | 4 +- node/core/prospective-parachains/src/tests.rs | 1331 +++++++++++++++++ .../tests/prospective_parachains.rs | 2 - node/service/Cargo.toml | 2 +- .../src/inclusion_emulator/staging.rs | 7 +- 10 files changed, 1407 insertions(+), 28 deletions(-) create mode 100644 node/core/prospective-parachains/src/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 1f9a117733c0..ea4ab5989a22 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6943,9 +6943,16 @@ dependencies = [ "parity-scale-codec 2.3.1", "polkadot-node-primitives", "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "sc-keystore", + "sp-application-crypto", + "sp-core", + "sp-keyring", + "sp-keystore", "thiserror", "tracing-gum", ] diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 8f7724cc8dc3..f633141b8aa9 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -317,7 +317,6 @@ fn seconding_sanity_check_allowed() { const LEAF_A_ANCESTRY_LEN: BlockNumber = 3; let para_id = test_state.chain_ids[0]; - let leaf_b_hash = Hash::from_low_u64_be(128); // `a` is grandparent of `b`.
let leaf_a_hash = Hash::from_low_u64_be(130); let leaf_a_parent = get_parent_hash(leaf_a_hash); @@ -333,6 +332,7 @@ fn seconding_sanity_check_allowed() { const LEAF_B_BLOCK_NUMBER: BlockNumber = LEAF_A_BLOCK_NUMBER + 2; const LEAF_B_ANCESTRY_LEN: BlockNumber = 4; + let leaf_b_hash = Hash::from_low_u64_be(128); let activated = ActivatedLeaf { hash: leaf_b_hash, number: LEAF_B_BLOCK_NUMBER, diff --git a/node/core/dispute-coordinator/src/scraping/tests.rs b/node/core/dispute-coordinator/src/scraping/tests.rs index b7183739d8f8..621947345bef 100644 --- a/node/core/dispute-coordinator/src/scraping/tests.rs +++ b/node/core/dispute-coordinator/src/scraping/tests.rs @@ -135,8 +135,7 @@ fn make_candidate_receipt(relay_parent: Hash) -> CandidateReceipt { para_head: zeros, validation_code_hash: zeros.into(), }; - let candidate = CandidateReceipt { descriptor, commitments_hash: zeros }; - candidate + CandidateReceipt { descriptor, commitments_hash: zeros } } /// Get a dummy `ActivatedLeaf` for a given block number. diff --git a/node/core/prospective-parachains/Cargo.toml b/node/core/prospective-parachains/Cargo.toml index 71374285707b..7a149e268ef4 100644 --- a/node/core/prospective-parachains/Cargo.toml +++ b/node/core/prospective-parachains/Cargo.toml @@ -19,7 +19,14 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" } [dev-dependencies] assert_matches = "1" +polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } +polkadot-node-subsystem-types = { path = "../../subsystem-types" } polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } +sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } [features] # If not enabled, the dispute coordinator will do nothing. diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 03a86edfeabf..c4a920577459 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -83,9 +83,12 @@ pub enum CandidateStorageInsertionError { } pub(crate) struct CandidateStorage { - // Index from parent head hash to candidate hashes. + // Index from head data hash to candidate hashes with that head data as a parent. by_parent_head: HashMap<Hash, HashSet<CandidateHash>>, + // Index from head data hash to candidate hashes outputting that head data. + by_output_head: HashMap<Hash, HashSet<CandidateHash>>, + // Index from candidate hash to fragment node. by_candidate_hash: HashMap<CandidateHash, CandidateEntry>, } impl CandidateStorage { /// Create a new `CandidateStorage`. pub fn new() -> Self { - CandidateStorage { by_parent_head: HashMap::new(), by_candidate_hash: HashMap::new() } + CandidateStorage { + by_parent_head: HashMap::new(), + by_output_head: HashMap::new(), + by_candidate_hash: HashMap::new(), + } } /// Introduce a new candidate.
@@ -113,7 +120,7 @@ impl CandidateStorage { } let parent_head_hash = persisted_validation_data.parent_head.hash(); - + let output_head_hash = candidate.commitments.head_data.hash(); let entry = CandidateEntry { candidate_hash, relay_parent: candidate.descriptor.relay_parent, @@ -129,6 +136,7 @@ impl CandidateStorage { }; self.by_parent_head.entry(parent_head_hash).or_default().insert(candidate_hash); + self.by_output_head.entry(output_head_hash).or_default().insert(candidate_hash); // sanity-checked already. self.by_candidate_hash.insert(candidate_hash, entry); @@ -184,18 +192,32 @@ impl CandidateStorage { self.by_parent_head.retain(|_parent, children| { children.retain(|h| pred(h)); !children.is_empty() - }) + }); + self.by_output_head.retain(|_output, candidates| { + candidates.retain(|h| pred(h)); + !candidates.is_empty() + }); } /// Get head-data by hash. pub(crate) fn head_data_by_hash(&self, hash: &Hash) -> Option<&HeadData> { - // Get some candidate which has a parent-head with the same hash as requested. - let a_candidate_hash = self.by_parent_head.get(hash).and_then(|m| m.iter().next())?; - - // Extract the full parent head from that candidate's `PersistedValidationData`. - self.by_candidate_hash - .get(a_candidate_hash) - .map(|e| &e.candidate.persisted_validation_data.parent_head) + // First, search for candidates outputting this head data and extract the head data + // from their commitments if they exist. + // + // Otherwise, search for candidates building upon this head data and extract the head data + // from their persisted validation data if they exist. + self.by_output_head + .get(hash) + .and_then(|m| m.iter().next()) + .and_then(|a_candidate| self.by_candidate_hash.get(a_candidate)) + .map(|e| &e.candidate.commitments.head_data) + .or_else(|| { + self.by_parent_head + .get(hash) + .and_then(|m| m.iter().next()) + .and_then(|a_candidate| self.by_candidate_hash.get(a_candidate)) + .map(|e| &e.candidate.persisted_validation_data.parent_head) + }) } fn iter_para_children<'a>( @@ -213,6 +235,11 @@ impl CandidateStorage { fn get(&'_ self, candidate_hash: &CandidateHash) -> Option<&'_ CandidateEntry> { self.by_candidate_hash.get(candidate_hash) } + + #[cfg(test)] + pub fn len(&self) -> (usize, usize) { + (self.by_parent_head.len(), self.by_candidate_hash.len()) + } } /// The state of a candidate. @@ -230,6 +257,7 @@ enum CandidateState { Backed, } +#[derive(Debug)] struct CandidateEntry { candidate_hash: CandidateHash, relay_parent: Hash, @@ -251,7 +279,12 @@ pub(crate) struct Scope { /// An error variant indicating that ancestors provided to a scope /// had unexpected order. #[derive(Debug)] -pub struct UnexpectedAncestor; +pub struct UnexpectedAncestor { + /// The block number that this error occurred at. + pub number: BlockNumber, + /// The previous seen block number, which did not match `number`. + pub prev: BlockNumber, +} impl Scope { /// Define a new [`Scope`]. 
@@ -283,9 +316,9 @@ impl Scope { let mut prev = relay_parent.number; for ancestor in ancestors { if prev == 0 { - return Err(UnexpectedAncestor) + return Err(UnexpectedAncestor { number: ancestor.number, prev }) } else if ancestor.number != prev - 1 { - return Err(UnexpectedAncestor) + return Err(UnexpectedAncestor { number: ancestor.number, prev }) } else if prev == base_constraints.min_relay_parent_number { break } else { @@ -889,8 +922,8 @@ mod tests { let base_constraints = make_constraints(8, vec![8, 9], vec![1, 2, 3].into()); assert_matches!( - Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors,), - Err(UnexpectedAncestor) + Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors), + Err(UnexpectedAncestor { number: 8, prev: 10 }) ); } @@ -914,7 +947,7 @@ mod tests { assert_matches!( Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors,), - Err(UnexpectedAncestor) + Err(UnexpectedAncestor { number: 99999, prev: 0 }) ); } @@ -991,16 +1024,19 @@ mod tests { ); let candidate_hash = candidate.hash(); + let output_head_hash = candidate.commitments.head_data.hash(); let parent_head_hash = pvd.parent_head.hash(); storage.add_candidate(candidate, pvd).unwrap(); storage.retain(|_| true); assert!(storage.contains(&candidate_hash)); assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); + assert!(storage.head_data_by_hash(&output_head_hash).is_some()); storage.retain(|_| false); assert!(!storage.contains(&candidate_hash)); assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 0); + assert!(storage.head_data_by_hash(&output_head_hash).is_none()); } #[test] diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index cc6293832661..dabbd6634a95 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -58,6 +58,8 @@ use crate::{ mod error; mod fragment_tree; +#[cfg(test)] +mod tests; const LOG_TARGET: &str = "parachain::prospective-parachains"; @@ -684,7 +686,7 @@ async fn fetch_ancestry( let hashes = rx.map_err(JfyiError::ChainApiRequestCanceled).await??; let mut block_info = Vec::with_capacity(hashes.len()); for hash in hashes { - match fetch_block_info(ctx, relay_hash).await? { + match fetch_block_info(ctx, hash).await? { None => { gum::warn!( target: LOG_TARGET, diff --git a/node/core/prospective-parachains/src/tests.rs b/node/core/prospective-parachains/src/tests.rs new file mode 100644 index 000000000000..e7f15654f86e --- /dev/null +++ b/node/core/prospective-parachains/src/tests.rs @@ -0,0 +1,1331 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+ +use super::*; +use ::polkadot_primitives_test_helpers::{dummy_candidate_receipt_bad_sig, dummy_hash}; +use assert_matches::assert_matches; +use polkadot_node_subsystem::{ + errors::RuntimeApiError, + messages::{ + AllMessages, HypotheticalDepthRequest, ProspectiveParachainsMessage, + ProspectiveValidationDataRequest, + }, +}; +use polkadot_node_subsystem_test_helpers as test_helpers; +use polkadot_node_subsystem_types::{jaeger, ActivatedLeaf, LeafStatus}; +use polkadot_primitives::{ + v2::{ + CandidateCommitments, HeadData, Header, PersistedValidationData, ScheduledCore, + ValidationCodeHash, + }, + vstaging::{AsyncBackingParameters, Constraints, InboundHrmpLimitations}, +}; +use std::sync::Arc; + +const ALLOWED_ANCESTRY_LEN: u32 = 3; +const ASYNC_BACKING_PARAMETERS: AsyncBackingParameters = + AsyncBackingParameters { max_candidate_depth: 4, allowed_ancestry_len: ALLOWED_ANCESTRY_LEN }; + +const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError = + RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" }; + +const MAX_POV_SIZE: u32 = 1_000_000; + +type VirtualOverseer = test_helpers::TestSubsystemContextHandle<ProspectiveParachainsMessage>; + +fn dummy_constraints( + min_relay_parent_number: BlockNumber, + valid_watermarks: Vec<BlockNumber>, + required_parent: HeadData, + validation_code_hash: ValidationCodeHash, +) -> Constraints { + Constraints { + min_relay_parent_number, + max_pov_size: MAX_POV_SIZE, + max_code_size: 1_000_000, + ump_remaining: 10, + ump_remaining_bytes: 1_000, + max_ump_num_per_candidate: 10, + dmp_remaining_messages: 10, + hrmp_inbound: InboundHrmpLimitations { valid_watermarks }, + hrmp_channels_out: vec![], + max_hrmp_num_per_candidate: 0, + required_parent, + validation_code_hash, + upgrade_restriction: None, + future_validation_code: None, + } +} + +fn dummy_pvd(parent_head: HeadData, relay_parent_number: u32) -> PersistedValidationData { + PersistedValidationData { + parent_head, + relay_parent_number, + max_pov_size: MAX_POV_SIZE, + relay_parent_storage_root: dummy_hash(), + } +} + +fn make_candidate( + leaf: &TestLeaf, + para_id: ParaId, + parent_head: HeadData, + head_data: HeadData, + validation_code_hash: ValidationCodeHash, +) -> (CommittedCandidateReceipt, PersistedValidationData) { + let pvd = dummy_pvd(parent_head, leaf.number); + let commitments = CandidateCommitments { + head_data, + horizontal_messages: Vec::new(), + upward_messages: Vec::new(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: leaf.number, + }; + + let mut candidate = dummy_candidate_receipt_bad_sig(leaf.hash, Some(Default::default())); + candidate.commitments_hash = commitments.hash(); + candidate.descriptor.para_id = para_id; + candidate.descriptor.persisted_validation_data_hash = pvd.hash(); + candidate.descriptor.validation_code_hash = validation_code_hash; + let candidate = CommittedCandidateReceipt { descriptor: candidate.descriptor, commitments }; + + (candidate, pvd) +} + +struct TestState { + availability_cores: Vec<CoreState>, + validation_code_hash: ValidationCodeHash, +} + +impl Default for TestState { + fn default() -> Self { + let chain_a = ParaId::from(1); + let chain_b = ParaId::from(2); + + let availability_cores = vec![ + CoreState::Scheduled(ScheduledCore { para_id: chain_a, collator: None }), + CoreState::Scheduled(ScheduledCore { para_id: chain_b, collator: None }), + ]; + let validation_code_hash = Hash::repeat_byte(42).into(); + + Self { availability_cores, validation_code_hash } + } +} + +fn get_parent_hash(hash: Hash) -> Hash { + Hash::from_low_u64_be(hash.to_low_u64_be()
+ 1) +} + +fn test_harness>( + test: impl FnOnce(VirtualOverseer) -> T, +) -> View { + let pool = sp_core::testing::TaskExecutor::new(); + + let (mut context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone()); + + let mut view = View::new(); + let subsystem = async move { + loop { + match run_iteration(&mut context, &mut view).await { + Ok(()) => break, + Err(e) => panic!("{:?}", e), + } + } + + view + }; + + let test_fut = test(virtual_overseer); + + futures::pin_mut!(test_fut); + futures::pin_mut!(subsystem); + let (_, view) = futures::executor::block_on(future::join( + async move { + let mut virtual_overseer = test_fut.await; + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + }, + subsystem, + )); + + view +} + +struct PerParaData { + min_relay_parent: BlockNumber, + head_data: HeadData, +} + +impl PerParaData { + pub fn new(min_relay_parent: BlockNumber, head_data: HeadData) -> Self { + Self { min_relay_parent, head_data } + } +} + +struct TestLeaf { + number: BlockNumber, + hash: Hash, + para_data: Vec<(ParaId, PerParaData)>, +} + +impl TestLeaf { + pub fn para_data(&self, para_id: ParaId) -> &PerParaData { + self.para_data + .iter() + .find_map(|(p_id, data)| if *p_id == para_id { Some(data) } else { None }) + .unwrap() + } +} + +async fn send_block_header(virtual_overseer: &mut VirtualOverseer, hash: Hash, number: u32) { + let header = Header { + parent_hash: get_parent_hash(hash), + number, + state_root: Hash::zero(), + extrinsics_root: Hash::zero(), + digest: Default::default(), + }; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ChainApi( + ChainApiMessage::BlockHeader(parent, tx) + ) if parent == hash => { + tx.send(Ok(Some(header))).unwrap(); + } + ); +} + +async fn activate_leaf( + virtual_overseer: &mut VirtualOverseer, + leaf: &TestLeaf, + test_state: &TestState, +) { + let TestLeaf { number, hash, para_data: _ } = leaf; + + let activated = ActivatedLeaf { + hash: *hash, + number: *number, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + activated, + )))) + .await; + + handle_leaf_activation(virtual_overseer, leaf, test_state).await; +} + +async fn handle_leaf_activation( + virtual_overseer: &mut VirtualOverseer, + leaf: &TestLeaf, + test_state: &TestState, +) { + let TestLeaf { number, hash, para_data } = leaf; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + ) if parent == *hash => { + tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) if parent == *hash => { + tx.send(Ok(test_state.availability_cores.clone())).unwrap(); + } + ); + + send_block_header(virtual_overseer, *hash, *number).await; + + // Check that subsystem job issues a request for ancestors. 
+ let min_min = para_data.iter().map(|(_, data)| data.min_relay_parent).min().unwrap_or(*number); + let ancestry_len = number - min_min; + let ancestry_hashes: Vec<Hash> = + std::iter::successors(Some(*hash), |h| Some(get_parent_hash(*h))) + .skip(1) + .take(ancestry_len as usize) + .collect(); + let ancestry_numbers = (min_min..*number).rev(); + let ancestry_iter = ancestry_hashes.clone().into_iter().zip(ancestry_numbers).peekable(); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ChainApi( + ChainApiMessage::Ancestors{hash: block_hash, k, response_channel: tx} + ) if block_hash == *hash && k == ALLOWED_ANCESTRY_LEN as usize => { + tx.send(Ok(ancestry_hashes.clone())).unwrap(); + } + ); + + for (hash, number) in ancestry_iter { + send_block_header(virtual_overseer, hash, number).await; + } + + for _ in 0..test_state.availability_cores.len() { + let message = virtual_overseer.recv().await; + // Get the para we are working with since the order is not deterministic. + let para_id = match message { + AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _, + RuntimeApiRequest::StagingValidityConstraints(p_id, _), + )) => p_id, + _ => panic!("received unexpected message {:?}", message), + }; + + let PerParaData { min_relay_parent, head_data } = leaf.para_data(para_id); + let constraints = dummy_constraints( + *min_relay_parent, + vec![*number], + head_data.clone(), + test_state.validation_code_hash, + ); + assert_matches!( + message, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingValidityConstraints(p_id, tx)) + ) if parent == *hash && p_id == para_id => { + tx.send(Ok(Some(constraints))).unwrap(); + } + ); + } + + // Get minimum relay parents. + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(overseer::FromOrchestra::Communication { + msg: ProspectiveParachainsMessage::GetMinimumRelayParents(*hash, tx), + }) + .await; + let mut resp = rx.await.unwrap(); + resp.sort(); + let mrp_response: Vec<(ParaId, BlockNumber)> = para_data + .iter() + .map(|(para_id, data)| (*para_id, data.min_relay_parent)) + .collect(); + assert_eq!(resp, mrp_response); +} + +async fn deactivate_leaf(virtual_overseer: &mut VirtualOverseer, hash: Hash) { + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::stop_work( + hash, + )))) + .await; +} + +async fn second_candidate( + virtual_overseer: &mut VirtualOverseer, + candidate: CommittedCandidateReceipt, + pvd: PersistedValidationData, + expected_candidate_response: Vec<(Hash, Vec<usize>)>, +) { + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(overseer::FromOrchestra::Communication { + msg: ProspectiveParachainsMessage::CandidateSeconded( + candidate.descriptor.para_id, + candidate, + pvd, + tx, + ), + }) + .await; + let resp = rx.await.unwrap(); + assert_eq!(resp, expected_candidate_response); +} + +async fn back_candidate( + virtual_overseer: &mut VirtualOverseer, + candidate: &CommittedCandidateReceipt, + candidate_hash: CandidateHash, +) { + virtual_overseer + .send(overseer::FromOrchestra::Communication { + msg: ProspectiveParachainsMessage::CandidateBacked( + candidate.descriptor.para_id, + candidate_hash, + ), + }) + .await; +} + +async fn get_membership( + virtual_overseer: &mut VirtualOverseer, + para_id: ParaId, + candidate_hash: CandidateHash, + expected_membership_response: Vec<(Hash, Vec<usize>)>, +) { + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(overseer::FromOrchestra::Communication { + msg:
ProspectiveParachainsMessage::GetTreeMembership(para_id, candidate_hash, tx), + }) + .await; + let resp = rx.await.unwrap(); + assert_eq!(resp, expected_membership_response); +} + +async fn get_backable_candidate( + virtual_overseer: &mut VirtualOverseer, + leaf: &TestLeaf, + para_id: ParaId, + required_path: Vec<CandidateHash>, + expected_candidate_hash: Option<CandidateHash>, +) { + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(overseer::FromOrchestra::Communication { + msg: ProspectiveParachainsMessage::GetBackableCandidate( + leaf.hash, + para_id, + required_path, + tx, + ), + }) + .await; + let resp = rx.await.unwrap(); + assert_eq!(resp, expected_candidate_hash); +} + +async fn get_hypothetical_depth( + virtual_overseer: &mut VirtualOverseer, + candidate_hash: CandidateHash, + para_id: ParaId, + parent_head_data: HeadData, + candidate_relay_parent: Hash, + fragment_tree_relay_parent: Hash, + expected_depths: Vec<usize>, +) { + let request = HypotheticalDepthRequest { + candidate_hash, + candidate_para: para_id, + parent_head_data_hash: parent_head_data.hash(), + candidate_relay_parent, + fragment_tree_relay_parent, + }; + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(overseer::FromOrchestra::Communication { + msg: ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx), + }) + .await; + let resp = rx.await.unwrap(); + assert_eq!(resp, expected_depths); +} + +async fn get_pvd( + virtual_overseer: &mut VirtualOverseer, + para_id: ParaId, + candidate_relay_parent: Hash, + parent_head_data: HeadData, + expected_pvd: Option<PersistedValidationData>, +) { + let request = ProspectiveValidationDataRequest { + para_id, + candidate_relay_parent, + parent_head_data_hash: parent_head_data.hash(), + }; + let (tx, rx) = oneshot::channel(); + virtual_overseer + .send(overseer::FromOrchestra::Communication { + msg: ProspectiveParachainsMessage::GetProspectiveValidationData(request, tx), + }) + .await; + let resp = rx.await.unwrap(); + assert_eq!(resp, expected_pvd); +} + +#[test] +fn should_do_no_work_if_async_backing_disabled_for_leaf() { + async fn activate_leaf_async_backing_disabled(virtual_overseer: &mut VirtualOverseer) { + let hash = Hash::from_low_u64_be(130); + + // Start work on some new parent.
+ virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::start_work(ActivatedLeaf { + hash, + number: 1, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }), + ))) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + ) if parent == hash => { + tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); + } + ); + } + + let view = test_harness(|mut virtual_overseer| async move { + activate_leaf_async_backing_disabled(&mut virtual_overseer).await; + + virtual_overseer + }); + + assert!(view.active_leaves.is_empty()); + assert!(view.candidate_storage.is_empty()); +} + +// Send some candidates and make sure all are found: +// - Two for the same leaf A +// - One for leaf B on parachain 1 +// - One for leaf C on parachain 2 +#[test] +fn send_candidates_and_check_if_found() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + // Leaf B + let leaf_b = TestLeaf { + number: 101, + hash: Hash::from_low_u64_be(131), + para_data: vec![ + (1.into(), PerParaData::new(99, HeadData(vec![3, 4, 5]))), + (2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))), + ], + }; + // Leaf C + let leaf_c = TestLeaf { + number: 102, + hash: Hash::from_low_u64_be(132), + para_data: vec![ + (1.into(), PerParaData::new(102, HeadData(vec![5, 6, 7]))), + (2.into(), PerParaData::new(98, HeadData(vec![6, 7, 8]))), + ], + }; + + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; + activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; + + // Candidate A1 + let (candidate_a1, pvd_a1) = make_candidate( + &leaf_a, + 1.into(), + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a1 = candidate_a1.hash(); + let response_a1 = vec![(leaf_a.hash, vec![0])]; + + // Candidate A2 + let (candidate_a2, pvd_a2) = make_candidate( + &leaf_a, + 2.into(), + HeadData(vec![2, 3, 4]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + let candidate_hash_a2 = candidate_a2.hash(); + let response_a2 = vec![(leaf_a.hash, vec![0])]; + + // Candidate B + let (candidate_b, pvd_b) = make_candidate( + &leaf_b, + 1.into(), + HeadData(vec![3, 4, 5]), + HeadData(vec![3]), + test_state.validation_code_hash, + ); + let candidate_hash_b = candidate_b.hash(); + let response_b = vec![(leaf_b.hash, vec![0])]; + + // Candidate C + let (candidate_c, pvd_c) = make_candidate( + &leaf_c, + 2.into(), + HeadData(vec![6, 7, 8]), + HeadData(vec![4]), + test_state.validation_code_hash, + ); + let candidate_hash_c = candidate_c.hash(); + let response_c = vec![(leaf_c.hash, vec![0])]; + + // Second candidates. + second_candidate(&mut virtual_overseer, candidate_a1, pvd_a1, response_a1.clone()).await; + second_candidate(&mut virtual_overseer, candidate_a2, pvd_a2, response_a2.clone()).await; + second_candidate(&mut virtual_overseer, candidate_b, pvd_b, response_b.clone()).await; + second_candidate(&mut virtual_overseer, candidate_c, pvd_c, response_c.clone()).await; + + // Check candidate tree membership. 
+ get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, response_a1).await; + get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, response_a2).await; + get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, response_b).await; + get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, response_c).await; + + // The candidates should not be found on other parachains. + get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a1, vec![]).await; + get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a2, vec![]).await; + get_membership(&mut virtual_overseer, 2.into(), candidate_hash_b, vec![]).await; + get_membership(&mut virtual_overseer, 1.into(), candidate_hash_c, vec![]).await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 3); + assert_eq!(view.candidate_storage.len(), 2); + // Two parents and two candidates per para. + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2)); + assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (2, 2)); +} + +// Send some candidates, check if the candidate won't be found once its relay parent leaves the view. +#[test] +fn check_candidate_parent_leaving_view() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + // Leaf B + let leaf_b = TestLeaf { + number: 101, + hash: Hash::from_low_u64_be(131), + para_data: vec![ + (1.into(), PerParaData::new(99, HeadData(vec![3, 4, 5]))), + (2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))), + ], + }; + // Leaf C + let leaf_c = TestLeaf { + number: 102, + hash: Hash::from_low_u64_be(132), + para_data: vec![ + (1.into(), PerParaData::new(102, HeadData(vec![5, 6, 7]))), + (2.into(), PerParaData::new(98, HeadData(vec![6, 7, 8]))), + ], + }; + + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; + activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; + + // Candidate A1 + let (candidate_a1, pvd_a1) = make_candidate( + &leaf_a, + 1.into(), + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a1 = candidate_a1.hash(); + let response_a1 = vec![(leaf_a.hash, vec![0])]; + + // Candidate A2 + let (candidate_a2, pvd_a2) = make_candidate( + &leaf_a, + 2.into(), + HeadData(vec![2, 3, 4]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + let candidate_hash_a2 = candidate_a2.hash(); + let response_a2 = vec![(leaf_a.hash, vec![0])]; + + // Candidate B + let (candidate_b, pvd_b) = make_candidate( + &leaf_b, + 1.into(), + HeadData(vec![3, 4, 5]), + HeadData(vec![3]), + test_state.validation_code_hash, + ); + let candidate_hash_b = candidate_b.hash(); + let response_b = vec![(leaf_b.hash, vec![0])]; + + // Candidate C + let (candidate_c, pvd_c) = make_candidate( + &leaf_c, + 2.into(), + HeadData(vec![6, 7, 8]), + HeadData(vec![4]), + test_state.validation_code_hash, + ); + let candidate_hash_c = candidate_c.hash(); + let response_c = vec![(leaf_c.hash, vec![0])]; + + // Second candidates. 
+ second_candidate(&mut virtual_overseer, candidate_a1, pvd_a1, response_a1.clone()).await; + second_candidate(&mut virtual_overseer, candidate_a2, pvd_a2, response_a2.clone()).await; + second_candidate(&mut virtual_overseer, candidate_b, pvd_b, response_b.clone()).await; + second_candidate(&mut virtual_overseer, candidate_c, pvd_c, response_c.clone()).await; + + // Deactivate leaf A. + deactivate_leaf(&mut virtual_overseer, leaf_a.hash).await; + + // Candidates A1 and A2 should be gone. Candidates B and C should remain. + get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, vec![]).await; + get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, vec![]).await; + get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, response_b).await; + get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, response_c.clone()).await; + + // Deactivate leaf B. + deactivate_leaf(&mut virtual_overseer, leaf_b.hash).await; + + // Candidate B should be gone, C should remain. + get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, vec![]).await; + get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, vec![]).await; + get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, vec![]).await; + get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, response_c).await; + + // Deactivate leaf C. + deactivate_leaf(&mut virtual_overseer, leaf_c.hash).await; + + // Candidate C should be gone. + get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, vec![]).await; + get_membership(&mut virtual_overseer, 2.into(), candidate_hash_a2, vec![]).await; + get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, vec![]).await; + get_membership(&mut virtual_overseer, 2.into(), candidate_hash_c, vec![]).await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 0); + assert_eq!(view.candidate_storage.len(), 0); +} + +// Introduce a candidate to multiple forks, see how the membership is returned. +#[test] +fn check_candidate_on_multiple_forks() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + // Leaf B + let leaf_b = TestLeaf { + number: 101, + hash: Hash::from_low_u64_be(131), + para_data: vec![ + (1.into(), PerParaData::new(99, HeadData(vec![3, 4, 5]))), + (2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))), + ], + }; + // Leaf C + let leaf_c = TestLeaf { + number: 102, + hash: Hash::from_low_u64_be(132), + para_data: vec![ + (1.into(), PerParaData::new(102, HeadData(vec![5, 6, 7]))), + (2.into(), PerParaData::new(98, HeadData(vec![6, 7, 8]))), + ], + }; + + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; + activate_leaf(&mut virtual_overseer, &leaf_c, &test_state).await; + + // Candidate on leaf A. + let (candidate_a, pvd_a) = make_candidate( + &leaf_a, + 1.into(), + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a = candidate_a.hash(); + let response_a = vec![(leaf_a.hash, vec![0])]; + + // Candidate on leaf B. 
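+ // All three candidates below are for para 1 and produce the same output
+ // head, but each builds on a different leaf's parent head data, so each
+ // should appear only in its own leaf's fragment tree.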
+ let (candidate_b, pvd_b) = make_candidate( + &leaf_b, + 1.into(), + HeadData(vec![3, 4, 5]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_b = candidate_b.hash(); + let response_b = vec![(leaf_b.hash, vec![0])]; + + // Candidate on leaf C. + let (candidate_c, pvd_c) = make_candidate( + &leaf_c, + 1.into(), + HeadData(vec![5, 6, 7]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_c = candidate_c.hash(); + let response_c = vec![(leaf_c.hash, vec![0])]; + + // Second candidate on all three leaves. + second_candidate( + &mut virtual_overseer, + candidate_a.clone(), + pvd_a.clone(), + response_a.clone(), + ) + .await; + second_candidate( + &mut virtual_overseer, + candidate_b.clone(), + pvd_b.clone(), + response_b.clone(), + ) + .await; + second_candidate( + &mut virtual_overseer, + candidate_c.clone(), + pvd_c.clone(), + response_c.clone(), + ) + .await; + + // Check candidate tree membership. + get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a, response_a).await; + get_membership(&mut virtual_overseer, 1.into(), candidate_hash_b, response_b).await; + get_membership(&mut virtual_overseer, 1.into(), candidate_hash_c, response_c).await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 3); + assert_eq!(view.candidate_storage.len(), 2); + // Three parents and three candidates on para 1. + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (3, 3)); + assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); +} + +// Backs some candidates and tests `GetBackableCandidate`. +#[test] +fn check_backable_query() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + + // Candidate A + let (candidate_a, pvd_a) = make_candidate( + &leaf_a, + 1.into(), + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a = candidate_a.hash(); + let response_a = vec![(leaf_a.hash, vec![0])]; + + // Candidate B + let (mut candidate_b, pvd_b) = make_candidate( + &leaf_a, + 1.into(), + HeadData(vec![1]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + // Set a field to make this candidate unique. + candidate_b.descriptor.para_head = Hash::from_low_u64_le(1000); + let candidate_hash_b = candidate_b.hash(); + let response_b = vec![(leaf_a.hash, vec![1])]; + + // Second candidates. + second_candidate( + &mut virtual_overseer, + candidate_a.clone(), + pvd_a.clone(), + response_a.clone(), + ) + .await; + second_candidate( + &mut virtual_overseer, + candidate_b.clone(), + pvd_b.clone(), + response_b.clone(), + ) + .await; + + // Should not get any backable candidates. + get_backable_candidate( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a], + None, + ) + .await; + + // Back candidates. + back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; + back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; + + // Get backable candidate. 
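+ // The `Vec` argument is the required path of ancestors assumed to already
+ // occupy the tree: an empty path yields A (depth 0), while `[candidate_hash_a]`
+ // yields B (depth 1), since B chains on A's output head.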
+ get_backable_candidate( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![], + Some(candidate_hash_a), + ) + .await; + get_backable_candidate( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_a], + Some(candidate_hash_b), + ) + .await; + + // Should not get anything at the wrong path. + get_backable_candidate( + &mut virtual_overseer, + &leaf_a, + 1.into(), + vec![candidate_hash_b], + None, + ) + .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 1); + assert_eq!(view.candidate_storage.len(), 2); + // Two parents and two candidates on para 1. + assert_eq!(view.candidate_storage.get(&1.into()).unwrap().len(), (2, 2)); + assert_eq!(view.candidate_storage.get(&2.into()).unwrap().len(), (0, 0)); +} + +// Test depth query. +#[test] +fn check_depth_query() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + + // Candidate A. + let (candidate_a, pvd_a) = make_candidate( + &leaf_a, + 1.into(), + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a = candidate_a.hash(); + let response_a = vec![(leaf_a.hash, vec![0])]; + + // Candidate B. + let (candidate_b, pvd_b) = make_candidate( + &leaf_a, + 1.into(), + HeadData(vec![1]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + let candidate_hash_b = candidate_b.hash(); + let response_b = vec![(leaf_a.hash, vec![1])]; + + // Candidate C. + let (candidate_c, pvd_c) = make_candidate( + &leaf_a, + 1.into(), + HeadData(vec![2]), + HeadData(vec![3]), + test_state.validation_code_hash, + ); + let candidate_hash_c = candidate_c.hash(); + let response_c = vec![(leaf_a.hash, vec![2])]; + + // Get hypothetical depth of candidate A before adding it. + get_hypothetical_depth( + &mut virtual_overseer, + candidate_hash_a, + 1.into(), + leaf_a.para_data(1.into()).head_data.clone(), + leaf_a.hash, + leaf_a.hash, + vec![0], + ) + .await; + + // Add candidate A. + second_candidate( + &mut virtual_overseer, + candidate_a.clone(), + pvd_a.clone(), + response_a.clone(), + ) + .await; + + // Get depth of candidate A after adding it. + get_hypothetical_depth( + &mut virtual_overseer, + candidate_hash_a, + 1.into(), + HeadData(vec![1, 2, 3]), + leaf_a.hash, + leaf_a.hash, + vec![0], + ) + .await; + + // Get hypothetical depth of candidate B before adding it. + get_hypothetical_depth( + &mut virtual_overseer, + candidate_hash_b, + 1.into(), + HeadData(vec![1]), + leaf_a.hash, + leaf_a.hash, + vec![1], + ) + .await; + + // Add candidate B. + second_candidate(&mut virtual_overseer, candidate_b, pvd_b.clone(), response_b.clone()) + .await; + + // Get depth of candidate B after adding it. + get_hypothetical_depth( + &mut virtual_overseer, + candidate_hash_b, + 1.into(), + HeadData(vec![1]), + leaf_a.hash, + leaf_a.hash, + vec![1], + ) + .await; + + // Get hypothetical depth of candidate C before adding it. + get_hypothetical_depth( + &mut virtual_overseer, + candidate_hash_c, + 1.into(), + HeadData(vec![1]), + leaf_a.hash, + leaf_a.hash, + vec![1], + ) + .await; + + // Add candidate C. 
+ second_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone(), response_c.clone()) + .await; + + // Get depth of candidate C after adding it. + get_hypothetical_depth( + &mut virtual_overseer, + candidate_hash_c, + 1.into(), + HeadData(vec![2]), + leaf_a.hash, + leaf_a.hash, + vec![2], + ) + .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 1); + assert_eq!(view.candidate_storage.len(), 2); +} + +#[test] +fn check_pvd_query() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + + // Candidate A. + let (candidate_a, pvd_a) = make_candidate( + &leaf_a, + 1.into(), + HeadData(vec![1, 2, 3]), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let response_a = vec![(leaf_a.hash, vec![0])]; + + // Candidate B. + let (candidate_b, pvd_b) = make_candidate( + &leaf_a, + 1.into(), + HeadData(vec![1]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + let response_b = vec![(leaf_a.hash, vec![1])]; + + // Candidate C. + let (candidate_c, pvd_c) = make_candidate( + &leaf_a, + 1.into(), + HeadData(vec![2]), + HeadData(vec![3]), + test_state.validation_code_hash, + ); + let response_c = vec![(leaf_a.hash, vec![2])]; + + // Get pvd of candidate A before adding it. + get_pvd( + &mut virtual_overseer, + 1.into(), + leaf_a.hash, + HeadData(vec![1, 2, 3]), + Some(pvd_a.clone()), + ) + .await; + + // Add candidate A. + second_candidate( + &mut virtual_overseer, + candidate_a.clone(), + pvd_a.clone(), + response_a.clone(), + ) + .await; + back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; + + // Get pvd of candidate A after adding it. + get_pvd( + &mut virtual_overseer, + 1.into(), + leaf_a.hash, + HeadData(vec![1, 2, 3]), + Some(pvd_a.clone()), + ) + .await; + + // Get pvd of candidate B before adding it. + get_pvd( + &mut virtual_overseer, + 1.into(), + leaf_a.hash, + HeadData(vec![1]), + Some(pvd_b.clone()), + ) + .await; + + // Add candidate B. + second_candidate(&mut virtual_overseer, candidate_b, pvd_b.clone(), response_b.clone()) + .await; + + // Get depth and pvd of candidate B after adding it. + get_pvd( + &mut virtual_overseer, + 1.into(), + leaf_a.hash, + HeadData(vec![1]), + Some(pvd_b.clone()), + ) + .await; + + // Get pvd of candidate C before adding it. + get_pvd( + &mut virtual_overseer, + 1.into(), + leaf_a.hash, + HeadData(vec![2]), + Some(pvd_c.clone()), + ) + .await; + + // Add candidate C. + second_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone(), response_c.clone()) + .await; + + // Get depth and pvd of candidate C after adding it. + get_pvd( + &mut virtual_overseer, + 1.into(), + leaf_a.hash, + HeadData(vec![2]), + Some(pvd_c.clone()), + ) + .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 1); + assert_eq!(view.candidate_storage.len(), 2); +} + +// Test simultaneously activating and deactivating leaves, and simultaneously deactivating multiple +// leaves. 
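+// Also exercises duplicate activations, empty updates, and deactivation of
+// hashes that are no longer (or never were) in the active set.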
+#[test] +fn correctly_updates_leaves() { + let test_state = TestState::default(); + let view = test_harness(|mut virtual_overseer| async move { + // Leaf A + let leaf_a = TestLeaf { + number: 100, + hash: Hash::from_low_u64_be(130), + para_data: vec![ + (1.into(), PerParaData::new(97, HeadData(vec![1, 2, 3]))), + (2.into(), PerParaData::new(100, HeadData(vec![2, 3, 4]))), + ], + }; + // Leaf B + let leaf_b = TestLeaf { + number: 101, + hash: Hash::from_low_u64_be(131), + para_data: vec![ + (1.into(), PerParaData::new(99, HeadData(vec![3, 4, 5]))), + (2.into(), PerParaData::new(101, HeadData(vec![4, 5, 6]))), + ], + }; + // Leaf C + let leaf_c = TestLeaf { + number: 102, + hash: Hash::from_low_u64_be(132), + para_data: vec![ + (1.into(), PerParaData::new(102, HeadData(vec![5, 6, 7]))), + (2.into(), PerParaData::new(98, HeadData(vec![6, 7, 8]))), + ], + }; + + // Activate leaves. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; + + // Try activating a duplicate leaf. + activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; + + // Pass in an empty update. + let update = ActiveLeavesUpdate::default(); + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update))) + .await; + + // Activate a leaf and remove one at the same time. + let activated = ActivatedLeaf { + hash: leaf_c.hash, + number: leaf_c.number, + span: Arc::new(jaeger::Span::Disabled), + status: LeafStatus::Fresh, + }; + let update = ActiveLeavesUpdate { + activated: Some(activated), + deactivated: [leaf_b.hash][..].into(), + }; + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update))) + .await; + handle_leaf_activation(&mut virtual_overseer, &leaf_c, &test_state).await; + + // Remove all remaining leaves. + let update = ActiveLeavesUpdate { + deactivated: [leaf_a.hash, leaf_c.hash][..].into(), + ..Default::default() + }; + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update))) + .await; + + // Activate and deactivate the same leaf. + let activated = ActivatedLeaf { + hash: leaf_a.hash, + number: leaf_a.number, + span: Arc::new(jaeger::Span::Disabled), + status: LeafStatus::Fresh, + }; + let update = ActiveLeavesUpdate { + activated: Some(activated), + deactivated: [leaf_a.hash][..].into(), + }; + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update))) + .await; + handle_leaf_activation(&mut virtual_overseer, &leaf_a, &test_state).await; + + // Remove the leaf again. Send some unnecessary hashes. 
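+ // Leaves B and C are already gone at this point, so their hashes exercise
+ // the "deactivate an unknown leaf" path, which should be a harmless no-op.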
+ let update = ActiveLeavesUpdate { + deactivated: [leaf_a.hash, leaf_b.hash, leaf_c.hash][..].into(), + ..Default::default() + }; + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update))) + .await; + + virtual_overseer + }); + + assert_eq!(view.active_leaves.len(), 0); + assert_eq!(view.candidate_storage.len(), 0); +} diff --git a/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index ebaf18f050e0..588754f6fe63 100644 --- a/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -423,7 +423,6 @@ fn second_multiple_candidates_per_relay_parent() { for i in 0..(ASYNC_BACKING_PARAMETERS.max_candidate_depth + 1) { let mut candidate = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default())); candidate.descriptor.para_id = test_state.chain_ids[0]; - candidate.descriptor.relay_parent = head_c; candidate.descriptor.persisted_validation_data_hash = dummy_pvd().hash(); let commitments = CandidateCommitments { head_data: HeadData(vec![i as u8]), @@ -579,7 +578,6 @@ fn fetched_collation_sanity_check() { let mut candidate = dummy_candidate_receipt_bad_sig(head_c, Some(Default::default())); candidate.descriptor.para_id = test_state.chain_ids[0]; - candidate.descriptor.relay_parent = head_c; let commitments = CandidateCommitments { head_data: HeadData(vec![1, 2, 3]), horizontal_messages: Vec::new(), diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml index 67bfaf0ee5bb..55edd424059f 100644 --- a/node/service/Cargo.toml +++ b/node/service/Cargo.toml @@ -161,7 +161,7 @@ full-node = [ "polkadot-node-core-chain-api", "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", - "polkadot-node-core-prospective-parachains", + "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-runtime-api", "polkadot-statement-distribution", diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 827e708bebc5..5f8f7d53f149 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -64,9 +64,8 @@ //! A fragment tree is a mental model for thinking about a forking series of predictions //! about a single parachain. There may be one or more fragment trees per parachain. //! -//! In expectation, most parachains will have a plausibly-unique authorship method -//! which means that they should really be much closer to fragment-chains, maybe -//! maybe with an occasional fork. +//! In expectation, most parachains will have a plausibly-unique authorship method which means that +//! they should really be much closer to fragment-chains, maybe with an occasional fork. //! //! Avoiding fragment-tree blowup is beyond the scope of this module. //! @@ -99,7 +98,7 @@ //! As predictions fade into the past, new ones should be stacked on top. //! //! Every new relay-chain block is an opportunity to make a new prediction about the future. -//! higher-level logic should select the leaves of the fragment-trees to build upon or whether +//! Higher-level logic should select the leaves of the fragment-trees to build upon or whether //! to create a new fragment-tree. //! //! 
### Code Upgrades From 1c59180a2a747a373f78fab3d963f9e091e782ea Mon Sep 17 00:00:00 2001 From: Marcin S Date: Mon, 30 Jan 2023 16:48:27 +0100 Subject: [PATCH 32/76] Node-side metrics for asynchronous backing (#6549) * Add metrics for `prune_view_candidate_storage` * Add metrics for `request_unblocked_collations` * Fix docstring * Couple fixes from review comments --- node/core/prospective-parachains/src/lib.rs | 41 ++++++++------- .../prospective-parachains/src/metrics.rs | 52 +++++++++++++++++++ .../src/validator_side/metrics.rs | 21 +++++++- .../src/validator_side/mod.rs | 4 +- node/service/src/overseer.rs | 4 +- 5 files changed, 100 insertions(+), 22 deletions(-) create mode 100644 node/core/prospective-parachains/src/metrics.rs diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index dabbd6634a95..a077a706b127 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -1,4 +1,4 @@ -// Copyright 2022 Parity Technologies (UK) Ltd. +// Copyright 2022-2023 Parity Technologies (UK) Ltd. // This file is part of Polkadot. // Polkadot is free software: you can redistribute it and/or modify @@ -61,6 +61,9 @@ mod fragment_tree; #[cfg(test)] mod tests; +mod metrics; +use self::metrics::Metrics; + const LOG_TARGET: &str = "parachain::prospective-parachains"; struct RelayBlockViewData { @@ -82,12 +85,14 @@ impl View { /// The prospective parachains subsystem. #[derive(Default)] -pub struct ProspectiveParachainsSubsystem; +pub struct ProspectiveParachainsSubsystem { + metrics: Metrics, +} impl ProspectiveParachainsSubsystem { /// Create a new instance of the `ProspectiveParachainsSubsystem`. - pub fn new() -> Self { - Self + pub fn new(metrics: Metrics) -> Self { + Self { metrics } } } @@ -98,7 +103,7 @@ where { fn start(self, ctx: Context) -> SpawnedSubsystem { SpawnedSubsystem { - future: run(ctx) + future: run(ctx, self.metrics) .map_err(|e| SubsystemError::with_origin("prospective-parachains", e)) .boxed(), name: "prospective-parachains-subsystem", @@ -107,23 +112,27 @@ where } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] -async fn run(mut ctx: Context) -> FatalResult<()> { +async fn run(mut ctx: Context, metrics: Metrics) -> FatalResult<()> { let mut view = View::new(); loop { crate::error::log_error( - run_iteration(&mut ctx, &mut view).await, + run_iteration(&mut ctx, &mut view, &metrics).await, "Encountered issue during run iteration", )?; } } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] -async fn run_iteration(ctx: &mut Context, view: &mut View) -> Result<()> { +async fn run_iteration( + ctx: &mut Context, + view: &mut View, + metrics: &Metrics, +) -> Result<()> { loop { match ctx.recv().await.map_err(FatalError::SubsystemReceive)? { FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { - handle_active_leaves_update(&mut *ctx, view, update).await?; + handle_active_leaves_update(&mut *ctx, view, update, metrics).await?; }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Communication { msg } => match msg { @@ -155,6 +164,7 @@ async fn handle_active_leaves_update( ctx: &mut Context, view: &mut View, update: ActiveLeavesUpdate, + metrics: &Metrics, ) -> JfyiErrorResult<()> { // 1. clean up inactive leaves // 2. 
determine all scheduled para at new block @@ -245,13 +255,15 @@ async fn handle_active_leaves_update( if !update.deactivated.is_empty() { // This has potential to be a hotspot. - prune_view_candidate_storage(view); + prune_view_candidate_storage(view, metrics); } Ok(()) } -fn prune_view_candidate_storage(view: &mut View) { +fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) { + metrics.time_prune_view_candidate_storage(); + let active_leaves = &view.active_leaves; view.candidate_storage.retain(|para_id, storage| { let mut coverage = HashSet::new(); @@ -721,10 +733,3 @@ async fn fetch_block_info( storage_root: header.state_root, })) } - -#[derive(Clone)] -struct MetricsInner; - -/// Prospective parachain metrics. -#[derive(Default, Clone)] -pub struct Metrics(Option); diff --git a/node/core/prospective-parachains/src/metrics.rs b/node/core/prospective-parachains/src/metrics.rs new file mode 100644 index 000000000000..d7a1760bb459 --- /dev/null +++ b/node/core/prospective-parachains/src/metrics.rs @@ -0,0 +1,52 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +use polkadot_node_subsystem_util::metrics::{self, prometheus}; + +#[derive(Clone)] +pub(crate) struct MetricsInner { + pub(crate) prune_view_candidate_storage: prometheus::Histogram, +} + +/// Candidate backing metrics. +#[derive(Default, Clone)] +pub struct Metrics(pub(crate) Option); + +impl Metrics { + /// Provide a timer for handling `prune_view_candidate_storage` which observes on drop. + pub fn time_prune_view_candidate_storage( + &self, + ) -> Option { + self.0 + .as_ref() + .map(|metrics| metrics.prune_view_candidate_storage.start_timer()) + } +} + +impl metrics::Metrics for Metrics { + fn try_register(registry: &prometheus::Registry) -> Result { + let metrics = MetricsInner { + prune_view_candidate_storage: prometheus::register( + prometheus::Histogram::with_opts(prometheus::HistogramOpts::new( + "polkadot_parachain_prospective_parachains_prune_view_candidate_storage", + "Time spent within `prospective_parachains::prune_view_candidate_storage`", + ))?, + registry, + )?, + }; + Ok(Metrics(Some(metrics))) + } +} diff --git a/node/network/collator-protocol/src/validator_side/metrics.rs b/node/network/collator-protocol/src/validator_side/metrics.rs index a011a5f3b43e..947fe36550f1 100644 --- a/node/network/collator-protocol/src/validator_side/metrics.rs +++ b/node/network/collator-protocol/src/validator_side/metrics.rs @@ -1,4 +1,4 @@ -// Copyright 2017-2022 Parity Technologies (UK) Ltd. +// Copyright 2017-2023 Parity Technologies (UK) Ltd. // This file is part of Polkadot. // Polkadot is free software: you can redistribute it and/or modify @@ -56,6 +56,15 @@ impl Metrics { ) -> Option { self.0.as_ref().map(|metrics| metrics.collation_request_duration.start_timer()) } + + /// Provide a timer for `request_unblocked_collations` which observes on drop. 
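+ /// Returns `None` when metrics are not enabled; otherwise the returned
+ /// `HistogramTimer` records the elapsed time into the histogram on drop.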
+ pub fn time_request_unblocked_collations( + &self, + ) -> Option { + self.0 + .as_ref() + .map(|metrics| metrics.request_unblocked_collations.start_timer()) + } } #[derive(Clone)] @@ -65,6 +74,7 @@ struct MetricsInner { handle_collation_request_result: prometheus::Histogram, collator_peer_count: prometheus::Gauge, collation_request_duration: prometheus::Histogram, + request_unblocked_collations: prometheus::Histogram, } impl metrics::Metrics for Metrics { @@ -116,6 +126,15 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + request_unblocked_collations: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_collator_protocol_validator_request_unblocked_collations", + "Time spent within `collator_protocol_validator::request_unblocked_collations`", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index e8faaf97cf79..60fd1db2bb6b 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -1,4 +1,4 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. +// Copyright 2020-2023 Parity Technologies (UK) Ltd. // This file is part of Polkadot. // Polkadot is free software: you can redistribute it and/or modify @@ -979,6 +979,8 @@ where Sender: CollatorProtocolSenderTrait, I: IntoIterator)>, { + let _timer = state.metrics.time_request_unblocked_collations(); + for (key, mut value) in blocked { let (para_id, para_head) = key; let blocked = std::mem::take(&mut value); diff --git a/node/service/src/overseer.rs b/node/service/src/overseer.rs index b0668f632fab..83291addb762 100644 --- a/node/service/src/overseer.rs +++ b/node/service/src/overseer.rs @@ -1,4 +1,4 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// Copyright 2017-2023 Parity Technologies (UK) Ltd. // This file is part of Polkadot. 
// Polkadot is free software: you can redistribute it and/or modify @@ -322,7 +322,7 @@ where Metrics::register(registry)?, )) .chain_selection(ChainSelectionSubsystem::new(chain_selection_config, parachains_db)) - .prospective_parachains(ProspectiveParachainsSubsystem::new()) + .prospective_parachains(ProspectiveParachainsSubsystem::new(Metrics::register(registry)?)) .leaves(Vec::from_iter( leaves .into_iter() From 8ce4057188c03ffc9ab348b43b93c49a61d2ebef Mon Sep 17 00:00:00 2001 From: Marcin S Date: Mon, 30 Jan 2023 20:23:55 +0100 Subject: [PATCH 33/76] Fix `check_depth_query` test --- node/core/prospective-parachains/src/tests.rs | 107 ++++++++++-------- 1 file changed, 57 insertions(+), 50 deletions(-) diff --git a/node/core/prospective-parachains/src/tests.rs b/node/core/prospective-parachains/src/tests.rs index e7f15654f86e..ef03789ce101 100644 --- a/node/core/prospective-parachains/src/tests.rs +++ b/node/core/prospective-parachains/src/tests.rs @@ -20,7 +20,7 @@ use assert_matches::assert_matches; use polkadot_node_subsystem::{ errors::RuntimeApiError, messages::{ - AllMessages, HypotheticalDepthRequest, ProspectiveParachainsMessage, + AllMessages, HypotheticalFrontierRequest, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, }, }; @@ -140,7 +140,7 @@ fn test_harness>( let mut view = View::new(); let subsystem = async move { loop { - match run_iteration(&mut context, &mut view).await { + match run_iteration(&mut context, &mut view, &Metrics(None)).await { Ok(()) => break, Err(e) => panic!("{:?}", e), } @@ -408,30 +408,33 @@ async fn get_backable_candidate( assert_eq!(resp, expected_candidate_hash); } -async fn get_hypothetical_depth( +async fn get_hypothetical_frontier( virtual_overseer: &mut VirtualOverseer, candidate_hash: CandidateHash, - para_id: ParaId, - parent_head_data: HeadData, - candidate_relay_parent: Hash, + receipt: CommittedCandidateReceipt, + persisted_validation_data: PersistedValidationData, fragment_tree_relay_parent: Hash, expected_depths: Vec, ) { - let request = HypotheticalDepthRequest { + let hypothetical_candidate = HypotheticalCandidate::Complete { candidate_hash, - candidate_para: para_id, - parent_head_data_hash: parent_head_data.hash(), - candidate_relay_parent, - fragment_tree_relay_parent, + receipt: Arc::new(receipt), + persisted_validation_data, + }; + let request = HypotheticalFrontierRequest { + candidates: vec![hypothetical_candidate.clone()], + fragment_tree_relay_parent: Some(fragment_tree_relay_parent), }; let (tx, rx) = oneshot::channel(); virtual_overseer .send(overseer::FromOrchestra::Communication { - msg: ProspectiveParachainsMessage::GetHypotheticalDepth(request, tx), + msg: ProspectiveParachainsMessage::GetHypotheticalFrontier(request, tx), }) .await; let resp = rx.await.unwrap(); - assert_eq!(resp, expected_depths); + let expected_frontier = + vec![(hypothetical_candidate, vec![(fragment_tree_relay_parent, expected_depths)])]; + assert_eq!(resp, expected_frontier); } async fn get_pvd( @@ -998,13 +1001,12 @@ fn check_depth_query() { let candidate_hash_c = candidate_c.hash(); let response_c = vec![(leaf_a.hash, vec![2])]; - // Get hypothetical depth of candidate A before adding it. - get_hypothetical_depth( + // Get hypothetical frontier of candidate A before adding it. 
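+ // Note: the frontier response echoes the hypothetical candidate back along
+ // with `(fragment_tree_relay_parent, depths)` pairs, which is why the helper
+ // above rebuilds `expected_frontier` from its own arguments.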
+ get_hypothetical_frontier( &mut virtual_overseer, candidate_hash_a, - 1.into(), - leaf_a.para_data(1.into()).head_data.clone(), - leaf_a.hash, + candidate_a.clone(), + pvd_a.clone(), leaf_a.hash, vec![0], ) @@ -1019,69 +1021,74 @@ fn check_depth_query() { ) .await; - // Get depth of candidate A after adding it. - get_hypothetical_depth( + // Get frontier of candidate A after adding it. + get_hypothetical_frontier( &mut virtual_overseer, candidate_hash_a, - 1.into(), - HeadData(vec![1, 2, 3]), - leaf_a.hash, + candidate_a.clone(), + pvd_a.clone(), leaf_a.hash, vec![0], ) .await; - // Get hypothetical depth of candidate B before adding it. - get_hypothetical_depth( + // Get hypothetical frontier of candidate B before adding it. + get_hypothetical_frontier( &mut virtual_overseer, candidate_hash_b, - 1.into(), - HeadData(vec![1]), - leaf_a.hash, + candidate_b.clone(), + pvd_b.clone(), leaf_a.hash, vec![1], ) .await; // Add candidate B. - second_candidate(&mut virtual_overseer, candidate_b, pvd_b.clone(), response_b.clone()) - .await; + second_candidate( + &mut virtual_overseer, + candidate_b.clone(), + pvd_b.clone(), + response_b.clone(), + ) + .await; - // Get depth of candidate B after adding it. - get_hypothetical_depth( + // Get frontier of candidate B after adding it. + get_hypothetical_frontier( &mut virtual_overseer, candidate_hash_b, - 1.into(), - HeadData(vec![1]), - leaf_a.hash, + candidate_b, + pvd_b.clone(), leaf_a.hash, vec![1], ) .await; - // Get hypothetical depth of candidate C before adding it. - get_hypothetical_depth( + // Get hypothetical frontier of candidate C before adding it. + get_hypothetical_frontier( &mut virtual_overseer, candidate_hash_c, - 1.into(), - HeadData(vec![1]), - leaf_a.hash, + candidate_c.clone(), + pvd_c.clone(), leaf_a.hash, - vec![1], + vec![2], ) .await; // Add candidate C. - second_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone(), response_c.clone()) - .await; + second_candidate( + &mut virtual_overseer, + candidate_c.clone(), + pvd_c.clone(), + response_c.clone(), + ) + .await; - // Get depth of candidate C after adding it. - get_hypothetical_depth( + // Get frontier of candidate C after adding it. + get_hypothetical_frontier( &mut virtual_overseer, candidate_hash_c, - 1.into(), - HeadData(vec![2]), - leaf_a.hash, + candidate_c, + pvd_c.clone(), leaf_a.hash, vec![2], ) @@ -1185,7 +1192,7 @@ fn check_pvd_query() { second_candidate(&mut virtual_overseer, candidate_b, pvd_b.clone(), response_b.clone()) .await; - // Get depth and pvd of candidate B after adding it. + // Get pvd of candidate B after adding it. get_pvd( &mut virtual_overseer, 1.into(), @@ -1209,7 +1216,7 @@ fn check_pvd_query() { second_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone(), response_c.clone()) .await; - // Get depth and pvd of candidate C after adding it. + // Get pvd of candidate C after adding it. 
get_pvd( &mut virtual_overseer, 1.into(), From 9153998dca576070dc1835f2b96465d636ba5c6f Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Tue, 31 Jan 2023 21:25:47 +0400 Subject: [PATCH 34/76] inclusion-emulator: mirror advancement rule check (#6361) * inclusion-emulator: mirror advancement rule check * fix build --- .../src/fragment_tree.rs | 4 +- .../src/inclusion_emulator/staging.rs | 85 ++++++++++++++++--- primitives/src/vstaging/mod.rs | 4 +- .../src/runtime_api_impl/vstaging.rs | 5 +- 4 files changed, 79 insertions(+), 19 deletions(-) diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index c4a920577459..7f89d3232a0e 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -852,7 +852,7 @@ mod tests { ump_remaining: 10, ump_remaining_bytes: 1_000, max_ump_num_per_candidate: 10, - dmp_remaining_messages: 10, + dmp_remaining_messages: [0; 10].into(), hrmp_inbound: InboundHrmpLimitations { valid_watermarks }, hrmp_channels_out: HashMap::new(), max_hrmp_num_per_candidate: 0, @@ -895,7 +895,7 @@ mod tests { horizontal_messages: Vec::new(), new_validation_code: None, head_data: para_head, - processed_downward_messages: 0, + processed_downward_messages: 1, hrmp_watermark, }, }; diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 5f8f7d53f149..a8271703da9e 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -155,8 +155,8 @@ pub struct Constraints { pub ump_remaining_bytes: usize, /// The maximum number of UMP messages allowed per candidate. pub max_ump_num_per_candidate: usize, - /// The amount of remaining DMP messages. - pub dmp_remaining_messages: usize, + /// Remaining DMP queue. Only includes sent-at block numbers. + pub dmp_remaining_messages: Vec, /// The limitations of all registered inbound HRMP channels. pub hrmp_inbound: InboundHrmpLimitations, /// The limitations of all registered outbound HRMP channels. 
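Aside on the field change above: the queue is now the ordered list of sent-at block numbers for undelivered downward messages, so consuming `n` processed messages amounts to dropping the first `n` entries. A minimal sketch of that operation, assuming `u32` block numbers (the error case standing in for `ModificationError::DmpMessagesUnderflow`; not the patch's exact code):

fn apply_dmp_processed(queue: &[u32], processed: usize) -> Result<Vec<u32>, ()> {
	if processed > queue.len() {
		// Claimed to have processed more messages than actually remained.
		Err(())
	} else {
		// Drop the first `processed` sent-at entries; the rest stay pending.
		Ok(queue[processed..].to_vec())
	}
}

With `queue = [1, 4, 8, 10]` and `processed = 2` this leaves `[8, 10]`, matching the `constraints_dmp_messages` test further down.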
@@ -183,7 +183,7 @@ impl From for Constraints { ump_remaining: c.ump_remaining as _, ump_remaining_bytes: c.ump_remaining_bytes as _, max_ump_num_per_candidate: c.max_ump_num_per_candidate as _, - dmp_remaining_messages: c.dmp_remaining_messages as _, + dmp_remaining_messages: c.dmp_remaining_messages, hrmp_inbound: InboundHrmpLimitations { valid_watermarks: c.hrmp_inbound.valid_watermarks, }, @@ -310,9 +310,10 @@ impl Constraints { )?; self.dmp_remaining_messages + .len() .checked_sub(modifications.dmp_messages_processed) .ok_or(ModificationError::DmpMessagesUnderflow { - messages_remaining: self.dmp_remaining_messages, + messages_remaining: self.dmp_remaining_messages.len(), messages_processed: modifications.dmp_messages_processed, })?; @@ -393,13 +394,15 @@ impl Constraints { bytes_submitted: modifications.ump_bytes_sent, })?; - new.dmp_remaining_messages = new - .dmp_remaining_messages - .checked_sub(modifications.dmp_messages_processed) - .ok_or(ModificationError::DmpMessagesUnderflow { - messages_remaining: new.dmp_remaining_messages, + if modifications.dmp_messages_processed > new.dmp_remaining_messages.len() { + return Err(ModificationError::DmpMessagesUnderflow { + messages_remaining: new.dmp_remaining_messages.len(), messages_processed: modifications.dmp_messages_processed, - })?; + }) + } else { + new.dmp_remaining_messages = + new.dmp_remaining_messages[modifications.dmp_messages_processed..].to_vec(); + } if modifications.code_upgrade_applied { new.validation_code_hash = new @@ -586,6 +589,8 @@ pub enum FragmentValidityError { /// /// Min allowed, current. RelayParentTooOld(BlockNumber, BlockNumber), + /// Para is required to process at least one DMP message from the queue. + DmpAdvancementRule, /// Too many messages upward messages submitted. UmpMessagesPerCandidateOverflow { /// The amount of messages a single candidate can submit. 
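The `DmpAdvancementRule` variant added above corresponds to the check introduced in the next hunk. Reduced to a free function (a sketch under the same sent-at representation, not the patch's exact code), the rule is:

fn violates_dmp_advancement_rule(
	dmp_remaining_messages: &[u32], // sent-at block numbers, oldest first
	messages_processed: usize,
	relay_parent_number: u32,
) -> bool {
	// A candidate must drain at least one downward message once the head of
	// the queue was sent at or before its relay-parent's block number.
	messages_processed == 0 &&
		dmp_remaining_messages
			.first()
			.map_or(false, |&sent_at| sent_at <= relay_parent_number)
}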
@@ -787,6 +792,16 @@ fn validate_against_constraints( )) } + if modifications.dmp_messages_processed == 0 { + if constraints + .dmp_remaining_messages + .get(0) + .map_or(false, |&msg_sent_at| msg_sent_at <= relay_parent.number) + { + return Err(FragmentValidityError::DmpAdvancementRule) + } + } + if candidate.commitments.horizontal_messages.len() > constraints.max_hrmp_num_per_candidate { return Err(FragmentValidityError::HrmpMessagesPerCandidateOverflow { messages_allowed: constraints.max_hrmp_num_per_candidate, @@ -926,7 +941,7 @@ mod tests { ump_remaining: 10, ump_remaining_bytes: 1024, max_ump_num_per_candidate: 5, - dmp_remaining_messages: 5, + dmp_remaining_messages: Vec::new(), hrmp_inbound: InboundHrmpLimitations { valid_watermarks: vec![6, 8] }, hrmp_channels_out: { let mut map = HashMap::new(); @@ -1115,14 +1130,17 @@ mod tests { #[test] fn constraints_dmp_messages() { - let constraints = make_constraints(); + let mut constraints = make_constraints(); let mut modifications = ConstraintModifications::identity(); + assert!(constraints.check_modifications(&modifications).is_ok()); + assert!(constraints.apply_modifications(&modifications).is_ok()); + modifications.dmp_messages_processed = 6; assert_eq!( constraints.check_modifications(&modifications), Err(ModificationError::DmpMessagesUnderflow { - messages_remaining: 5, + messages_remaining: 0, messages_processed: 6, }), ); @@ -1130,10 +1148,19 @@ mod tests { assert_eq!( constraints.apply_modifications(&modifications), Err(ModificationError::DmpMessagesUnderflow { - messages_remaining: 5, + messages_remaining: 0, messages_processed: 6, }), ); + + constraints.dmp_remaining_messages = vec![1, 4, 8, 10]; + modifications.dmp_messages_processed = 2; + assert!(constraints.check_modifications(&modifications).is_ok()); + let constraints = constraints + .apply_modifications(&modifications) + .expect("modifications are valid"); + + assert_eq!(&constraints.dmp_remaining_messages, &[8, 10]); } #[test] @@ -1305,6 +1332,36 @@ mod tests { ); } + #[test] + fn fragment_dmp_advancement_rule() { + let relay_parent = RelayChainBlockInfo { + number: 6, + hash: Hash::repeat_byte(0x0a), + storage_root: Hash::repeat_byte(0xff), + }; + + let mut constraints = make_constraints(); + let mut candidate = make_candidate(&constraints, &relay_parent); + + // Empty dmp queue is ok. + assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); + // Unprocessed message that was sent later is ok. + constraints.dmp_remaining_messages = vec![relay_parent.number + 1]; + assert!(Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()).is_ok()); + + for block_number in 0..=relay_parent.number { + constraints.dmp_remaining_messages = vec![block_number]; + + assert_eq!( + Fragment::new(relay_parent.clone(), constraints.clone(), candidate.clone()), + Err(FragmentValidityError::DmpAdvancementRule), + ); + } + + candidate.commitments.processed_downward_messages = 1; + assert!(Fragment::new(relay_parent, constraints, candidate).is_ok()); + } + #[test] fn fragment_ump_messages_overflow() { let relay_parent = RelayChainBlockInfo { diff --git a/primitives/src/vstaging/mod.rs b/primitives/src/vstaging/mod.rs index ed268bf1990f..958aaa77d5c3 100644 --- a/primitives/src/vstaging/mod.rs +++ b/primitives/src/vstaging/mod.rs @@ -80,8 +80,8 @@ pub struct Constraints { pub ump_remaining_bytes: u32, /// The maximum number of UMP messages allowed per candidate. 
pub max_ump_num_per_candidate: u32, - /// The amount of remaining DMP messages. - pub dmp_remaining_messages: u32, + /// Remaining DMP queue. Only includes sent-at block numbers. + pub dmp_remaining_messages: Vec, /// The limitations of all registered inbound HRMP channels. pub hrmp_inbound: InboundHrmpLimitations, /// The limitations of all registered outbound HRMP channels. diff --git a/runtime/parachains/src/runtime_api_impl/vstaging.rs b/runtime/parachains/src/runtime_api_impl/vstaging.rs index 503fbdf0808c..ee398d65e5e7 100644 --- a/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -62,7 +62,10 @@ pub fn validity_constraints( let ump_remaining = config.max_upward_queue_count - ump_msg_count; let ump_remaining_bytes = config.max_upward_queue_size - ump_total_bytes; - let dmp_remaining_messages = >::dmq_length(para_id); + let dmp_remaining_messages = >::dmq_contents(para_id) + .into_iter() + .map(|msg| msg.sent_at) + .collect(); let valid_watermarks = >::valid_watermarks(para_id); let hrmp_inbound = InboundHrmpLimitations { valid_watermarks }; From b858af1ab79f56220aa66acc1feaec1c17c4eb62 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Tue, 31 Jan 2023 21:27:53 +0400 Subject: [PATCH 35/76] prospective-parachains: introduce `backed_in_path_only` flag for advertisements (#6649) * Introduce `backed_in_path_only` flag for depth request * fmt * update doc comment --- node/core/backing/src/lib.rs | 4 + .../src/tests/prospective_parachains.rs | 9 + .../src/fragment_tree.rs | 177 +++++++++++++++++- node/core/prospective-parachains/src/lib.rs | 34 ++-- node/subsystem-types/src/messages.rs | 3 + .../src/inclusion_emulator/staging.rs | 2 +- 6 files changed, 207 insertions(+), 22 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index f0baf935faae..a640acf6d62c 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -1087,6 +1087,7 @@ async fn seconding_sanity_check( active_leaves: &HashMap, implicit_view: &ImplicitView, hypothetical_candidate: HypotheticalCandidate, + backed_in_path_only: bool, ) -> SecondingAllowed { let mut membership = Vec::new(); let mut responses = FuturesOrdered::>>::new(); @@ -1110,6 +1111,7 @@ async fn seconding_sanity_check( HypotheticalFrontierRequest { candidates: vec![hypothetical_candidate.clone()], fragment_tree_relay_parent: Some(*head), + backed_in_path_only, }, tx, )) @@ -1212,6 +1214,7 @@ async fn handle_can_second_request( &state.per_leaf, &state.implicit_view, hypothetical_candidate, + true, ) .await; @@ -1287,6 +1290,7 @@ async fn handle_validated_candidate_command( &state.per_leaf, &state.implicit_view, hypothetical_candidate, + false, ) .await { diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index f633141b8aa9..97121e2903aa 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -394,6 +394,7 @@ fn seconding_sanity_check_allowed() { let expected_request_a = HypotheticalFrontierRequest { candidates: vec![hypothetical_candidate.clone()], fragment_tree_relay_parent: Some(leaf_a_hash), + backed_in_path_only: false, }; let expected_response_a = make_hypothetical_frontier_response( vec![0, 1, 2, 3], @@ -403,6 +404,7 @@ fn seconding_sanity_check_allowed() { let expected_request_b = HypotheticalFrontierRequest { candidates: 
vec![hypothetical_candidate.clone()], fragment_tree_relay_parent: Some(leaf_b_hash), + backed_in_path_only: false, }; let expected_response_b = make_hypothetical_frontier_response(vec![3], hypothetical_candidate, leaf_b_hash); @@ -539,6 +541,7 @@ fn seconding_sanity_check_disallowed() { let expected_request_a = HypotheticalFrontierRequest { candidates: vec![hypothetical_candidate.clone()], fragment_tree_relay_parent: Some(leaf_a_hash), + backed_in_path_only: false, }; let expected_response_a = make_hypothetical_frontier_response( vec![0, 1, 2, 3], @@ -632,6 +635,7 @@ fn seconding_sanity_check_disallowed() { let expected_request_a = HypotheticalFrontierRequest { candidates: vec![hypothetical_candidate.clone()], fragment_tree_relay_parent: Some(leaf_a_hash), + backed_in_path_only: false, }; let expected_response_a = make_hypothetical_frontier_response( vec![3], @@ -641,6 +645,7 @@ fn seconding_sanity_check_disallowed() { let expected_request_b = HypotheticalFrontierRequest { candidates: vec![hypothetical_candidate.clone()], fragment_tree_relay_parent: Some(leaf_b_hash), + backed_in_path_only: false, }; let expected_response_b = make_hypothetical_frontier_response(vec![1], hypothetical_candidate, leaf_b_hash); @@ -737,6 +742,7 @@ fn prospective_parachains_reject_candidate() { HypotheticalFrontierRequest { candidates: vec![hypothetical_candidate.clone()], fragment_tree_relay_parent: Some(leaf_a_hash), + backed_in_path_only: false, }, make_hypothetical_frontier_response( vec![0, 1, 2, 3], @@ -917,6 +923,7 @@ fn second_multiple_candidates_per_relay_parent() { HypotheticalFrontierRequest { candidates: vec![hypothetical_candidate.clone()], fragment_tree_relay_parent: Some(leaf_hash), + backed_in_path_only: false, }, make_hypothetical_frontier_response( vec![*depth], @@ -1452,6 +1459,7 @@ fn seconding_sanity_check_occupy_same_depth() { HypotheticalFrontierRequest { candidates: vec![hypothetical_candidate.clone()], fragment_tree_relay_parent: Some(leaf_hash), + backed_in_path_only: false, }, // Send the same membership for both candidates. make_hypothetical_frontier_response(vec![0, 1], hypothetical_candidate, leaf_hash), @@ -1589,6 +1597,7 @@ fn occupied_core_assignment() { HypotheticalFrontierRequest { candidates: vec![hypothetical_candidate.clone()], fragment_tree_relay_parent: Some(leaf_a_hash), + backed_in_path_only: false, }, make_hypothetical_frontier_response( vec![0, 1, 2, 3], diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 7f89d3232a0e..fd48f4f1ba3a 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -542,19 +542,49 @@ impl FragmentTree { self.populate_from_bases(storage, bases); } + /// Returns `true` if the path from the root to the node's parent (inclusive) + /// only contains backed candidates, `false` otherwise. 
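+ /// (Walks `parent` pointers toward the root; a pointer that is already the
+ /// root yields `true`, since an empty path vacuously satisfies the check.)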
+ fn path_contains_backed_only_candidates( + &self, + mut parent_pointer: NodePointer, + candidate_storage: &CandidateStorage, + ) -> bool { + while let NodePointer::Storage(ptr) = parent_pointer { + let node = &self.nodes[ptr]; + let candidate_hash = &node.candidate_hash; + + if candidate_storage.get(candidate_hash).map_or(true, |candidate_entry| { + !matches!(candidate_entry.state, CandidateState::Backed) + }) { + return false + } + parent_pointer = node.parent; + } + + true + } + /// Returns the hypothetical depths where a candidate with the given hash and parent head data /// would be added to the tree, without applying other candidates recursively on top of it. /// /// If the candidate is already known, this returns the actual depths where this /// candidate is part of the tree. + /// + /// Setting `backed_in_path_only` to `true` ensures this function only returns such membership + /// that every candidate in the path from the root is backed. pub(crate) fn hypothetical_depths( &self, hash: CandidateHash, candidate: HypotheticalCandidate, + candidate_storage: &CandidateStorage, + backed_in_path_only: bool, ) -> Vec { - // if known. - if let Some(depths) = self.candidates.get(&hash) { - return depths.iter_ones().collect() + // if `true`, we always have to traverse the tree. + if !backed_in_path_only { + // if known. + if let Some(depths) = self.candidates.get(&hash) { + return depths.iter_ones().collect() + } } // if out of scope. @@ -639,7 +669,12 @@ impl FragmentTree { } } - depths.set(child_depth, true); + // Check that the path only contains backed candidates, if necessary. + if !backed_in_path_only || + self.path_contains_backed_only_candidates(parent_pointer, candidate_storage) + { + depths.set(child_depth, true); + } } } @@ -1445,6 +1480,8 @@ mod tests { parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), relay_parent: relay_parent_a, }, + &storage, + false, ), vec![0, 2, 4], ); @@ -1456,6 +1493,8 @@ mod tests { parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), relay_parent: relay_parent_a, }, + &storage, + false, ), vec![1, 3], ); @@ -1467,6 +1506,8 @@ mod tests { parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), relay_parent: relay_parent_a, }, + &storage, + false, ), vec![0, 2, 4], ); @@ -1478,6 +1519,8 @@ mod tests { parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), relay_parent: relay_parent_a, }, + &storage, + false, ), vec![1, 3] ); @@ -1527,6 +1570,8 @@ mod tests { parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), relay_parent: relay_parent_a, }, + &storage, + false, ), vec![0], ); @@ -1538,7 +1583,131 @@ mod tests { receipt: Cow::Owned(candidate_a), persisted_validation_data: Cow::Owned(pvd_a), }, + &storage, + false, ) .is_empty()); } + + #[test] + fn hypothetical_depths_backed_in_path() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0c].into(), + 0, + ); + let candidate_b_hash = candidate_b.hash(); + + let (pvd_c, candidate_c) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0b].into(), + vec![0x0d].into(), + 0, + ); + + let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + + let relay_parent_a_info = 
RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + storage.add_candidate(candidate_c, pvd_c).unwrap(); + + // `A` and `B` are backed, `C` is not. + storage.mark_backed(&candidate_a_hash); + storage.mark_backed(&candidate_b_hash); + + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + max_depth, + vec![], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 3); + assert_eq!(tree.nodes.len(), 3); + + let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0a]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + true, + ), + vec![0], + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + true, + ), + vec![2], + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0d]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + true, + ), + Vec::::new(), + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0d]).hash(), + relay_parent: relay_parent_a, + }, + &storage, + false, + ), + vec![2], // non-empty if `false`. + ); + } } diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index a077a706b127..01d45f6a0e2e 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -475,34 +475,34 @@ fn answer_hypothetical_frontier_request( None => continue, Some(f) => f, }; + let candidate_storage = match view.candidate_storage.get(&c.candidate_para()) { + None => continue, + Some(storage) => storage, + }; - let (c_hash, hypothetical) = match c { - HypotheticalCandidate::Complete { - candidate_hash, - receipt, - persisted_validation_data, - } => ( - *candidate_hash, + let candidate_hash = c.candidate_hash(); + let hypothetical = match c { + HypotheticalCandidate::Complete { receipt, persisted_validation_data, .. } => fragment_tree::HypotheticalCandidate::Complete { receipt: Cow::Borrowed(receipt), persisted_validation_data: Cow::Borrowed(persisted_validation_data), }, - ), HypotheticalCandidate::Incomplete { - candidate_hash, parent_head_data_hash, candidate_relay_parent, .. 
- } => ( - *candidate_hash, - fragment_tree::HypotheticalCandidate::Incomplete { - relay_parent: *candidate_relay_parent, - parent_head_data_hash: *parent_head_data_hash, - }, - ), + } => fragment_tree::HypotheticalCandidate::Incomplete { + relay_parent: *candidate_relay_parent, + parent_head_data_hash: *parent_head_data_hash, + }, }; - let depths = fragment_tree.hypothetical_depths(c_hash, hypothetical); + let depths = fragment_tree.hypothetical_depths( + candidate_hash, + hypothetical, + candidate_storage, + request.backed_in_path_only, + ); if !depths.is_empty() { membership.push((*active_leaf, depths)); diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index f887a83de2a9..29b05a7de7b7 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -963,6 +963,9 @@ pub struct HypotheticalFrontierRequest { pub candidates: Vec, /// Either a specific fragment tree to check, otherwise all. pub fragment_tree_relay_parent: Option, + /// Only return membership if all candidates in the path from the + /// root are backed. + pub backed_in_path_only: bool, } /// A request for the persisted validation data stored in the prospective diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index a8271703da9e..3aaee43d79fe 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -267,7 +267,7 @@ impl Constraints { ) -> Result<(), ModificationError> { if let Some(HrmpWatermarkUpdate::Trunk(hrmp_watermark)) = modifications.hrmp_watermark { // head updates are always valid. - if self.hrmp_inbound.valid_watermarks.iter().any(|w| w == &hrmp_watermark) { + if self.hrmp_inbound.valid_watermarks.iter().all(|w| w != &hrmp_watermark) { return Err(ModificationError::DisallowedHrmpWatermark(hrmp_watermark)) } } From 381862b0975c69c82d37a6836bbc69e224590df0 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Thu, 16 Feb 2023 15:58:02 +0400 Subject: [PATCH 36/76] fmt --- node/core/runtime-api/src/cache.rs | 12 ++++++------ node/subsystem-types/src/messages.rs | 10 +++++----- runtime/kusama/src/lib.rs | 1 - runtime/parachains/src/runtime_api_impl/vstaging.rs | 5 +++-- runtime/polkadot/src/lib.rs | 1 - runtime/rococo/src/lib.rs | 1 - runtime/westend/src/lib.rs | 1 - 7 files changed, 14 insertions(+), 17 deletions(-) diff --git a/node/core/runtime-api/src/cache.rs b/node/core/runtime-api/src/cache.rs index ce8afa785ee5..1fe709515f94 100644 --- a/node/core/runtime-api/src/cache.rs +++ b/node/core/runtime-api/src/cache.rs @@ -20,12 +20,12 @@ use lru::LruCache; use sp_consensus_babe::Epoch; use polkadot_primitives::{ - vstaging as vstaging_primitives, AuthorityDiscoveryId, BlockNumber, CandidateCommitments, - CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, DisputeState, - GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, - OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, - SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, - ValidatorSignature, vstaging::ExecutorParams + vstaging as vstaging_primitives, vstaging::ExecutorParams, AuthorityDiscoveryId, BlockNumber, + CandidateCommitments, CandidateEvent, CandidateHash, CommittedCandidateReceipt, CoreState, + DisputeState, GroupRotationInfo, Hash, Id as ParaId, InboundDownwardMessage, + InboundHrmpMessage, OccupiedCoreAssumption, 
PersistedValidationData, PvfCheckStatement, + ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, + ValidatorId, ValidatorIndex, ValidatorSignature, }; /// For consistency we have the same capacity for all caches. We use 128 as we'll only need that diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 1ce5f9bac3cf..10330757104e 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -39,13 +39,13 @@ use polkadot_node_primitives::{ SignedDisputeStatement, SignedFullStatement, SignedFullStatementWithPVD, ValidationResult, }; use polkadot_primitives::{ - vstaging as vstaging_primitives, AuthorityDiscoveryId, BackedCandidate, BlockNumber, - CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, CollatorId, - CommittedCandidateReceipt, CoreState, DisputeState, GroupIndex, GroupRotationInfo, Hash, - Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, + vstaging as vstaging_primitives, vstaging::ExecutorParams, AuthorityDiscoveryId, + BackedCandidate, BlockNumber, CandidateEvent, CandidateHash, CandidateIndex, CandidateReceipt, + CollatorId, CommittedCandidateReceipt, CoreState, DisputeState, GroupIndex, GroupRotationInfo, + Hash, Header as BlockHeader, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, MultiDisputeStatementSet, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, SessionIndex, SessionInfo, SignedAvailabilityBitfield, SignedAvailabilityBitfields, - ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, vstaging::ExecutorParams + ValidationCode, ValidationCodeHash, ValidatorId, ValidatorIndex, ValidatorSignature, }; use polkadot_statement_table::v2::Misbehavior; use std::{ diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index f6655e398290..8cc0d8bb21a3 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -1493,7 +1493,6 @@ pub type Migrations = ( // this release they will be properly pruned after the bonding duration has // elapsed) pallet_grandpa::migrations::CleanupSetIdSessionMap, - /* Asynchronous backing mirgration */ parachains_configuration::migration::v5::MigrateToV5, ); diff --git a/runtime/parachains/src/runtime_api_impl/vstaging.rs b/runtime/parachains/src/runtime_api_impl/vstaging.rs index 9b1281d7a0d4..e1aea3eaf882 100644 --- a/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -16,10 +16,11 @@ //! Put implementations of functions from staging APIs here. 
-use crate::{configuration, disputes, dmp, hrmp, initializer, paras, shared, ump, session_info}; +use crate::{configuration, disputes, dmp, hrmp, initializer, paras, session_info, shared, ump}; use primitives::{ vstaging::{ - AsyncBackingParameters, Constraints, InboundHrmpLimitations, OutboundHrmpChannelLimitations, ExecutorParams + AsyncBackingParameters, Constraints, ExecutorParams, InboundHrmpLimitations, + OutboundHrmpChannelLimitations, }, CandidateHash, DisputeState, Id as ParaId, SessionIndex, }; diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index 414a1a30d69c..909fff613c72 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -1613,7 +1613,6 @@ pub type Migrations = ( // this release they will be properly pruned after the bonding duration has // elapsed) pallet_grandpa::migrations::CleanupSetIdSessionMap, - /* Asynchronous backing mirgration */ parachains_configuration::migration::v5::MigrateToV5, ); diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 13b447e919b1..5682e903ec44 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -1497,7 +1497,6 @@ pub type Migrations = ( // this release they will be properly pruned after the bonding duration has // elapsed) pallet_grandpa::migrations::CleanupSetIdSessionMap, - /* Asynchronous backing mirgration */ parachains_configuration::migration::v5::MigrateToV5, ); diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index 81736d19f1f1..c5499926f6a8 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -1251,7 +1251,6 @@ pub type Migrations = ( // this release they will be properly pruned after the bonding duration has // elapsed) pallet_grandpa::migrations::CleanupSetIdSessionMap, - /* Asynchronous backing mirgration */ parachains_configuration::migration::v5::MigrateToV5, ); From c752c6cddd5554797bc3cb1330cce3c370c7e4d7 Mon Sep 17 00:00:00 2001 From: "Mattia L.V. 
Bradascio" <28816406+bredamatt@users.noreply.github.com> Date: Wed, 1 Mar 2023 08:21:29 +0000 Subject: [PATCH 37/76] Add async-backing zombienet tests (#6314) --- .gitlab-ci.yml | 2 +- scripts/ci/gitlab/pipeline/zombienet.yml | 97 +++++++++++++++++++ .../001-async-backing-compatibility.toml | 34 +++++++ .../001-async-backing-compatibility.zndsl | 23 +++++ .../002-async-backing-runtime-upgrade.toml | 49 ++++++++++ .../002-async-backing-runtime-upgrade.zndsl | 43 ++++++++ .../003-async-backing-collator-mix.toml | 40 ++++++++ .../003-async-backing-collator-mix.zndsl | 21 ++++ zombienet_tests/async_backing/README.md | 9 ++ 9 files changed, 317 insertions(+), 1 deletion(-) create mode 100644 zombienet_tests/async_backing/001-async-backing-compatibility.toml create mode 100644 zombienet_tests/async_backing/001-async-backing-compatibility.zndsl create mode 100644 zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml create mode 100644 zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl create mode 100644 zombienet_tests/async_backing/003-async-backing-collator-mix.toml create mode 100644 zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl create mode 100644 zombienet_tests/async_backing/README.md diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6d9300ba93a0..6c652c6734dc 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -36,7 +36,7 @@ variables: BUILDAH_IMAGE: "quay.io/buildah/stable:v1.27" DOCKER_OS: "debian:stretch" ARCH: "x86_64" - ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.2.78" + ZOMBIENET_IMAGE: "docker.io/paritytech/zombienet:v1.3.16" default: cache: {} diff --git a/scripts/ci/gitlab/pipeline/zombienet.yml b/scripts/ci/gitlab/pipeline/zombienet.yml index b088f20c55ff..75fb2fa8b62d 100644 --- a/scripts/ci/gitlab/pipeline/zombienet.yml +++ b/scripts/ci/gitlab/pipeline/zombienet.yml @@ -299,3 +299,100 @@ zombienet-tests-beefy-and-mmr: retry: 2 tags: - zombienet-polkadot-integration-test + +zombienet-tests-async-backing-compatibility: + stage: zombienet + extends: + - .kubernetes-env + - .zombienet-refs + image: "${ZOMBIENET_IMAGE}" + needs: + - job: publish-polkadot-debug-image + - job: publish-test-collators-image + - job: build-linux-stable + artifacts: true + variables: + GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/async_backing" + before_script: + - echo "Zombie-net Tests Config" + - echo "${ZOMBIENET_IMAGE_NAME}" + - echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}" + - echo "${GH_DIR}" + - export DEBUG=zombie,zombie::network-node + - BUILD_RELEASE_VERSION="$(cat ./artifacts/BUILD_RELEASE_VERSION)" + - export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG} + - export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}" + - export COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG} + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh + --github-remote-dir="${GH_DIR}" + --test="001-async-backing-compatibility.zndsl" + allow_failure: false + retry: 2 + tags: + - zombienet-polkadot-integration-test + +zombienet-tests-async-backing-runtime-upgrade: + stage: zombienet + extends: + - .kubernetes-env + - .zombienet-refs + image: "${ZOMBIENET_IMAGE}" + needs: + - job: publish-polkadot-debug-image + - job: publish-test-collators-image + - job: build-linux-stable + artifacts: true + variables: + GH_DIR: 
"https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/async_backing" + before_script: + - echo "Zombie-net Tests Config" + - echo "${ZOMBIENET_IMAGE_NAME}" + - echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}" + - echo "${GH_DIR}" + - export DEBUG=zombie,zombie::network-node + - BUILD_RELEASE_VERSION="$(cat ./artifacts/BUILD_RELEASE_VERSION)" + - export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG} + - export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}" + - export COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG} + - export POLKADOT_PR_BIN_URL="https://gitlab.parity.io/parity/mirrors/polkadot/-/jobs/${BUILD_LINUX_JOB_ID}/artifacts/raw/artifacts/polkadot" + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh + --github-remote-dir="${GH_DIR}" + --test="002-async-backing-runtime-upgrade.zndsl" + allow_failure: false + retry: 2 + tags: + - zombienet-polkadot-integration-test + +zombienet-tests-async-backing-collator-mix: + stage: zombienet + extends: + - .kubernetes-env + - .zombienet-refs + image: "${ZOMBIENET_IMAGE}" + needs: + - job: publish-polkadot-debug-image + - job: publish-test-collators-image + - job: build-linux-stable + artifacts: true + variables: + GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/async_backing" + before_script: + - echo "Zombie-net Tests Config" + - echo "${ZOMBIENET_IMAGE_NAME}" + - echo "${PARACHAINS_IMAGE_NAME} ${PARACHAINS_IMAGE_TAG}" + - echo "${GH_DIR}" + - export DEBUG=zombie,zombie::network-node + - BUILD_RELEASE_VERSION="$(cat ./artifacts/BUILD_RELEASE_VERSION)" + - export ZOMBIENET_INTEGRATION_TEST_IMAGE=${PARACHAINS_IMAGE_NAME}:${PARACHAINS_IMAGE_TAG} + - export ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE="docker.io/parity/polkadot:${BUILD_RELEASE_VERSION}" + - export COL_IMAGE=${COLLATOR_IMAGE_NAME}:${COLLATOR_IMAGE_TAG} + script: + - /home/nonroot/zombie-net/scripts/ci/run-test-env-manager.sh + --github-remote-dir="${GH_DIR}" + --test="003-async-backing-collator-mix.zndsl" + allow_failure: false + retry: 2 + tags: + - zombienet-polkadot-integration-test \ No newline at end of file diff --git a/zombienet_tests/async_backing/001-async-backing-compatibility.toml b/zombienet_tests/async_backing/001-async-backing-compatibility.toml new file mode 100644 index 000000000000..918fb5bf4f62 --- /dev/null +++ b/zombienet_tests/async_backing/001-async-backing-compatibility.toml @@ -0,0 +1,34 @@ +[settings] +timeout = 1000 + +[relaychain] +default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}" +chain = "rococo-local" +default_command = "polkadot" + + [relaychain.default_resources] + limits = { memory = "4G", cpu = "2" } + requests = { memory = "2G", cpu = "1" } + + [[relaychain.nodes]] + name = "alice" + args = [ "-lparachain=debug,runtime=debug"] + + [[relaychain.nodes]] + name = "bob" + image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}" + args = [ "-lparachain=debug,runtime=debug"] + +[[parachains]] +id = 100 + + [parachains.collator] + name = "collator01" + image = "{{COL_IMAGE}}" + command = "undying-collator" + args = ["-lparachain=debug"] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" diff --git a/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl b/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl new file mode 100644 index 000000000000..46c1d77acf46 --- /dev/null +++ 
b/zombienet_tests/async_backing/001-async-backing-compatibility.zndsl
@@ -0,0 +1,23 @@
+Description: Async Backing Compatibility Test
+Network: ./001-async-backing-compatibility.toml
+Creds: config
+
+# General
+alice: is up
+bob: is up
+
+# Check authority status
+alice: reports node_roles is 4
+bob: reports node_roles is 4
+
+# Check peers
+alice: reports peers count is at least 2 within 20 seconds
+bob: reports peers count is at least 2 within 20 seconds
+
+# Parachain registration
+alice: parachain 100 is registered within 225 seconds
+bob: parachain 100 is registered within 225 seconds
+
+# Ensure parachain progress
+alice: parachain 100 block height is at least 10 within 250 seconds
+bob: parachain 100 block height is at least 10 within 250 seconds
diff --git a/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml b/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml
new file mode 100644
index 000000000000..cce8510fccbd
--- /dev/null
+++ b/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml
@@ -0,0 +1,49 @@
+[settings]
+timeout = 1000
+
+[relaychain]
+default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
+chain = "rococo-local"
+default_command = "polkadot"
+
+  [relaychain.default_resources]
+  limits = { memory = "4G", cpu = "2" }
+  requests = { memory = "2G", cpu = "1" }
+
+  [[relaychain.nodes]]
+  name = "alice"
+  args = [ "-lparachain=debug,runtime=debug"]
+
+  [[relaychain.nodes]]
+  name = "bob"
+  args = [ "-lparachain=debug,runtime=debug"]
+
+  [[relaychain.nodes]]
+  name = "charlie"
+  image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}"
+  args = [ "-lparachain=debug,runtime=debug"]
+
+  [[relaychain.nodes]]
+  name = "dave"
+  image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}"
+  args = [ "-lparachain=debug,runtime=debug"]
+
+[[parachains]]
+id = 100
+addToGenesis = true
+
+  [parachains.collator]
+  name = "collator01"
+  image = "{{COL_IMAGE}}"
+  command = "undying-collator"
+  args = ["-lparachain=debug"]
+
+[[parachains]]
+id = 101
+addToGenesis = true
+
+  [parachains.collator]
+  name = "collator02"
+  image = "{{COL_IMAGE}}"
+  command = "undying-collator"
+  args = ["-lparachain=debug"]
\ No newline at end of file
diff --git a/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl b/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl
new file mode 100644
index 000000000000..2a4e2f1ded18
--- /dev/null
+++ b/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl
@@ -0,0 +1,43 @@
+Description: Async Backing Runtime Upgrade Test
+Network: ./002-async-backing-runtime-upgrade.toml
+Creds: config
+
+# General
+alice: is up
+bob: is up
+charlie: is up
+dave: is up
+
+# Check peers
+alice: reports peers count is at least 3 within 20 seconds
+bob: reports peers count is at least 3 within 20 seconds
+
+# Parachain registration
+alice: parachain 100 is registered within 225 seconds
+bob: parachain 100 is registered within 225 seconds
+charlie: parachain 100 is registered within 225 seconds
+dave: parachain 100 is registered within 225 seconds
+alice: parachain 101 is registered within 225 seconds
+bob: parachain 101 is registered within 225 seconds
+charlie: parachain 101 is registered within 225 seconds
+dave: parachain 101 is registered within 225 seconds
+
+# Ensure parachain progress
+alice: parachain 100 block height is at least 10 within 250 seconds
+bob: parachain 100 block height is at least 10 within 250 seconds
+
+# Runtime upgrade (according to previous runtime tests, avg. is 30s)
+alice: run ../misc/0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_BIN_URL}}" within 40 seconds
+bob: run ../misc/0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_BIN_URL}}" within 40 seconds
+
+# Bootstrap the runtime upgrade
+sleep 30 seconds
+
+alice: restart after 5 seconds
+bob: restart after 5 seconds
+
+alice: is up within 10 seconds
+bob: is up within 10 seconds
+
+alice: parachain 100 block height is at least 10 within 250 seconds
+bob: parachain 101 block height is at least 10 within 250 seconds
diff --git a/zombienet_tests/async_backing/003-async-backing-collator-mix.toml b/zombienet_tests/async_backing/003-async-backing-collator-mix.toml
new file mode 100644
index 000000000000..4dca4d3d5312
--- /dev/null
+++ b/zombienet_tests/async_backing/003-async-backing-collator-mix.toml
@@ -0,0 +1,40 @@
+[settings]
+timeout = 1000
+
+[relaychain]
+default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
+chain = "rococo-local"
+default_command = "polkadot"
+
+  [relaychain.default_resources]
+  limits = { memory = "4G", cpu = "2" }
+  requests = { memory = "2G", cpu = "1" }
+
+  [[relaychain.nodes]]
+  name = "alice"
+  args = [ "-lparachain=debug"]
+
+  [[relaychain.nodes]]
+  name = "bob"
+  image = "{{ZOMBIENET_INTEGRATION_TEST_SECONDARY_IMAGE}}"
+  args = [ "-lparachain=debug"]
+
+[[parachains]]
+id = 100
+
+  [[parachains.collators]]
+  name = "collator01"
+  image = "docker.io/paritypr/colander:master"
+  command = "undying-collator"
+  args = ["-lparachain=debug"]
+
+  [[parachains.collators]]
+  name = "collator02"
+  image = "{{COL_IMAGE}}"
+  command = "undying-collator"
+  args = ["-lparachain=debug"]
+
+[types.Header]
+number = "u64"
+parent_hash = "Hash"
+post_state = "Hash"
diff --git a/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl b/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl
new file mode 100644
index 000000000000..7eb14836d7e3
--- /dev/null
+++ b/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl
@@ -0,0 +1,19 @@
+Description: Async Backing Collator Mix Test
+Network: ./003-async-backing-collator-mix.toml
+Creds: config
+
+# General
+alice: is up
+bob: is up
+
+# Check peers
+alice: reports peers count is at least 3 within 20 seconds
+bob: reports peers count is at least 3 within 20 seconds
+
+# Parachain registration
+alice: parachain 100 is registered within 225 seconds
+bob: parachain 100 is registered within 225 seconds
+
+# Ensure parachain progress
+alice: parachain 100 block height is at least 10 within 250 seconds
+bob: parachain 100 block height is at least 10 within 250 seconds
diff --git a/zombienet_tests/async_backing/README.md b/zombienet_tests/async_backing/README.md
new file mode 100644
index 000000000000..9774ea3c25c9
--- /dev/null
+++ b/zombienet_tests/async_backing/README.md
@@ -0,0 +1,9 @@
+# async-backing zombienet tests
+
+This directory contains zombienet tests made explicitly for the async-backing feature branch.
+
+## coverage
+
+- Network protocol upgrade deploying both master and async branch (compatibility).
+- Runtime upgrade while running both master and async backing branch nodes.
+- Async backing test with a mix of collators collating via async backing and sync backing.
From 797b32f72138b3ebd1ab12f54a55471db9d0b97d Mon Sep 17 00:00:00 2001 From: Marcin S Date: Wed, 1 Mar 2023 10:44:22 +0100 Subject: [PATCH 38/76] Async backing: impl guide for statement distribution (#6738) Co-authored-by: Bradley Olson <34992650+BradleyOlson64@users.noreply.github.com> Co-authored-by: alexgparity <115470171+alexgparity@users.noreply.github.com> --- roadmap/implementers-guide/src/SUMMARY.md | 1 + .../backing/statement-distribution-legacy.md | 119 ++++ .../node/backing/statement-distribution.md | 561 ++++++++++++++---- 3 files changed, 579 insertions(+), 102 deletions(-) create mode 100644 roadmap/implementers-guide/src/node/backing/statement-distribution-legacy.md diff --git a/roadmap/implementers-guide/src/SUMMARY.md b/roadmap/implementers-guide/src/SUMMARY.md index c504b9ac1923..c6c91aa67cc2 100644 --- a/roadmap/implementers-guide/src/SUMMARY.md +++ b/roadmap/implementers-guide/src/SUMMARY.md @@ -46,6 +46,7 @@ - [Backing Subsystems](node/backing/README.md) - [Candidate Backing](node/backing/candidate-backing.md) - [Statement Distribution](node/backing/statement-distribution.md) + - [Statement Distribution (Legacy)](node/backing/statement-distribution-legacy.md) - [Availability Subsystems](node/availability/README.md) - [Availability Distribution](node/availability/availability-distribution.md) - [Availability Recovery](node/availability/availability-recovery.md) diff --git a/roadmap/implementers-guide/src/node/backing/statement-distribution-legacy.md b/roadmap/implementers-guide/src/node/backing/statement-distribution-legacy.md new file mode 100644 index 000000000000..67dcaf9053a5 --- /dev/null +++ b/roadmap/implementers-guide/src/node/backing/statement-distribution-legacy.md @@ -0,0 +1,119 @@ +# Statement Distribution (Legacy) + +This describes the legacy, backwards-compatible version of the Statement +Distribution subsystem. + +**Note:** All the V1 (legacy) code was extracted out to a `legacy_v1` module of +the `statement-distribution` crate, which doesn't alter any logic. V2 (new +protocol) peers also run `legacy_v1` and communicate with V1 peers using V1 +messages and with V2 peers using V2 messages. Once the runtime upgrade goes +through on all networks, this `legacy_v1` code will no longer be triggered and +will be vestigial and can be removed. + +## Overview + +The Statement Distribution Subsystem is responsible for distributing statements about seconded candidates between validators. + +## Protocol + +`PeerSet`: `Validation` + +Input: + +- `NetworkBridgeUpdate(update)` +- `StatementDistributionMessage` + +Output: + +- `NetworkBridge::SendMessage(PeerId, message)` +- `NetworkBridge::SendRequests(StatementFetchingV1)` +- `NetworkBridge::ReportPeer(PeerId, cost_or_benefit)` + +## Functionality + +Implemented as a gossip protocol. Handles updates to our view and peers' views. Neighbor packets are used to inform peers which chain heads we are interested in data for. + +The Statement Distribution Subsystem is responsible for distributing signed statements that we have generated and for forwarding statements generated by other validators. It also detects a variety of Validator misbehaviors for reporting to [Misbehavior Arbitration](../utility/misbehavior-arbitration.md). During the Backing stage of the inclusion pipeline, Statement Distribution is the main point of contact with peer nodes. 
On receiving a signed statement from a peer in the same backing group, assuming the peer receipt state machine is in an appropriate state, it sends the Candidate Receipt to the [Candidate Backing subsystem](candidate-backing.md) to handle the validator's statement. On receiving `StatementDistributionMessage::Share` we make sure to send messages to our backing group in addition to random other peers, to ensure a fast backing process and getting all statements quickly for distribution.
+
+This subsystem tracks equivocating validators and stops accepting information from them. It establishes a data-dependency order:
+
+- In order to receive a `Seconded` message we have the corresponding chain head in our view
+- In order to receive a `Valid` message we must have received the corresponding `Seconded` message.
+
+And respect this data-dependency order from our peers by respecting their views. This subsystem is responsible for checking message signatures.
+
+The Statement Distribution subsystem sends statements to peer nodes.
+
+## Peer Receipt State Machine
+
+There is a very simple state machine which governs which messages we are willing to receive from peers. Not depicted in the state machine: on initial receipt of any [`SignedFullStatement`](../../types/backing.md#signed-statement-type), validate that the provided signature does in fact sign the included data. Note that each individual parablock candidate gets its own instance of this state machine; it is perfectly legal to receive a `Valid(X)` before a `Seconded(Y)`, as long as a `Seconded(X)` has been received.
+
+A: Initial State. Receive `SignedFullStatement(Statement::Second)`: extract `Statement`, forward to Candidate Backing, proceed to B. Receive any other `SignedFullStatement` variant: drop it.
+
+B: Receive any `SignedFullStatement`: check signature and determine whether the statement is new to us. If new, forward to Candidate Backing and circulate to other peers. Receive `OverseerMessage::StopWork`: proceed to C.
+
+C: Receive any message for this block: drop it.
+
+For large statements (see below), we also keep track of the total received large
+statements per peer and have a hard limit on that number for flood protection.
+This is necessary as in the current code we only forward statements once we have
+all the data, therefore flood protection for large statements is a bit more
+subtle. This will become an obsolete problem once [off chain code
+upgrades](https://github.com/paritytech/polkadot/issues/2979) are implemented.
+
+## Peer Knowledge Tracking
+
+The peer receipt state machine implies that for parsimony of network resources, we should model the knowledge of our peers, and help them out. For example, let's consider a case with peers A, B, and C, validators X and Y, and candidate M. A sends us a `Statement::Second(M)` signed by X. We've double-checked it, and it's valid. While we're checking it, we receive a copy of X's `Statement::Second(M)` from `B`, along with a `Statement::Valid(M)` signed by Y.
+
+Our response to A is just the `Statement::Valid(M)` signed by Y. However, we haven't heard anything about this from C. Therefore, we send it everything we have: first a copy of X's `Statement::Second`, then Y's `Statement::Valid`.
+
+This system implies a certain level of duplication of messages--we received X's `Statement::Second` from both our peers, and C may experience the same--but it minimizes the degree to which messages are simply dropped.
+
+No jobs. We follow view changes from the [`NetworkBridge`](../utility/network-bridge.md), which in turn is updated by the overseer.
+
+## Equivocations and Flood Protection
+
+An equivocation is a double-vote by a validator. The [Candidate Backing](candidate-backing.md) Subsystem is better-suited than this one to detect equivocations as it adds votes to quorum trackers.
+
+At this level, we are primarily concerned about flood-protection, and to some extent, detecting equivocations is a part of that. In particular, we are interested in detecting equivocations of `Seconded` statements. Since every other statement is dependent on `Seconded` statements, ensuring that we only ever hold a bounded number of `Seconded` statements is sufficient for flood-protection.
+
+The simple approach is to say that we only receive up to two `Seconded` statements per validator per chain head. However, the marginal cost of equivocation, conditional on having already equivocated, is close to 0, since a single double-vote offence is counted as all double-vote offences for a particular chain-head. Even if it were not, there is some amount of equivocations that can be done such that the marginal cost of issuing further equivocations is close to 0, as there would be an amount of equivocations necessary to be completely and totally obliterated by the slashing algorithm. We fear the validator with nothing left to lose.
+
+With that in mind, this simple approach has a caveat worth digging deeper into.
+
+First: We may be aware of two equivocated `Seconded` statements issued by a validator. A totally honest peer of ours can also be aware of one or two different `Seconded` statements issued by the same validator. And yet another peer may be aware of one or two _more_ `Seconded` statements. And so on. This interacts badly with pre-emptive sending logic. Upon sending a `Seconded` statement to a peer, we will want to pre-emptively follow up with all statements relative to that candidate. Waiting for acknowledgment introduces latency at every hop, so that is best avoided. What can happen is that upon receipt of the `Seconded` statement, the peer will discard it as it falls beyond the bound of 2 that it is allowed to store. It cannot store anything in memory about discarded candidates as that would introduce a DoS vector. Then, the peer would receive from us all of the statements pertaining to that candidate, which, from its perspective, would be undesired - they are data-dependent on the `Seconded` statement we sent them, but they have erased all record of that from their memory. Upon receiving a potential flood of undesired statements, this 100% honest peer may choose to disconnect from us. In this way, an adversary may be able to partition the network with careful distribution of equivocated `Seconded` statements.
+
+The fix is to track, per-peer, the hashes of up to 4 candidates per validator (per relay-parent) that the peer is aware of. It is 4 because we may send them 2 and they may send us 2 different ones. We track the data that they are aware of as the union of things we have sent them and things they have sent us. If we receive a 1st or 2nd `Seconded` statement from a peer, we note it in the peer's known candidates even if we do disregard the data locally. And then, upon receipt of any data dependent on that statement, we do not reduce that peer's standing in our eyes, as the data was not undesired.
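+
+As a rough sketch of this bound (the types here are illustrative stand-ins,
+not the actual `statement-distribution` internals), per-peer knowledge could
+be modelled as:
+
+```rust
+use std::collections::{HashMap, HashSet};
+
+// Illustrative stand-ins for the real primitive types.
+type CandidateHash = [u8; 32];
+type ValidatorIndex = u32;
+
+/// Up to 2 `Seconded` candidates we may send plus 2 the peer may send us.
+const MAX_KNOWN_SECONDED_PER_VALIDATOR: usize = 4;
+
+#[derive(Default)]
+struct PeerKnowledge {
+	// Union of candidates we have sent the peer and candidates it has sent
+	// us, keyed by the validator which issued the `Seconded` statement.
+	known_seconded: HashMap<ValidatorIndex, HashSet<CandidateHash>>,
+}
+
+impl PeerKnowledge {
+	/// Note a `Seconded` candidate relayed to or from this peer. Returns
+	/// `false` if the per-validator bound is already met, in which case the
+	/// candidate is not tracked, but the peer is not punished either.
+	fn note_seconded(&mut self, v: ValidatorIndex, c: CandidateHash) -> bool {
+		let entry = self.known_seconded.entry(v).or_default();
+		if entry.contains(&c) {
+			return true
+		}
+		if entry.len() >= MAX_KNOWN_SECONDED_PER_VALIDATOR {
+			return false
+		}
+		entry.insert(c);
+		true
+	}
+
+	/// Data dependent on `Seconded(c)` by validator `v` counts as "desired"
+	/// only when the candidate is among the peer's known candidates.
+	fn is_desired(&self, v: ValidatorIndex, c: &CandidateHash) -> bool {
+		self.known_seconded.get(&v).map_or(false, |set| set.contains(c))
+	}
+}
+```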
+
+There is another caveat to the fix: we don't want to allow the peer to flood us because it has set things up in a way that it knows we will drop all of its traffic.
+We also track how many statements we have received per peer, per candidate, and per chain-head. This is any statement concerning a particular candidate: `Seconded`, `Valid`, or `Invalid`. If we ever receive a statement from a peer which would push any of these counters beyond twice the amount of validators at the chain-head, we begin to lower the peer's standing and eventually disconnect. This bound is a massive overestimate and could be reduced to twice the number of validators in the corresponding validator group. It is worth noting that the goal at the time of writing is to ensure any finite bound on the amount of stored data, as any equivocation results in a large slash.
+
+## Large statements
+
+Seconded statements can become quite large on parachain runtime upgrades for
+example. For this reason, there exists a `LargeStatement` constructor for the
+`StatementDistributionMessage` wire message, which only contains light metadata
+of a statement. The actual candidate data is not included. This message type is
+used whenever a message is deemed large. The receiver of such a message needs to
+request the actual payload via request/response by means of a
+`StatementFetchingV1` request.
+
+This is necessary as distribution of a large payload (megabytes) via gossip
+would make the network collapse and timely distribution of statements would no
+longer be possible. By using request/response it is ensured that each peer only
+transfers large data once. We take care to detect an overloaded
+peer early and immediately move on to a different peer for fetching the data.
+This mechanism should result in a good load distribution and therefore a rather
+optimal distribution path.
+
+With these optimizations, distribution of payloads up to 3 to 4 MB in size
+should work with Kusama validator specifications. For scaling up even more,
+runtime upgrades and message passing should be done off chain at some point.
+
+Flood protection considerations: For making DoS attacks slightly harder on this
+subsystem, nodes will only respond to large statement requests when they have
+previously notified that peer via gossip about that statement. So it is not
+possible to DoS nodes at scale by requesting candidate data over and over
+again.
diff --git a/roadmap/implementers-guide/src/node/backing/statement-distribution.md b/roadmap/implementers-guide/src/node/backing/statement-distribution.md
index 39ea1c630d31..dec351e64e6b 100644
--- a/roadmap/implementers-guide/src/node/backing/statement-distribution.md
+++ b/roadmap/implementers-guide/src/node/backing/statement-distribution.md
@@ -1,107 +1,464 @@
 # Statement Distribution
 
-The Statement Distribution Subsystem is responsible for distributing statements about seconded candidates between validators.
+## Overview
+
+**Goal:** every well-connected node is aware of every next potential parachain
+block.
+
+Validators can either:
+
+- receive parachain block from collator, check block, and gossip statement.
+- receive statements from other validators, check the parachain block if it
+  originated within their own group, gossip forward statement if valid.
+
+Validators must have statements, candidates, and persisted validation data
+from all other validators. This is because we need to store statements from
+validators who've checked the candidate on the relay chain, so we know who to
+hold accountable in case of disputes. Any validator can be selected as the next
+relay-chain block author, and this is not revealed in advance for security
+reasons. As a result, all validators must have an up-to-date view of all
+possible parachain candidates + backing statements that could be placed
+on-chain in the next block.
+
+[This blog
+post](https://polkadot.network/blog/polkadot-v1-0-sharding-and-economic-security)
+puts it another way: "Validators who aren't assigned to the parachain still
+listen for the attestations [statements] because whichever validator ends up
+being the author of the relay-chain block needs to bundle up attested parachain
+blocks for several parachains and place them into the relay-chain block."
+
+Backing-group quorum (that is, enough backing group votes) must be reached
+before the block author will consider the candidate. Therefore, validators need
+to consider _all_ seconded candidates within their own group, because that's
+what they're assigned to work on. Validators only need to consider _backable_
+candidates from other groups. This informs the design of the statement
+distribution protocol to have separate phases for in-group and out-group
+distribution, respectively called "cluster" and "grid" mode (see below).
+
+### With Async Backing
+
+Asynchronous backing changes the runtime to accept parachain candidates from a
+certain allowed range of historic relay-parents. These candidates must be backed
+by the group assigned to the parachain as-of their corresponding relay parents.
 
 ## Protocol
 
-`PeerSet`: `Validation`
-
-Input:
-
-- `NetworkBridgeUpdate(update)`
-- `StatementDistributionMessage`
-
-Output:
-
-- `NetworkBridge::SendMessage(PeerId, message)`
-- `NetworkBridge::SendRequests(StatementFetchingV1)`
-- `NetworkBridge::ReportPeer(PeerId, cost_or_benefit)`
-
-## Functionality
-
-Implemented as a gossip protocol. Handle updates to our view and peers' views. Neighbor packets are used to inform peers which chain heads we are interested in data for.
-
-It is responsible for distributing signed statements that we have generated and forwarding them, and for detecting a variety of Validator misbehaviors for reporting to [Misbehavior Arbitration](../utility/misbehavior-arbitration.md). During the Backing stage of the inclusion pipeline, it's the main point of contact with peer nodes. On receiving a signed statement from a peer in the same backing group, assuming the peer receipt state machine is in an appropriate state, it sends the Candidate Receipt to the [Candidate Backing subsystem](candidate-backing.md) to handle the validator's statement. On receiving `StatementDistributionMessage::Share` we make sure to send messages to our backing group in addition to random other peers, to ensure a fast backing process and getting all statements quickly for distribution.
-
-Track equivocating validators and stop accepting information from them. Establish a data-dependency order:
-
-- In order to receive a `Seconded` message we have the corresponding chain head in our view
-- In order to receive an `Valid` message we must have received the corresponding `Seconded` message.
-
-And respect this data-dependency order from our peers by respecting their views. This subsystem is responsible for checking message signatures.
-
-The Statement Distribution subsystem sends statements to peer nodes.
- -## Peer Receipt State Machine - -There is a very simple state machine which governs which messages we are willing to receive from peers. Not depicted in the state machine: on initial receipt of any [`SignedFullStatement`](../../types/backing.md#signed-statement-type), validate that the provided signature does in fact sign the included data. Note that each individual parablock candidate gets its own instance of this state machine; it is perfectly legal to receive a `Valid(X)` before a `Seconded(Y)`, as long as a `Seconded(X)` has been received. - -A: Initial State. Receive `SignedFullStatement(Statement::Second)`: extract `Statement`, forward to Candidate Backing, proceed to B. Receive any other `SignedFullStatement` variant: drop it. - -B: Receive any `SignedFullStatement`: check signature and determine whether the statement is new to us. if new, forward to Candidate Backing and circulate to other peers. Receive `OverseerMessage::StopWork`: proceed to C. - -C: Receive any message for this block: drop it. - -For large statements (see below), we also keep track of the total received large -statements per peer and have a hard limit on that number for flood protection. -This is necessary as in the current code we only forward statements once we have -all the data, therefore flood protection for large statement is a bit more -subtle. This will become an obsolete problem once [off chain code -upgrades](https://github.com/paritytech/polkadot/issues/2979) are implemented. - -## Peer Knowledge Tracking - -The peer receipt state machine implies that for parsimony of network resources, we should model the knowledge of our peers, and help them out. For example, let's consider a case with peers A, B, and C, validators X and Y, and candidate M. A sends us a `Statement::Second(M)` signed by X. We've double-checked it, and it's valid. While we're checking it, we receive a copy of X's `Statement::Second(M)` from `B`, along with a `Statement::Valid(M)` signed by Y. - -Our response to A is just the `Statement::Valid(M)` signed by Y. However, we haven't heard anything about this from C. Therefore, we send it everything we have: first a copy of X's `Statement::Second`, then Y's `Statement::Valid`. - -This system implies a certain level of duplication of messages--we received X's `Statement::Second` from both our peers, and C may experience the same--but it minimizes the degree to which messages are simply dropped. - -And respect this data-dependency order from our peers. This subsystem is responsible for checking message signatures. - -No jobs. We follow view changes from the [`NetworkBridge`](../utility/network-bridge.md), which in turn is updated by the overseer. - -## Equivocations and Flood Protection - -An equivocation is a double-vote by a validator. The [Candidate Backing](candidate-backing.md) Subsystem is better-suited than this one to detect equivocations as it adds votes to quorum trackers. - -At this level, we are primarily concerned about flood-protection, and to some extent, detecting equivocations is a part of that. In particular, we are interested in detecting equivocations of `Seconded` statements. Since every other statement is dependent on `Seconded` statements, ensuring that we only ever hold a bounded number of `Seconded` statements is sufficient for flood-protection. - -The simple approach is to say that we only receive up to two `Seconded` statements per validator per chain head. 
However, the marginal cost of equivocation, conditional on having already equivocated, is close to 0, since a single double-vote offence is counted as all double-vote offences for a particular chain-head. Even if it were not, there is some amount of equivocations that can be done such that the marginal cost of issuing further equivocations is close to 0, as there would be an amount of equivocations necessary to be completely and totally obliterated by the slashing algorithm. We fear the validator with nothing left to lose. - -With that in mind, this simple approach has a caveat worth digging deeper into. - -First: We may be aware of two equivocated `Seconded` statements issued by a validator. A totally honest peer of ours can also be aware of one or two different `Seconded` statements issued by the same validator. And yet another peer may be aware of one or two _more_ `Seconded` statements. And so on. This interacts badly with pre-emptive sending logic. Upon sending a `Seconded` statement to a peer, we will want to pre-emptively follow up with all statements relative to that candidate. Waiting for acknowledgment introduces latency at every hop, so that is best avoided. What can happen is that upon receipt of the `Seconded` statement, the peer will discard it as it falls beyond the bound of 2 that it is allowed to store. It cannot store anything in memory about discarded candidates as that would introduce a DoS vector. Then, the peer would receive from us all of the statements pertaining to that candidate, which, from its perspective, would be undesired - they are data-dependent on the `Seconded` statement we sent them, but they have erased all record of that from their memory. Upon receiving a potential flood of undesired statements, this 100% honest peer may choose to disconnect from us. In this way, an adversary may be able to partition the network with careful distribution of equivocated `Seconded` statements. - -The fix is to track, per-peer, the hashes of up to 4 candidates per validator (per relay-parent) that the peer is aware of. It is 4 because we may send them 2 and they may send us 2 different ones. We track the data that they are aware of as the union of things we have sent them and things they have sent us. If we receive a 1st or 2nd `Seconded` statement from a peer, we note it in the peer's known candidates even if we do disregard the data locally. And then, upon receipt of any data dependent on that statement, we do not reduce that peer's standing in our eyes, as the data was not undesired. - -There is another caveat to the fix: we don't want to allow the peer to flood us because it has set things up in a way that it knows we will drop all of its traffic. -We also track how many statements we have received per peer, per candidate, and per chain-head. This is any statement concerning a particular candidate: `Seconded`, `Valid`, or `Invalid`. If we ever receive a statement from a peer which would push any of these counters beyond twice the amount of validators at the chain-head, we begin to lower the peer's standing and eventually disconnect. This bound is a massive overestimate and could be reduced to twice the number of validators in the corresponding validator group. It is worth noting that the goal at the time of writing is to ensure any finite bound on the amount of stored data, as any equivocation results in a large slash. - -## Large statements - -Seconded statements can become quite large on parachain runtime upgrades for -example. 
For this reason, there exists a `LargeStatement` constructor for the -`StatementDistributionMessage` wire message, which only contains light metadata -of a statement. The actual candidate data is not included. This message type is -used whenever a message is deemed large. The receiver of such a message needs to -request the actual payload via request/response by means of a -`StatementFetchingV1` request. - -This is necessary as distribution of a large payload (mega bytes) via gossip -would make the network collapse and timely distribution of statements would no -longer be possible. By using request/response it is ensured that each peer only -transferes large data once. We only take good care to detect an overloaded -peer early and immediately move on to a different peer for fetching the data. -This mechanism should result in a good load distribution and therefore a rather -optimal distribution path. - -With these optimizations, distribution of payloads in the size of up to 3 to 4 -MB should work with Kusama validator specifications. For scaling up even more, -runtime upgrades and message passing should be done off chain at some point. - -Flood protection considerations: For making DoS attacks slightly harder on this -subsystem, nodes will only respond to large statement requests, when they -previously notified that peer via gossip about that statement. So, it is not -possible to DoS nodes at scale, by requesting candidate data over and over -again. +To address the concern of dealing with large numbers of spam candidates or +statements, the overall design approach is to combine a focused "clustering" +protocol for legitimate fresh candidates with a broad-distribution "grid" +protocol to quickly get backed candidates into the hands of many validators. +Validators do not eagerly send each other heavy `CommittedCandidateReceipt`, +but instead request these lazily through request/response protocols. + +A high-level description of the protocol follows: + +### Messages + +Nodes can send each other a few kinds of messages: `Statement`, +`BackedCandidateManifest`, `BackedCandidateAcknowledgement`. + +- `Statement` messages contain only a signed compact statement, without full + candidate info. +- `BackedCandidateManifest` messages advertise a description of a backed + candidate and stored statements. +- `BackedCandidateAcknowledgement` messages acknowledge that a backed candidate + is fully known. + +### Request/response protocol + +Nodes can request the full `CommittedCandidateReceipt` and +`PersistedValidationData`, along with statements, over a request/response +protocol. This is the `AttestedCandidateRequest`; the response is +`AttestedCandidateResponse`. + +### Importability and the Hypothetical Frontier + +The **prospective parachains** subsystem maintains prospective "fragment trees" +which can be used to determine whether a particular parachain candidate could +possibly be included in the future. Candidates which either are within a +fragment tree or _would be_ part of a fragment tree if accepted are said to be +in the "hypothetical frontier". + +The **statement-distribution** subsystem keeps track of all candidates, and +updates its knowledge of the hypothetical frontier based on events such as new +relay parents, new confirmed candidates, and newly backed candidates. + +We only consider statements as "importable" when the corresponding candidate is +part of the hypothetical frontier, and only send "importable" statements to the +backing subsystem itself. 
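+
+As a hypothetical sketch of this gate (the names below are illustrative, not
+the subsystem's actual types), statements could be buffered per candidate and
+released only once the candidate enters the frontier:
+
+```rust
+// Illustrative stand-in for a signed compact statement.
+struct SignedCompactStatement;
+
+struct CandidateEntry {
+	/// Full `CommittedCandidateReceipt` + `PersistedValidationData` known.
+	confirmed: bool,
+	/// The candidate occupies a spot in some fragment tree, or would if
+	/// accepted; i.e. it is part of the hypothetical frontier.
+	in_frontier: bool,
+	/// Statements received but not yet handed to the backing subsystem.
+	pending_statements: Vec<SignedCompactStatement>,
+}
+
+impl CandidateEntry {
+	/// Drain the statements which are now importable. Statements are held
+	/// back until the candidate is confirmed and in the frontier; only then
+	/// are they forwarded to backing.
+	fn drain_importable(&mut self) -> Vec<SignedCompactStatement> {
+		if self.confirmed && self.in_frontier {
+			std::mem::take(&mut self.pending_statements)
+		} else {
+			Vec::new()
+		}
+	}
+}
+```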
+ +### Cluster Mode + +- Validator nodes are partitioned into groups (with some exceptions), and + validators within a group at a relay-parent can send each other `Statement` + messages for any candidates within that group and based on that relay-parent. +- This is referred to as the "cluster" mode. + - Right now these are the same as backing groups, though "cluster" + specifically refers to the set of nodes communicating with each other in the + first phase of distribution. +- `Seconded` statements must be sent before `Valid` statements. +- `Seconded` statements may only be sent to other members of the group when the + candidate is fully known by the local validator. + - "Fully known" means the validator has the full `CommittedCandidateReceipt` + and `PersistedValidationData`, which it receives on request from other + validators or from a collator. + - The reason for this is that sending a statement (which is always a + `CompactStatement` carrying nothing but a hash and signature) to the + cluster, is also a signal that the sending node is available to request the + candidate from. + - This makes the protocol easier to reason about, while also reducing network + messages about candidates that don't really exist. +- Validators in a cluster receiving messages about unknown candidates request + the candidate (and statements) from other cluster members which have it. +- Spam considerations + - The maximum depth of candidates allowed in asynchronous backing determines + the maximum amount of `Seconded` statements originating from a validator V + which each validator in a cluster may send to others. This bounds the number + of candidates. + - There is a small number of validators in each group, which further limits + the amount of candidates. +- We accept candidates which don't fit in the fragment trees of any relay + parents. + - "Accept" means "attempt to request and store in memory until useful or + expired". + - We listen to prospective parachains subsystem to learn of new additions to + the fragment trees. + - Use this to attempt to import the candidate later. + +### Grid Mode + +- Every consensus session provides randomness and a fixed validator set, which + is used to build a redundant grid topology. + - It's redundant in the sense that there are 2 paths from every node to every + other node. See "Grid Topology" section for more details. +- This grid topology is used to create a sending path from each validator group + to every validator. +- When a node observes a candidate as backed, it sends a + `BackedCandidateManifest` to their "receiving" nodes. +- If receiving nodes don't yet know the candidate, they request it. +- Once they know the candidate, they respond with a + `BackedCandidateAcknowledgement`. +- Once two nodes perform a manifest/acknowledgement exchange, they can send + `Statement` messages directly to each other for any new statements they might + need. + - This limits the amount of statements we'd have to deal with w.r.t. + candidates that don't really exist. See "Manifest Exchange" section. +- There are limitations on the number of candidates that can be advertised by + each peer, similar to those in the cluster. Validators do not request + candidates which exceed these limitations. +- Validators request candidates as soon as they are advertised, but do not + import the statements until the candidate is part of the hypothetical + frontier, and do not re-advertise or acknowledge until the candidate is + considered both backable and part of the hypothetical frontier. 
+- Note that requesting is not an implicit acknowledgement, and an explicit + acknowledgement must be sent upon receipt. + +## Statement distribution messages + +### Input + +- `ActiveLeavesUpdate` + - Notification of a change in the set of active leaves. +- `StatementDistributionMessage::Share` + - Notification of a locally-originating statement. That is, this statement + comes from our node and should be distributed to other nodes. + - Handled by `share_local_statement` +- `StatementDistributionMessage::Backed` + - Notification of a candidate being backed (received enough validity votes + from the backing group). + - Handled by `handle_backed_candidate_message` +- `StatementDistributionMessage::NetworkBridgeUpdate` + - Handled by `handle_network_update` + - v1 compatibility + - `Statement` + - Notification of a signed statement. + - Handled by `handle_incoming_statement` + - `BackedCandidateManifest` + - Notification of a backed candidate being known by the sending node. + - For the candidate being requested by the receiving node if needed. + - Announcement + - Handled by `handle_incoming_manifest` + - `BackedCandidateKnown` + - Notification of a backed candidate being known by the sending node. + - For informing a receiving node which already has the candidate. + - Acknowledgement. + - Handled by `handle_incoming_acknowledgement` + +### Output + +- `NetworkBridgeTxMessage::SendValidationMessages` + - Sends a peer all pending messages / acknowledgements / statements for a + relay parent, either through the cluster or the grid. +- `NetworkBridgeTxMessage::SendValidationMessage` + - Circulates a compact statement to all peers who need it, either through the + cluster or the grid. +- `NetworkBridgeTxMessage::ReportPeer` + - Reports a peer (either good or bad). +- `CandidateBackingMessage::Statement` + - Note a validator's statement about a particular candidate. +- `ProspectiveParachainsMessage::GetHypotheticalFrontier` + - Gets the hypothetical frontier membership of candidates under active leaves' + fragment trees. +- `NetworkBridgeTxMessage::SendRequests` + - Sends requests, initiating request/response protocol. + +## Request/Response + +We also have a request/response protocol because validators do not eagerly send +each other heavy `CommittedCandidateReceipt`, but instead need to request these +lazily. + +### Protocol + +1. Requesting Validator + + - Requests are queued up with `RequestManager::get_or_insert`. + - Done as needed, when handling incoming manifests/statements. + - `RequestManager::dispatch_requests` sends any queued-up requests. + - Calls `RequestManager::next_request` to completion. + - Creates the `OutgoingRequest`, saves the receiver in + `RequestManager::pending_responses`. + - Does nothing if we have more responses pending than the limit of parallel + requests. + +2. Peer + + - Requests come in on a peer on the `IncomingRequestReceiver`. + - Runs in a background responder task which feeds requests to `answer_request` + through `MuxedMessage`. + - This responder task has a limit on the number of parallel requests. + - `answer_request` on the peer takes the request and sends a response. + - Does this using the response sender on the request. + +3. Requesting Validator + + - `receive_response` on the original validator yields a response. + - Response was sent on the request's response sender. + - Uses `RequestManager::await_incoming` to await on pending responses in an + unordered fashion. + - Runs on the `MuxedMessage` receiver. 
+ - `handle_response` handles the response. + +### API + +- `dispatch_requests` + - Dispatches pending requests for candidate data & statements. +- `answer_request` + - Answers an incoming request for a candidate. + - Takes an incoming `AttestedCandidateRequest`. +- `receive_response` + - Wait on the next incoming response. + - If there are no requests pending, this future never resolves. + - Returns `UnhandledResponse` +- `handle_response` + - Handles an incoming response. + - Takes `UnhandledResponse` + +## Manifests + +A manifest is a message about a known backed candidate, along with a description +of the statements backing it. It can be one of two kinds: + +- `Full`: Contains information about the candidate and should be sent to peers + who may not have the candidate yet. +- `Acknowledgement`: Omits information implicit in the candidate, and should be + sent to peers which are guaranteed to have the candidate already. + +### Manifest Exchange + +Manifest exchange is when a receiving node received a `Full` manifest and +replied with an `Acknowledgement`. It indicates that both nodes know the +candidate as valid and backed. This allows the nodes to send `Statement` +messages directly to each other for any new statements. + +Why? This limits the amount of statements we'd have to deal with w.r.t. +candidates that don't really exist. Limiting out-of-group statement distribution +between peers to only candidates that both peers agree are backed and exist +ensures we only have to store statements about real candidates. + +In practice, manifest exchange means that one of three things have happened: + +- They announced, we acknowledged. +- We announced, they acknowledged. +- We announced, they announced. + +Concerning the last case, note that it is possible for two nodes to have each +other in their sending set. Consider: + +``` +1 2 +3 4 +``` + +If validators 2 and 4 are in group B, then there is a path `2->1->3` and +`4->3->1`. Therefore, 1 and 3 might send each other manifests for the same +candidate at the same time, without having seen the other's yet. This also +counts as a manifest exchange, but is only allowed to occur in this way. + +After the exchange is complete, we update pending statements. Pending statements +are those we know locally that the remote node does not. + +#### Alternative Paths Through The Topology + +Nodes should send a `BackedCandidateAcknowledgement(CandidateHash, +StatementFilter)` notification to any peer which has sent a manifest, and the +candidate has been acquired by other means. This keeps alternative paths through +the topology open, which allows nodes to receive additional statements that come +later, but not after the candidate has been posted on-chain. + +This is mostly about the limitation that the runtime has no way for block +authors to post statements that come after the parablock is posted on-chain and +ensure those validators still get rewarded. Technically, we only need enough +statements to back the candidate and the manifest + request will provide that. +But more statements might come shortly afterwards, and we want those to end up +on-chain as well to ensure all validators in the group are rewarded. + +For clarity, here is the full timeline: + +1. candidate seconded +1. backable in cluster +1. distributed along grid +1. latecomers issue statements +1. candidate posted on chain +1. really latecomers issue statements + +## Cluster Module + +The cluster module provides direct distribution of unbacked candidates within a +group. 
By utilizing this initial phase of propagating only within +clusters/groups, we bound the number of `Seconded` messages per validator per +relay-parent, helping us prevent spam. Validators can try to circumvent this, +but they would only consume a few KB of memory and it is trivially slashable on +chain. + +The cluster module determines whether to accept/reject messages from other +validators in the same group. It keeps track of what we have sent to other +validators in the group, and pending statements. For the full protocol, see +"Protocol". + +## Grid Module + +The grid module provides distribution of backed candidates and late statements +outside the group. For the full protocol, see the "Protocol" section. + +### Grid Topology + +For distributing outside our cluster we use a 2D grid topology. This limits the +amount of peers we send messages to, and handles view updates. + +The basic operation of the grid topology is that: + +- A validator producing a message sends it to its row-neighbors and its + column-neighbors. +- A validator receiving a message originating from one of its row-neighbors + sends it to its column-neighbors. +- A validator receiving a message originating from one of its column-neighbors + sends it to its row-neighbors. + +This grid approach defines 2 unique paths for every validator to reach every +other validator in at most 2 hops, providing redundancy. + +Propagation follows these rules: + +- Each node has a receiving set and a sending set. These are different for each + group. That is, if a node receives a candidate from group A, it checks if it + is allowed to receive from that node for candidates from group A. +- For groups that we are in, receive from nobody and send to our X/Y peers. +- For groups that we are not part of: + - We receive from any validator in the group we share a slice with and send to + the corresponding X/Y slice in the other dimension. + - For any validators we don't share a slice with, we receive from the nodes + which share a slice with them. + +### Example + +For size 11, the matrix would be: + +``` +0 1 2 +3 4 5 +6 7 8 +9 10 +``` + +e.g. for index 10, the neighbors would be 1, 4, 7, 9 -- these are the nodes we +could directly communicate with (e.g. either send to or receive from). + +Now, which of these neighbors can 10 receive from? Recall that the +sending/receiving sets for 10 would be different for different groups. Here are +some hypothetical scenarios: + +- **Scenario 1:** 9 belongs to group A but not 10. Here, 10 can directly receive + candidates from group A from 9. 10 would propagate them to the nodes in {1, 4, + 7} that are not in A. +- **Scenario 2:** 6 is in group A instead of 9, and 7 is not in group A. 10 can + receive from 7 or 9. It would not propagate any further. +- **Scenario 3:** 10 itself is in group A. 10 would not receive candidates from + this group from any other nodes through the grid. It would itself send such + candidates to all its neighbors that are not in A. + +### Seconding Limit + +The seconding limit is a per-validator limit. Before asynchronous backing, we +had a rule that every validator was only allowed to second one candidate per +relay parent. With asynchronous backing, we have a 'maximum depth' which makes +it possible to second multiple candidates per relay parent. The seconding limit +is set to `max depth + 1` to set an upper bound on candidates entering the +system. 
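+
+A minimal sketch of enforcing this bound (hypothetical names; the real
+implementation tracks this per cluster peer and per grid sender):
+
+```rust
+use std::collections::HashMap;
+
+// Illustrative stand-ins for the real primitive types.
+type Hash = [u8; 32];
+type ValidatorIndex = u32;
+
+/// Counts `Seconded` statements per validator per relay-parent, bounded by
+/// `max_candidate_depth + 1`.
+struct SecondedCounter {
+	seconding_limit: usize,
+	seconded: HashMap<(Hash, ValidatorIndex), usize>,
+}
+
+impl SecondedCounter {
+	fn new(max_candidate_depth: usize) -> Self {
+		Self { seconding_limit: max_candidate_depth + 1, seconded: HashMap::new() }
+	}
+
+	/// Record a `Seconded` statement; returns `false` if it would exceed
+	/// the limit and should be ignored.
+	fn note_seconded(&mut self, relay_parent: Hash, v: ValidatorIndex) -> bool {
+		let count = self.seconded.entry((relay_parent, v)).or_insert(0);
+		if *count >= self.seconding_limit {
+			return false
+		}
+		*count += 1;
+		true
+	}
+}
+```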
### Seconding Limit

The seconding limit is a per-validator limit. Before asynchronous backing, we
had a rule that every validator was only allowed to second one candidate per
relay parent. With asynchronous backing, we have a 'maximum depth' which makes
it possible to second multiple candidates per relay parent. The seconding limit
is set to `max depth + 1` to set an upper bound on candidates entering the
system.

## Candidates Module

The candidates module provides a tracker for all known candidates in the view,
whether they are confirmed or not, and how peers have advertised the candidates.
What is a confirmed candidate? It is a candidate for which we have the full
receipt and the persisted validation data. This module gets confirmed candidates
from two sources:

- A validator may have fetched the collation directly from the collator and
  validated it.
- The first time a validator gets an announcement for an unknown candidate, it
  will send a request for the candidate. Upon receiving a response and
  validating it (see `UnhandledResponse::validate_response`), it will mark the
  candidate as confirmed.

## Requests Module

The requests module provides a manager for pending requests for candidate data,
as well as pending responses. See "Request/Response Protocol" for a high-level
description of the flow. See the module docs for full details.

## Glossary

- **Acknowledgement:** A notification sent to a validator that already has the
  candidate, to inform them that the sending node knows the candidate.
- **Announcement:** A notification of a backed candidate being known by the
  sending node. It is a full manifest and initiates manifest exchange.
- **Attestation:** See "Statement".
- **Backable vs. Backed:**
  - Note that we sometimes use "backed" to refer to candidates that are
    "backable", but not yet backed on chain.
  - **Backed** should technically mean that the parablock candidate and its
    backing statements have been added to a relay chain block.
  - **Backable** is when the necessary backing statements have been acquired but
    those statements and the parablock candidate haven't been backed in a relay
    chain block yet.
- **Fragment tree:** A tree of prospective parachain blocks (fragments) not yet
  referenced by the relay-chain.
- **Manifest:** A message about a known backed candidate, along with a
  description of the statements backing it. See the "Manifests" section.
- **Peer:** Another validator that a validator is connected to.
- **Request/response:** A protocol used to lazily request and receive heavy
  candidate data when needed.
- **Reputation:** Tracks the reputation of peers. Applies annoyance costs and
  good-behavior benefits.
- **Statement:** Signed statements that can be made about parachain candidates.
  - **Seconded:** Proposal of a parachain candidate. Implicit validity vote.
  - **Valid:** States that a parachain candidate is valid.
- **Target:** Target validator to send a statement to.
- **View:** Current knowledge of the chain state.
  - **Explicit view** / **immediate view**
    - The view a peer has of the relay chain heads and highest finalized block.
  - **Implicit view**
    - Derived from the immediate view. Composed of active leaves and minimum
      relay-parents allowed for candidates of various parachains at those
      leaves.
From 8c45c2e9413b055e0d15061734aa1a6e641b6258 Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Fri, 3 Mar 2023 14:20:33 -0700 Subject: [PATCH 39/76] Asynchronous backing statement distribution: Take III (#5999) * add notification types for v2 statement-distribution * improve protocol docs * add empty vstaging module * fmt * add backed candidate packet request types * start putting down structure of new logic * handle activated leaf * some sanity-checking on outbound statements * fmt * update vstaging share to use statements with PVD * tiny refactor, candidate_hash location * import local statements * refactor statement import * first stab at broadcast logic * fmt * fill out some TODOs * start on handling incoming * split off session info into separate map * start in on a knowledge tracker * address some grumbles * format * missed comment * some docs for direct * add note on slashing * amend * simplify 'direct' code * finish up the 'direct' logic * add a bunch of tests for the direct-in-group logic * rename 'direct' to 'cluster', begin a candidate_entry module * distill candidate_entry * start in on a statement-store module * some utilities for the statement store * rewrite 'send_statement_direct' using new tools * filter sending logic on peers which have the relay-parent in their view. * some more logic for handling incoming statements * req/res: BackedCandidatePacket -> AttestedCandidate + tweaks * add a `validated_in_group` bitfield to BackedCandidateInventory * BackedCandidateInventory -> Manifest * start in on requester module * add outgoing request for attested candidate * add a priority mechanism for requester * some request dispatch logic * add seconded mask to tagged-request * amend manifest to hold group index * handle errors and set up scaffold for response validation * validate attested candidate responses * requester -> requests * add some utilities for manipulating requests * begin integrating requester * start grid module * tiny * refactor grid topology to expose more info to subsystems * fix grid_topology test * fix overseer test * implement topology group-based view construction logic * fmt * flesh out grid slightly more * add indexed groups utility * integrate Groups into per-session info * refactor statement store to borrow Groups * implement manifest knowledge utility * add a test for topology setup * don't send to group members * test for conflicting manifests * manifest knowledge tests * fmt * rename field * garbage collection for grid tracker * routines for finding correct/incorrect advertisers * add manifest import logic * tweak naming * more tests for manifest import * add comment * rework candidates into a view-wide tracker * fmt * start writing boilerplate for grid sending * fmt * some more group boilerplate * refactor handling of topology and authority IDs * fmt * send statements directly to grid peers where possible * send to cluster only if statement belongs to cluster * improve handling of cluster statements * handle incoming statements along the grid * API for introduction of candidates into the tree * backing: use new prospective parachains API * fmt prospective parachains changes * fmt statement-dist * fix condition * get ready for tracking importable candidates * prospective parachains: add Cow logic * incomplete and complete hypothetical candidates * remove keep_if_unneeded * fmt * implement more general HypotheticalFrontier * fmt, cleanup * add a by_parent_hash index to candidate tracker * more framework for future code * utilities for getting all 
hypothetical candidates for frontier * track origin in statement store * fmt * requests should return peer * apply post-confirmation reckoning * flesh out import/announce/circulate logic on new statements * adjust * adjust TODO comment * fix backing tests * update statement-distribution to use new indexedvec * fmt * query hypothetical candidates * implement `note_importable_under` * extract common utility of fragment tree updates * add a helper function for getting statements unknown by backing * import fresh statements to backing * send announcements and acknowledgements over grid * provide freshly importable statements also avoid tracking backed candidates in statement distribution * do not issue requests on newly importable candidates * add TODO for later when confirming candidate * write a routine for handling backed candidate notifications * simplify grid substantially * add some test TODOs * handle confirmed candidates & grid announcements * finish implementing manifest handling, including follow up statements * send follow-up statements when acknowledging freshly backed * fmt * handle incoming acknowledgements * a little DRYing * wire up network messages to handlers * fmt * some skeleton code for peer view update handling * more peer view skeleton stuff * Fix async backing statement distribution tests (#6621) * Fix compile errors in tests * Cargo fmt * Resolve some todos in async backing statement-distribution branch (#6482) * Implement `remove_by_relay_parent` * Extract `minimum_votes` to shared primitives. * Add `can_send_statements_received_with_prejudice` test * Fix test * Update docstrings * Cargo fmt * Fix compile error * Fix compile errors in tests * Cargo fmt * Add module docs; write `test_priority_ordering` (first draft) * Fix `test_priority_ordering` * Move `insert_or_update_priority`: `Drop` -> `set_cluster_priority` * Address review comments * Remove `Entry::get_mut` * fix test compilation * add a TODO for a test * clean up a couple of TODOs * implement sending pending cluster statements * refactor utility function for sending acknowledgement and statements * mostly implement catching peers up via grid * Fix clippy error * alter grid to track all pending statements * fix more TODOs and format * tweak a TODO in requests * some logic for dispatching requests * fmt * skeleton for response receiving * Async backing statement distribution: cluster tests (#6678) * Add `pending_statements_set_when_receiving_fresh_statements` * Add `pending_statements_updated_when_sending_statements` test * fix up * fmt * update TODO * rework seconded mask in requests * change doc * change unhandledresponse not to borrow request manager * only accept responses sufficient to back * finish implementing response handling * extract statement filter to protocol crate * rework requests: use statement filter in network protocol * dispatch cluster requests correctly * rework cluster statement sending * implement request answering * fmt * only send confirmed candidate statement messages on unified relay-parent * Fix Tests In Statement Distribution Branch * Async Backing: Integrate `vstaging` of statement distribution into `lib.rs` (#6715) * Integrate `handle_active_leaves_update` * Integrate `share_local_statement`/`handle_backed_candidate_message` * Start hooking up request/response flow * Finish hooking up request/response flow * Limit number of parallel requests in responder * Fix test compilation errors * Fix missing check for prospective parachains mode * Fix some more compile errors * clean up some 
review comments * clean up warnings * Async backing statement distribution: grid tests (#6673) * Add `manifest_import_returns_ok_true` test * cargo fmt * Add pending_communication_receiving_manifest_on_confirmed_candidate * Add `senders_can_provide_manifests_in_acknowledgement` test * Add a couple of tests for pending statements * Add `pending_statements_cleared_when_sending` test * Add `pending_statements_respect_remote_knowledge` test * Refactor group creation in tests * Clarify docs * Address some review comments * Make some clarifications * Fix post-merge errors * Clarify test `senders_can_provide_manifests_in_acknowledgement` * Try writing `pending_statements_are_updated_after_manifest_exchange` * Document "seconding limit" and `reject_overflowing_manifests` test * Test that seconding counts are not updated for validators on error * Fix tests * Fix manifest exchange test * Add more tests in `requests.rs` (#6707) This resolves remaining TODOs in this file. * remove outdated inventory terminology * Async backing statement distribution: `Candidates` tests (#6658) * Async Backing: Fix clippy errors in statement distribution branch (#6720) * Integrate `handle_active_leaves_update` * Integrate `share_local_statement`/`handle_backed_candidate_message` * Start hooking up request/response flow * Finish hooking up request/response flow * Limit number of parallel requests in responder * Fix test compilation errors * Fix missing check for prospective parachains mode * Fix some more compile errors * Async Backing: Fix clippy errors in statement distribution branch * Fix some more clippy lints * add tests module * fix warnings in existing tests * create basic test harness * create a test state struct * fmt * create empty cluster & grid modules for tests * some TODOs for cluster test suite * describe test-suite for grid logic * describe request test suite * fix seconding-limit bug * Remove extraneous `pub` This somehow made it into my clippy PR. 
* Fix some test compile warnings * Remove some unneeded `allow`s * adapt some new test helpers from Marcin * add helper for activating a gossip topology * add utility for signing statements * helpers for connecting/disconnecting peers * round out network utilities * fmt * fix bug in initializing validator-meta * fix compilation * implement first cluster test * TODOs for incoming request tests * Remove unneeded `make_committed_candidate` helper * fmt * some more tests for cluster * add a TODO about grid senders * integrate inbound req/res into test harness * polish off initial cluster test suite * keep introduce candidate request * fix tests after introduce candidate request * fmt * Add grid protocol to module docs * Fix comments * Test `backed_in_path_only: true` * Update node/network/protocol/src/lib.rs Co-authored-by: Chris Sosnin <48099298+slumber@users.noreply.github.com> * Update node/network/protocol/src/request_response/mod.rs Co-authored-by: Chris Sosnin <48099298+slumber@users.noreply.github.com> * Mark receiver with `vstaging` * validate grid senders based on manifest kind * fix mask_seconded/valid * fix unwanted-mask check * fix build * resolve todo on leaf mode * Unify protocol naming to vstaging * fmt, fix grid test after topology change * typo Co-authored-by: Chris Sosnin <48099298+slumber@users.noreply.github.com> * address review * adjust comment, make easier to understand * Fix typo --------- Co-authored-by: Marcin S Co-authored-by: Marcin S Co-authored-by: Chris Sosnin <48099298+slumber@users.noreply.github.com> Co-authored-by: Chris Sosnin --- Cargo.lock | 16 +- node/core/backing/src/lib.rs | 33 +- .../src/tests/prospective_parachains.rs | 170 +- .../src/fragment_tree.rs | 3 - node/core/prospective-parachains/src/lib.rs | 66 +- node/core/prospective-parachains/src/tests.rs | 291 +- node/network/protocol/Cargo.toml | 1 + node/network/protocol/src/lib.rs | 147 +- .../protocol/src/request_response/mod.rs | 86 +- .../protocol/src/request_response/outgoing.rs | 4 + .../protocol/src/request_response/vstaging.rs | 42 +- .../network/statement-distribution/Cargo.toml | 5 +- .../statement-distribution/src/error.rs | 25 +- .../src/legacy_v1/mod.rs | 12 +- .../src/legacy_v1/responder.rs | 4 +- .../src/legacy_v1/tests.rs | 65 + .../network/statement-distribution/src/lib.rs | 216 +- .../src/vstaging/candidates.rs | 1297 ++++++++ .../src/vstaging/cluster.rs | 1203 ++++++++ .../src/vstaging/grid.rs | 2248 ++++++++++++++ .../src/vstaging/groups.rs | 70 + .../src/vstaging/mod.rs | 2658 +++++++++++++++++ .../src/vstaging/requests.rs | 1165 ++++++++ .../src/vstaging/statement_store.rs | 283 ++ .../src/vstaging/tests/cluster.rs | 1216 ++++++++ .../src/vstaging/tests/grid.rs | 41 + .../src/vstaging/tests/mod.rs | 503 ++++ .../src/vstaging/tests/requests.rs | 27 + node/overseer/src/lib.rs | 2 + node/primitives/src/lib.rs | 17 +- node/service/src/lib.rs | 4 + node/service/src/overseer.rs | 5 + node/subsystem-types/src/messages.rs | 32 +- node/subsystem-util/src/lib.rs | 6 +- primitives/src/v2/mod.rs | 20 +- primitives/test-helpers/src/lib.rs | 46 +- runtime/parachains/src/inclusion/mod.rs | 3 +- 37 files changed, 11682 insertions(+), 350 deletions(-) create mode 100644 node/network/statement-distribution/src/vstaging/candidates.rs create mode 100644 node/network/statement-distribution/src/vstaging/cluster.rs create mode 100644 node/network/statement-distribution/src/vstaging/grid.rs create mode 100644 node/network/statement-distribution/src/vstaging/groups.rs create mode 100644 
node/network/statement-distribution/src/vstaging/mod.rs create mode 100644 node/network/statement-distribution/src/vstaging/requests.rs create mode 100644 node/network/statement-distribution/src/vstaging/statement_store.rs create mode 100644 node/network/statement-distribution/src/vstaging/tests/cluster.rs create mode 100644 node/network/statement-distribution/src/vstaging/tests/grid.rs create mode 100644 node/network/statement-distribution/src/vstaging/tests/mod.rs create mode 100644 node/network/statement-distribution/src/vstaging/tests/requests.rs diff --git a/Cargo.lock b/Cargo.lock index 741abb459a06..bfc60401eebe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7197,6 +7197,7 @@ name = "polkadot-node-network-protocol" version = "0.9.37" dependencies = [ "async-trait", + "bitvec 1.0.1", "derive_more", "fatality", "futures", @@ -7801,6 +7802,7 @@ version = "0.9.37" dependencies = [ "arrayvec 0.5.2", "assert_matches", + "bitvec 1.0.1", "fatality", "futures", "futures-timer", @@ -7810,9 +7812,11 @@ dependencies = [ "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rand_chacha 0.3.1", "sc-keystore", "sc-network", "sp-application-crypto", @@ -8126,14 +8130,14 @@ dependencies = [ [[package]] name = "pretty_assertions" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c89f989ac94207d048d92db058e4f6ec7342b0971fc58d1271ca148b799b3563" +checksum = "a25e9bcb20aa780fd0bb16b72403a9064d6b3f22f026946029acb941a50af755" dependencies = [ - "ansi_term", "ctor", "diff", "output_vt100", + "yansi", ] [[package]] @@ -13951,6 +13955,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "yansi" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" + [[package]] name = "yasna" version = "0.5.1" diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index a640acf6d62c..157181a9d940 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -80,16 +80,16 @@ use futures::{ use error::{Error, FatalResult}; use polkadot_node_primitives::{ - AvailableData, InvalidCandidate, PoV, SignedFullStatementWithPVD, StatementWithPVD, - ValidationResult, BACKING_EXECUTION_TIMEOUT, + minimum_votes, AvailableData, InvalidCandidate, PoV, SignedFullStatementWithPVD, + StatementWithPVD, ValidationResult, BACKING_EXECUTION_TIMEOUT, }; use polkadot_node_subsystem::{ messages::{ AvailabilityDistributionMessage, AvailabilityStoreMessage, CanSecondRequest, CandidateBackingMessage, CandidateValidationMessage, CollatorProtocolMessage, - HypotheticalCandidate, HypotheticalFrontierRequest, ProspectiveParachainsMessage, - ProvisionableData, ProvisionerMessage, RuntimeApiMessage, RuntimeApiRequest, - StatementDistributionMessage, + HypotheticalCandidate, HypotheticalFrontierRequest, IntroduceCandidateRequest, + ProspectiveParachainsMessage, ProvisionableData, ProvisionerMessage, RuntimeApiMessage, + RuntimeApiRequest, StatementDistributionMessage, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, }; @@ -374,13 +374,6 @@ struct AttestingData { backing: Vec, } -/// How many votes we need to consider a candidate backed. 
-/// -/// WARNING: This has to be kept in sync with the runtime check in the inclusion module. -fn minimum_votes(n_validators: usize) -> usize { - std::cmp::min(2, n_validators) -} - #[derive(Default)] struct TableContext { validator: Option, @@ -1514,10 +1507,12 @@ async fn import_statement( if !per_candidate.contains_key(&candidate_hash) { if rp_state.prospective_parachains_mode.is_enabled() { let (tx, rx) = oneshot::channel(); - ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded( - candidate.descriptor().para_id, - candidate.clone(), - pvd.clone(), + ctx.send_message(ProspectiveParachainsMessage::IntroduceCandidate( + IntroduceCandidateRequest { + candidate_para: candidate.descriptor().para_id, + candidate_receipt: candidate.clone(), + persisted_validation_data: pvd.clone(), + }, tx, )) .await; @@ -1536,6 +1531,12 @@ async fn import_statement( return Err(Error::RejectedByProspectiveParachains) }, } + + ctx.send_message(ProspectiveParachainsMessage::CandidateSeconded( + candidate.descriptor().para_id, + candidate_hash, + )) + .await; } // Only save the candidate if it was approved by prospective parachains. diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index c29a3382510e..fe935c2746e5 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -420,18 +420,27 @@ fn seconding_sanity_check_allowed() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, + ProspectiveParachainsMessage::IntroduceCandidate( + req, tx, ), - ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { // Any non-empty response will do. tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap(); } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( + _, + _ + )) + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -557,18 +566,27 @@ fn seconding_sanity_check_disallowed() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, + ProspectiveParachainsMessage::IntroduceCandidate( + req, tx, ), - ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { // Any non-empty response will do. 
tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap(); } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( + _, + _ + )) + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -757,13 +775,14 @@ fn prospective_parachains_reject_candidate() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, + ProspectiveParachainsMessage::IntroduceCandidate( + req, tx, ), - ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { // Reject it. tx.send(Vec::new()).unwrap(); } @@ -808,18 +827,27 @@ fn prospective_parachains_reject_candidate() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, + ProspectiveParachainsMessage::IntroduceCandidate( + req, tx, ), - ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { // Any non-empty response will do. tx.send(vec![(leaf_a_hash, vec![0, 2, 3])]).unwrap(); } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( + _, + _ + )) + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( @@ -938,19 +966,28 @@ fn second_multiple_candidates_per_relay_parent() { .await; // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceCandidate( + req, + tx, + ), + ) if + &req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { + // Any non-empty response will do. + tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, - tx, - ), - ) if &candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { - // Any non-empty response will do. - tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); - } + ProspectiveParachainsMessage::CandidateSeconded(_, _) + ) ); assert_matches!( @@ -1072,18 +1109,27 @@ fn backing_works() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, + ProspectiveParachainsMessage::IntroduceCandidate( + req, tx, ), - ) if candidate_receipt == candidate_a && candidate_para == para_id && pvd == _pvd => { + ) if + req.candidate_receipt == candidate_a + && req.candidate_para == para_id + && pvd == req.persisted_validation_data => { // Any non-empty response will do. 
tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( + _, + _ + )) + ); + assert_validate_seconded_candidate( &mut virtual_overseer, candidate_a.descriptor().relay_parent, @@ -1276,10 +1322,13 @@ fn concurrent_dependent_candidates() { // Order is not guaranteed since we have 2 statements being handled concurrently. match msg { AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded(.., tx), + ProspectiveParachainsMessage::IntroduceCandidate(_, tx), ) => { tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); }, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::CandidateSeconded(_, _), + ) => {}, AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, RuntimeApiRequest::ValidationCodeByHash(_, tx), @@ -1472,19 +1521,28 @@ fn seconding_sanity_check_occupy_same_depth() { .await; // Prospective parachains are notified. + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains( + ProspectiveParachainsMessage::IntroduceCandidate( + req, + tx, + ), + ) if + &req.candidate_receipt == candidate + && &req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { + // Any non-empty response will do. + tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); + } + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, - tx, - ), - ) if &candidate_receipt == candidate && candidate_para == *para_id && pvd == _pvd => { - // Any non-empty response will do. - tx.send(vec![(leaf_hash, vec![0, 2, 3])]).unwrap(); - } + ProspectiveParachainsMessage::CandidateSeconded(_, _) + ) ); assert_matches!( @@ -1610,18 +1668,28 @@ fn occupied_core_assignment() { assert_matches!( virtual_overseer.recv().await, AllMessages::ProspectiveParachains( - ProspectiveParachainsMessage::CandidateSeconded( - candidate_para, - candidate_receipt, - _pvd, + ProspectiveParachainsMessage::IntroduceCandidate( + req, tx, ), - ) if candidate_receipt == candidate && candidate_para == para_id && pvd == _pvd => { + ) if + req.candidate_receipt == candidate + && req.candidate_para == para_id + && pvd == req.persisted_validation_data + => { // Any non-empty response will do. tx.send(vec![(leaf_a_hash, vec![0, 1, 2, 3])]).unwrap(); } ); + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ProspectiveParachains(ProspectiveParachainsMessage::CandidateSeconded( + _, + _ + )) + ); + assert_matches!( virtual_overseer.recv().await, AllMessages::StatementDistribution( diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index eea0f6969291..1ded5fa44fdb 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -144,7 +144,6 @@ impl CandidateStorage { } /// Remove a candidate from the store. - #[allow(dead_code)] pub fn remove_candidate(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.remove(candidate_hash) { let parent_head_hash = entry.candidate.persisted_validation_data.parent_head.hash(); @@ -158,7 +157,6 @@ impl CandidateStorage { } /// Note that an existing candidate has been seconded. 
- #[allow(dead_code)] pub fn mark_seconded(&mut self, candidate_hash: &CandidateHash) { if let Some(entry) = self.by_candidate_hash.get_mut(candidate_hash) { if entry.state != CandidateState::Backed { @@ -251,7 +249,6 @@ enum CandidateState { /// is not necessarily backed. Introduced, /// The candidate has been seconded. - #[allow(dead_code)] Seconded, /// The candidate has been completely backed by the group. Backed, diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 01d45f6a0e2e..f0e6eaaa14c8 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -37,7 +37,7 @@ use futures::{channel::oneshot, prelude::*}; use polkadot_node_subsystem::{ messages::{ ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, - HypotheticalFrontierRequest, ProspectiveParachainsMessage, + HypotheticalFrontierRequest, IntroduceCandidateRequest, ProspectiveParachainsMessage, ProspectiveValidationDataRequest, RuntimeApiMessage, RuntimeApiRequest, }, overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, SubsystemError, @@ -47,8 +47,7 @@ use polkadot_node_subsystem_util::{ runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, }; use polkadot_primitives::vstaging::{ - BlockNumber, CandidateHash, CommittedCandidateReceipt, CoreState, Hash, Id as ParaId, - PersistedValidationData, + BlockNumber, CandidateHash, CoreState, Hash, Id as ParaId, PersistedValidationData, }; use crate::{ @@ -136,8 +135,10 @@ async fn run_iteration( }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => {}, FromOrchestra::Communication { msg } => match msg { - ProspectiveParachainsMessage::CandidateSeconded(para, candidate, pvd, tx) => - handle_candidate_seconded(&mut *ctx, view, para, candidate, pvd, tx).await?, + ProspectiveParachainsMessage::IntroduceCandidate(request, tx) => + handle_candidate_introduced(&mut *ctx, view, request, tx).await?, + ProspectiveParachainsMessage::CandidateSeconded(para, candidate_hash) => + handle_candidate_seconded(view, para, candidate_hash), ProspectiveParachainsMessage::CandidateBacked(para, candidate_hash) => handle_candidate_backed(&mut *ctx, view, para, candidate_hash).await?, ProspectiveParachainsMessage::GetBackableCandidate( @@ -289,14 +290,18 @@ fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) { } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] -async fn handle_candidate_seconded( +async fn handle_candidate_introduced( _ctx: &mut Context, view: &mut View, - para: ParaId, - candidate: CommittedCandidateReceipt, - pvd: PersistedValidationData, + request: IntroduceCandidateRequest, tx: oneshot::Sender, ) -> JfyiErrorResult<()> { + let IntroduceCandidateRequest { + candidate_para: para, + candidate_receipt: candidate, + persisted_validation_data: pvd, + } = request; + // Add the candidate to storage. // Then attempt to add it to all trees. let storage = match view.candidate_storage.get_mut(¶) { @@ -316,8 +321,9 @@ async fn handle_candidate_seconded( let candidate_hash = match storage.add_candidate(candidate, pvd) { Ok(c) => c, - Err(crate::fragment_tree::CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => { - let _ = tx.send(Vec::new()); + Err(crate::fragment_tree::CandidateStorageInsertionError::CandidateAlreadyKnown(c)) => { + // Candidate known - return existing fragment tree membership. 
+ let _ = tx.send(fragment_tree_membership(&view.active_leaves, para, c)); return Ok(()) }, Err( @@ -347,11 +353,45 @@ async fn handle_candidate_seconded( } } } + + if membership.is_empty() { + storage.remove_candidate(&candidate_hash); + } + let _ = tx.send(membership); Ok(()) } +fn handle_candidate_seconded(view: &mut View, para: ParaId, candidate_hash: CandidateHash) { + let storage = match view.candidate_storage.get_mut(¶) { + None => { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + ?candidate_hash, + "Received instruction to second unknown candidate", + ); + + return + }, + Some(storage) => storage, + }; + + if !storage.contains(&candidate_hash) { + gum::warn!( + target: LOG_TARGET, + para_id = ?para, + ?candidate_hash, + "Received instruction to second unknown candidate", + ); + + return + } + + storage.mark_seconded(&candidate_hash); +} + #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] async fn handle_candidate_backed( _ctx: &mut Context, @@ -365,7 +405,7 @@ async fn handle_candidate_backed( target: LOG_TARGET, para_id = ?para, ?candidate_hash, - "Received instruction to back candidate", + "Received instruction to back unknown candidate", ); return Ok(()) @@ -378,7 +418,7 @@ async fn handle_candidate_backed( target: LOG_TARGET, para_id = ?para, ?candidate_hash, - "Received instruction to mark unknown candidate as backed.", + "Received instruction to back unknown candidate", ); return Ok(()) diff --git a/node/core/prospective-parachains/src/tests.rs b/node/core/prospective-parachains/src/tests.rs index d8d99eba1bff..1936a482e685 100644 --- a/node/core/prospective-parachains/src/tests.rs +++ b/node/core/prospective-parachains/src/tests.rs @@ -15,7 +15,6 @@ // along with Polkadot. If not, see . use super::*; -use ::polkadot_primitives_test_helpers::{dummy_candidate_receipt_bad_sig, dummy_hash}; use assert_matches::assert_matches; use polkadot_node_subsystem::{ errors::RuntimeApiError, @@ -27,12 +26,11 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_types::{jaeger, ActivatedLeaf, LeafStatus}; use polkadot_primitives::{ - v2::{ - CandidateCommitments, HeadData, Header, PersistedValidationData, ScheduledCore, - ValidationCodeHash, - }, vstaging::{AsyncBackingParameters, Constraints, InboundHrmpLimitations}, + CommittedCandidateReceipt, HeadData, Header, PersistedValidationData, ScheduledCore, + ValidationCodeHash, }; +use polkadot_primitives_test_helpers::make_candidate; use std::sync::Arc; const ALLOWED_ANCESTRY_LEN: u32 = 3; @@ -59,7 +57,7 @@ fn dummy_constraints( ump_remaining: 10, ump_remaining_bytes: 1_000, max_ump_num_per_candidate: 10, - dmp_remaining_messages: vec![10], + dmp_remaining_messages: vec![], hrmp_inbound: InboundHrmpLimitations { valid_watermarks }, hrmp_channels_out: vec![], max_hrmp_num_per_candidate: 0, @@ -70,42 +68,6 @@ fn dummy_constraints( } } -fn dummy_pvd(parent_head: HeadData, relay_parent_number: u32) -> PersistedValidationData { - PersistedValidationData { - parent_head, - relay_parent_number, - max_pov_size: MAX_POV_SIZE, - relay_parent_storage_root: dummy_hash(), - } -} - -fn make_candidate( - leaf: &TestLeaf, - para_id: ParaId, - parent_head: HeadData, - head_data: HeadData, - validation_code_hash: ValidationCodeHash, -) -> (CommittedCandidateReceipt, PersistedValidationData) { - let pvd = dummy_pvd(parent_head, leaf.number); - let commitments = CandidateCommitments { - head_data, - horizontal_messages: Default::default(), - upward_messages: 
Default::default(), - new_validation_code: None, - processed_downward_messages: 0, - hrmp_watermark: leaf.number, - }; - - let mut candidate = dummy_candidate_receipt_bad_sig(leaf.hash, Some(Default::default())); - candidate.commitments_hash = commitments.hash(); - candidate.descriptor.para_id = para_id; - candidate.descriptor.persisted_validation_data_hash = pvd.hash(); - candidate.descriptor.validation_code_hash = validation_code_hash; - let candidate = CommittedCandidateReceipt { descriptor: candidate.descriptor, commitments }; - - (candidate, pvd) -} - struct TestState { availability_cores: Vec, validation_code_hash: ValidationCodeHash, @@ -334,25 +296,36 @@ async fn deactivate_leaf(virtual_overseer: &mut VirtualOverseer, hash: Hash) { .await; } -async fn second_candidate( +async fn introduce_candidate( virtual_overseer: &mut VirtualOverseer, candidate: CommittedCandidateReceipt, pvd: PersistedValidationData, - expected_candidate_response: Vec<(Hash, Vec)>, ) { - let (tx, rx) = oneshot::channel(); + let req = IntroduceCandidateRequest { + candidate_para: candidate.descriptor().para_id, + candidate_receipt: candidate, + persisted_validation_data: pvd, + }; + let (tx, _) = oneshot::channel(); + virtual_overseer + .send(overseer::FromOrchestra::Communication { + msg: ProspectiveParachainsMessage::IntroduceCandidate(req, tx), + }) + .await; +} + +async fn second_candidate( + virtual_overseer: &mut VirtualOverseer, + candidate: CommittedCandidateReceipt, +) { virtual_overseer .send(overseer::FromOrchestra::Communication { msg: ProspectiveParachainsMessage::CandidateSeconded( candidate.descriptor.para_id, - candidate, - pvd, - tx, + candidate.hash(), ), }) .await; - let resp = rx.await.unwrap(); - assert_eq!(resp, expected_candidate_response); } async fn back_candidate( @@ -414,6 +387,7 @@ async fn get_hypothetical_frontier( receipt: CommittedCandidateReceipt, persisted_validation_data: PersistedValidationData, fragment_tree_relay_parent: Hash, + backed_in_path_only: bool, expected_depths: Vec, ) { let hypothetical_candidate = HypotheticalCandidate::Complete { @@ -424,7 +398,7 @@ async fn get_hypothetical_frontier( let request = HypotheticalFrontierRequest { candidates: vec![hypothetical_candidate.clone()], fragment_tree_relay_parent: Some(fragment_tree_relay_parent), - backed_in_path_only: false, + backed_in_path_only, }; let (tx, rx) = oneshot::channel(); virtual_overseer @@ -433,8 +407,11 @@ async fn get_hypothetical_frontier( }) .await; let resp = rx.await.unwrap(); - let expected_frontier = - vec![(hypothetical_candidate, vec![(fragment_tree_relay_parent, expected_depths)])]; + let expected_frontier = if expected_depths.is_empty() { + vec![(hypothetical_candidate, vec![])] + } else { + vec![(hypothetical_candidate, vec![(fragment_tree_relay_parent, expected_depths)])] + }; assert_eq!(resp, expected_frontier); } @@ -540,7 +517,8 @@ fn send_candidates_and_check_if_found() { // Candidate A1 let (candidate_a1, pvd_a1) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1, 2, 3]), HeadData(vec![1]), @@ -551,7 +529,8 @@ fn send_candidates_and_check_if_found() { // Candidate A2 let (candidate_a2, pvd_a2) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 2.into(), HeadData(vec![2, 3, 4]), HeadData(vec![2]), @@ -562,7 +541,8 @@ fn send_candidates_and_check_if_found() { // Candidate B let (candidate_b, pvd_b) = make_candidate( - &leaf_b, + leaf_b.hash, + leaf_b.number, 1.into(), HeadData(vec![3, 4, 5]), HeadData(vec![3]), @@ -573,7 +553,8 @@ fn 
send_candidates_and_check_if_found() { // Candidate C let (candidate_c, pvd_c) = make_candidate( - &leaf_c, + leaf_c.hash, + leaf_c.number, 2.into(), HeadData(vec![6, 7, 8]), HeadData(vec![4]), @@ -582,11 +563,11 @@ fn send_candidates_and_check_if_found() { let candidate_hash_c = candidate_c.hash(); let response_c = vec![(leaf_c.hash, vec![0])]; - // Second candidates. - second_candidate(&mut virtual_overseer, candidate_a1, pvd_a1, response_a1.clone()).await; - second_candidate(&mut virtual_overseer, candidate_a2, pvd_a2, response_a2.clone()).await; - second_candidate(&mut virtual_overseer, candidate_b, pvd_b, response_b.clone()).await; - second_candidate(&mut virtual_overseer, candidate_c, pvd_c, response_c.clone()).await; + // Introduce candidates. + introduce_candidate(&mut virtual_overseer, candidate_a1, pvd_a1).await; + introduce_candidate(&mut virtual_overseer, candidate_a2, pvd_a2).await; + introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b).await; + introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c).await; // Check candidate tree membership. get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a1, response_a1).await; @@ -650,29 +631,30 @@ fn check_candidate_parent_leaving_view() { // Candidate A1 let (candidate_a1, pvd_a1) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1, 2, 3]), HeadData(vec![1]), test_state.validation_code_hash, ); let candidate_hash_a1 = candidate_a1.hash(); - let response_a1 = vec![(leaf_a.hash, vec![0])]; // Candidate A2 let (candidate_a2, pvd_a2) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 2.into(), HeadData(vec![2, 3, 4]), HeadData(vec![2]), test_state.validation_code_hash, ); let candidate_hash_a2 = candidate_a2.hash(); - let response_a2 = vec![(leaf_a.hash, vec![0])]; // Candidate B let (candidate_b, pvd_b) = make_candidate( - &leaf_b, + leaf_b.hash, + leaf_b.number, 1.into(), HeadData(vec![3, 4, 5]), HeadData(vec![3]), @@ -683,7 +665,8 @@ fn check_candidate_parent_leaving_view() { // Candidate C let (candidate_c, pvd_c) = make_candidate( - &leaf_c, + leaf_c.hash, + leaf_c.number, 2.into(), HeadData(vec![6, 7, 8]), HeadData(vec![4]), @@ -692,11 +675,11 @@ fn check_candidate_parent_leaving_view() { let candidate_hash_c = candidate_c.hash(); let response_c = vec![(leaf_c.hash, vec![0])]; - // Second candidates. - second_candidate(&mut virtual_overseer, candidate_a1, pvd_a1, response_a1.clone()).await; - second_candidate(&mut virtual_overseer, candidate_a2, pvd_a2, response_a2.clone()).await; - second_candidate(&mut virtual_overseer, candidate_b, pvd_b, response_b.clone()).await; - second_candidate(&mut virtual_overseer, candidate_c, pvd_c, response_c.clone()).await; + // Introduce candidates. + introduce_candidate(&mut virtual_overseer, candidate_a1, pvd_a1).await; + introduce_candidate(&mut virtual_overseer, candidate_a2, pvd_a2).await; + introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b).await; + introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c).await; // Deactivate leaf A. deactivate_leaf(&mut virtual_overseer, leaf_a.hash).await; @@ -772,7 +755,8 @@ fn check_candidate_on_multiple_forks() { // Candidate on leaf A. let (candidate_a, pvd_a) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1, 2, 3]), HeadData(vec![1]), @@ -783,7 +767,8 @@ fn check_candidate_on_multiple_forks() { // Candidate on leaf B. 
let (candidate_b, pvd_b) = make_candidate( - &leaf_b, + leaf_b.hash, + leaf_b.number, 1.into(), HeadData(vec![3, 4, 5]), HeadData(vec![1]), @@ -794,7 +779,8 @@ fn check_candidate_on_multiple_forks() { // Candidate on leaf C. let (candidate_c, pvd_c) = make_candidate( - &leaf_c, + leaf_c.hash, + leaf_c.number, 1.into(), HeadData(vec![5, 6, 7]), HeadData(vec![1]), @@ -803,28 +789,10 @@ fn check_candidate_on_multiple_forks() { let candidate_hash_c = candidate_c.hash(); let response_c = vec![(leaf_c.hash, vec![0])]; - // Second candidate on all three leaves. - second_candidate( - &mut virtual_overseer, - candidate_a.clone(), - pvd_a.clone(), - response_a.clone(), - ) - .await; - second_candidate( - &mut virtual_overseer, - candidate_b.clone(), - pvd_b.clone(), - response_b.clone(), - ) - .await; - second_candidate( - &mut virtual_overseer, - candidate_c.clone(), - pvd_c.clone(), - response_c.clone(), - ) - .await; + // Introduce candidates on all three leaves. + introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; + introduce_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c).await; // Check candidate tree membership. get_membership(&mut virtual_overseer, 1.into(), candidate_hash_a, response_a).await; @@ -861,18 +829,19 @@ fn check_backable_query() { // Candidate A let (candidate_a, pvd_a) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1, 2, 3]), HeadData(vec![1]), test_state.validation_code_hash, ); let candidate_hash_a = candidate_a.hash(); - let response_a = vec![(leaf_a.hash, vec![0])]; // Candidate B let (mut candidate_b, pvd_b) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1]), HeadData(vec![2]), @@ -881,24 +850,25 @@ fn check_backable_query() { // Set a field to make this candidate unique. candidate_b.descriptor.para_head = Hash::from_low_u64_le(1000); let candidate_hash_b = candidate_b.hash(); - let response_b = vec![(leaf_a.hash, vec![1])]; - // Second candidates. - second_candidate( - &mut virtual_overseer, - candidate_a.clone(), - pvd_a.clone(), - response_a.clone(), - ) - .await; - second_candidate( + // Introduce candidates. + introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; + + // Should not get any backable candidates. + get_backable_candidate( &mut virtual_overseer, - candidate_b.clone(), - pvd_b.clone(), - response_b.clone(), + &leaf_a, + 1.into(), + vec![candidate_hash_a], + None, ) .await; + // Second candidates. + second_candidate(&mut virtual_overseer, candidate_a.clone()).await; + second_candidate(&mut virtual_overseer, candidate_b.clone()).await; + // Should not get any backable candidates. get_backable_candidate( &mut virtual_overseer, @@ -953,7 +923,7 @@ fn check_backable_query() { // Test depth query. #[test] -fn check_depth_query() { +fn check_hypothetical_frontier_query() { let test_state = TestState::default(); let view = test_harness(|mut virtual_overseer| async move { // Leaf A @@ -971,36 +941,36 @@ fn check_depth_query() { // Candidate A. let (candidate_a, pvd_a) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1, 2, 3]), HeadData(vec![1]), test_state.validation_code_hash, ); let candidate_hash_a = candidate_a.hash(); - let response_a = vec![(leaf_a.hash, vec![0])]; // Candidate B. 
let (candidate_b, pvd_b) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1]), HeadData(vec![2]), test_state.validation_code_hash, ); let candidate_hash_b = candidate_b.hash(); - let response_b = vec![(leaf_a.hash, vec![1])]; // Candidate C. let (candidate_c, pvd_c) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![2]), HeadData(vec![3]), test_state.validation_code_hash, ); let candidate_hash_c = candidate_c.hash(); - let response_c = vec![(leaf_a.hash, vec![2])]; // Get hypothetical frontier of candidate A before adding it. get_hypothetical_frontier( @@ -1009,19 +979,25 @@ fn check_depth_query() { candidate_a.clone(), pvd_a.clone(), leaf_a.hash, + false, vec![0], ) .await; - - // Add candidate A. - second_candidate( + // Should work with `backed_in_path_only: true`, too. + get_hypothetical_frontier( &mut virtual_overseer, + candidate_hash_a, candidate_a.clone(), pvd_a.clone(), - response_a.clone(), + leaf_a.hash, + true, + vec![0], ) .await; + // Add candidate A. + introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()).await; + // Get frontier of candidate A after adding it. get_hypothetical_frontier( &mut virtual_overseer, @@ -1029,6 +1005,7 @@ fn check_depth_query() { candidate_a.clone(), pvd_a.clone(), leaf_a.hash, + false, vec![0], ) .await; @@ -1040,18 +1017,13 @@ fn check_depth_query() { candidate_b.clone(), pvd_b.clone(), leaf_a.hash, + false, vec![1], ) .await; // Add candidate B. - second_candidate( - &mut virtual_overseer, - candidate_b.clone(), - pvd_b.clone(), - response_b.clone(), - ) - .await; + introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b.clone()).await; // Get frontier of candidate B after adding it. get_hypothetical_frontier( @@ -1060,6 +1032,7 @@ fn check_depth_query() { candidate_b, pvd_b.clone(), leaf_a.hash, + false, vec![1], ) .await; @@ -1071,29 +1044,47 @@ fn check_depth_query() { candidate_c.clone(), pvd_c.clone(), leaf_a.hash, + false, vec![2], ) .await; - - // Add candidate C. - second_candidate( + // Should be empty with `backed_in_path_only` because we haven't backed anything. + get_hypothetical_frontier( &mut virtual_overseer, + candidate_hash_c, candidate_c.clone(), pvd_c.clone(), - response_c.clone(), + leaf_a.hash, + true, + vec![], ) .await; + // Add candidate C. + introduce_candidate(&mut virtual_overseer, candidate_c.clone(), pvd_c.clone()).await; + // Get frontier of candidate C after adding it. get_hypothetical_frontier( &mut virtual_overseer, candidate_hash_c, - candidate_c, + candidate_c.clone(), pvd_c.clone(), leaf_a.hash, + false, vec![2], ) .await; + // Should be empty with `backed_in_path_only` because we haven't backed anything. + get_hypothetical_frontier( + &mut virtual_overseer, + candidate_hash_c, + candidate_c.clone(), + pvd_c.clone(), + leaf_a.hash, + true, + vec![], + ) + .await; virtual_overseer }); @@ -1121,33 +1112,33 @@ fn check_pvd_query() { // Candidate A. let (candidate_a, pvd_a) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1, 2, 3]), HeadData(vec![1]), test_state.validation_code_hash, ); - let response_a = vec![(leaf_a.hash, vec![0])]; // Candidate B. let (candidate_b, pvd_b) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![1]), HeadData(vec![2]), test_state.validation_code_hash, ); - let response_b = vec![(leaf_a.hash, vec![1])]; // Candidate C. 
let (candidate_c, pvd_c) = make_candidate( - &leaf_a, + leaf_a.hash, + leaf_a.number, 1.into(), HeadData(vec![2]), HeadData(vec![3]), test_state.validation_code_hash, ); - let response_c = vec![(leaf_a.hash, vec![2])]; // Get pvd of candidate A before adding it. get_pvd( @@ -1160,13 +1151,7 @@ fn check_pvd_query() { .await; // Add candidate A. - second_candidate( - &mut virtual_overseer, - candidate_a.clone(), - pvd_a.clone(), - response_a.clone(), - ) - .await; + introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a.clone()).await; back_candidate(&mut virtual_overseer, &candidate_a, candidate_a.hash()).await; // Get pvd of candidate A after adding it. @@ -1190,8 +1175,7 @@ fn check_pvd_query() { .await; // Add candidate B. - second_candidate(&mut virtual_overseer, candidate_b, pvd_b.clone(), response_b.clone()) - .await; + introduce_candidate(&mut virtual_overseer, candidate_b, pvd_b.clone()).await; // Get pvd of candidate B after adding it. get_pvd( @@ -1214,8 +1198,7 @@ fn check_pvd_query() { .await; // Add candidate C. - second_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone(), response_c.clone()) - .await; + introduce_candidate(&mut virtual_overseer, candidate_c, pvd_c.clone()).await; // Get pvd of candidate C after adding it. get_pvd( diff --git a/node/network/protocol/Cargo.toml b/node/network/protocol/Cargo.toml index e170d76a713b..8d840413dcc3 100644 --- a/node/network/protocol/Cargo.toml +++ b/node/network/protocol/Cargo.toml @@ -22,6 +22,7 @@ fatality = "0.0.6" rand = "0.8" derive_more = "0.99" gum = { package = "tracing-gum", path = "../../gum" } +bitvec = "1" [dev-dependencies] rand_chacha = "0.3.1" diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 25c16de5ccdf..9f50bf29406e 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -589,11 +589,12 @@ pub mod v1 { /// vstaging network protocol types. pub mod vstaging { + use bitvec::{order::Lsb0, slice::BitSlice, vec::BitVec}; use parity_scale_codec::{Decode, Encode}; use polkadot_primitives::vstaging::{ - CandidateHash, CandidateIndex, CollatorId, CollatorSignature, Hash, Id as ParaId, - UncheckedSignedAvailabilityBitfield, + CandidateHash, CandidateIndex, CollatorId, CollatorSignature, GroupIndex, Hash, + Id as ParaId, UncheckedSignedAvailabilityBitfield, UncheckedSignedStatement, }; use polkadot_node_primitives::{ @@ -609,17 +610,155 @@ pub mod vstaging { Bitfield(Hash, UncheckedSignedAvailabilityBitfield), } + /// Bitfields indicating the statements that are known or undesired + /// about a candidate. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct StatementFilter { + /// Seconded statements. '1' is known or undesired. + pub seconded_in_group: BitVec, + /// Valid statements. '1' is known or undesired. + pub validated_in_group: BitVec, + } + + impl StatementFilter { + /// Create a new blank filter with the given group size. + pub fn blank(group_size: usize) -> Self { + StatementFilter { + seconded_in_group: BitVec::repeat(false, group_size), + validated_in_group: BitVec::repeat(false, group_size), + } + } + + /// Create a new full filter with the given group size. + pub fn full(group_size: usize) -> Self { + StatementFilter { + seconded_in_group: BitVec::repeat(true, group_size), + validated_in_group: BitVec::repeat(true, group_size), + } + } + + /// Whether the filter has a specific expected length, consistent across both + /// bitfields. 
+ pub fn has_len(&self, len: usize) -> bool { + self.seconded_in_group.len() == len && self.validated_in_group.len() == len + } + + /// Determine the number of backing validators in the statement filter. + pub fn backing_validators(&self) -> usize { + self.seconded_in_group + .iter() + .by_vals() + .zip(self.validated_in_group.iter().by_vals()) + .filter(|&(s, v)| s || v) // no double-counting + .count() + } + + /// Whether the statement filter has at least one seconded statement. + pub fn has_seconded(&self) -> bool { + self.seconded_in_group.iter().by_vals().any(|x| x) + } + + /// Mask out `Seconded` statements in `self` according to the provided + /// bitvec. Bits appearing in `mask` will not appear in `self` afterwards. + pub fn mask_seconded(&mut self, mask: &BitSlice) { + for (mut x, mask) in self + .seconded_in_group + .iter_mut() + .zip(mask.iter().by_vals().chain(std::iter::repeat(false))) + { + // (x, mask) => x + // (true, true) => false + // (true, false) => true + // (false, true) => false + // (false, false) => false + *x = *x && !mask; + } + } + + /// Mask out `Valid` statements in `self` according to the provided + /// bitvec. Bits appearing in `mask` will not appear in `self` afterwards. + pub fn mask_valid(&mut self, mask: &BitSlice) { + for (mut x, mask) in self + .validated_in_group + .iter_mut() + .zip(mask.iter().by_vals().chain(std::iter::repeat(false))) + { + // (x, mask) => x + // (true, true) => false + // (true, false) => true + // (false, true) => false + // (false, false) => false + *x = *x && !mask; + } + } + } + + /// A manifest of a known backed candidate, along with a description + /// of the statements backing it. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct BackedCandidateManifest { + /// The relay-parent of the candidate. + pub relay_parent: Hash, + /// The hash of the candidate. + pub candidate_hash: CandidateHash, + /// The group index backing the candidate at the relay-parent. + pub group_index: GroupIndex, + /// The para ID of the candidate. It is illegal for this to + /// be a para ID which is not assigned to the group indicated + /// in this manifest. + pub para_id: ParaId, + /// The head-data corresponding to the candidate. + pub parent_head_data_hash: Hash, + /// A statement filter which indicates which validators in the + /// para's group at the relay-parent have validated this candidate + /// and issued statements about it, to the advertiser's knowledge. + /// + /// This MUST have exactly the minimum amount of bytes + /// necessary to represent the number of validators in the assigned + /// backing group as-of the relay-parent. + pub statement_knowledge: StatementFilter, + } + + /// An acknowledgement of a backed candidate being known. + #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] + pub struct BackedCandidateAcknowledgement { + /// The hash of the candidate. + pub candidate_hash: CandidateHash, + /// A statement filter which indicates which validators in the + /// para's group at the relay-parent have validated this candidate + /// and issued statements about it, to the advertiser's knowledge. + /// + /// This MUST have exactly the minimum amount of bytes + /// necessary to represent the number of validators in the assigned + /// backing group as-of the relay-parent. + pub statement_knowledge: StatementFilter, + } + /// Network messages used by the statement distribution subsystem. 
#[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub enum StatementDistributionMessage { - // TODO [now]: notifications for v2 + /// A notification of a signed statement in compact form. + #[codec(index = 0)] + Statement(Hash, UncheckedSignedStatement), + + /// A notification of a backed candidate being known by the + /// sending node, for the purpose of being requested by the receiving node + /// if needed. + #[codec(index = 1)] + BackedCandidateManifest(BackedCandidateManifest), + + /// A notification of a backed candidate being known by the sending node, + /// for the purpose of informing a receiving node which already has the candidate. + #[codec(index = 2)] + BackedCandidateKnown(BackedCandidateAcknowledgement), + /// All messages for V1 for compatibility with the statement distribution /// protocol, for relay-parents that don't support asynchronous backing. /// /// These are illegal to send to V1 peers, and illegal to send concerning relay-parents /// which support asynchronous backing. This backwards compatibility should be /// considered immediately deprecated and can be removed once the node software - /// is not required to support asynchronous backing anymore. + /// is not required to support logic from before asynchronous backing anymore. #[codec(index = 255)] V1Compatibility(crate::v1::StatementDistributionMessage), } diff --git a/node/network/protocol/src/request_response/mod.rs b/node/network/protocol/src/request_response/mod.rs index 21d3c5bed478..83e2ac12df96 100644 --- a/node/network/protocol/src/request_response/mod.rs +++ b/node/network/protocol/src/request_response/mod.rs @@ -52,10 +52,10 @@ pub use outgoing::{OutgoingRequest, OutgoingResult, Recipient, Requests, Respons ///// Multiplexer for incoming requests. // pub mod multiplexer; -/// Actual versioned requests and responses, that are sent over the wire. +/// Actual versioned requests and responses that are sent over the wire. pub mod v1; -/// Staging requests to be sent over the wire. +/// Actual versioned requests and responses that are sent over the wire. pub mod vstaging; /// A protocol per subsystem seems to make the most sense, this way we don't need any dispatching @@ -76,6 +76,10 @@ pub enum Protocol { StatementFetchingV1, /// Sending of dispute statements with application level confirmations. DisputeSendingV1, + + /// Protocol for requesting candidates with attestations in statement distribution + /// when async backing is enabled. + AttestedCandidateVStaging, } /// Minimum bandwidth we expect for validators - 500Mbit/s is the recommendation, so approximately @@ -107,12 +111,30 @@ const POV_REQUEST_TIMEOUT_CONNECTED: Duration = Duration::from_millis(1200); /// fit statement distribution within a block of 6 seconds.) const STATEMENTS_TIMEOUT: Duration = Duration::from_secs(1); +/// We want attested candidate requests to time out relatively fast, +/// because slow requests will bottleneck the backing system. Ideally, we'd have +/// an adaptive timeout based on the candidate size, because there will be a lot of variance +/// in candidate sizes: candidates with no code and no messages vs candidates with code +/// and messages. +/// +/// We supply leniency because there are often large candidates and asynchronous +/// backing allows them to be included over a longer window of time. 
Exponential back-off
+/// up to a maximum of 10 seconds would be ideal, but isn't supported by the
+/// infrastructure here yet: see https://github.com/paritytech/polkadot/issues/6009
+const ATTESTED_CANDIDATE_TIMEOUT: Duration = Duration::from_millis(2500);
+
/// We don't want a slow peer to slow down all the others, at the same time we want to get out the
/// data quickly in full to at least some peers (as this will reduce load on us as they then can
/// start serving the data). So this value is a trade-off. 3 seems to be sensible. So we would need
/// to have 3 slow nodes connected, to delay transfer for others by `STATEMENTS_TIMEOUT`.
pub const MAX_PARALLEL_STATEMENT_REQUESTS: u32 = 3;

+/// We don't want a slow peer to slow down all the others, at the same time we want to get out the
+/// data quickly in full to at least some peers (as this will reduce load on us as they then can
+/// start serving the data). So this value is a trade-off. 5 seems to be sensible. So we would need
+/// to have 5 slow nodes connected, to delay transfer for others by `ATTESTED_CANDIDATE_TIMEOUT`.
+pub const MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS: u32 = 5;
+
/// Response size limit for responses of POV like data.
///
/// This is larger than `MAX_POV_SIZE` to account for protocol overhead and for additional data in
@@ -126,6 +148,12 @@ const POV_RESPONSE_SIZE: u64 = MAX_POV_SIZE as u64 + 10_000;
/// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead.
const STATEMENT_RESPONSE_SIZE: u64 = MAX_CODE_SIZE as u64 + 10_000;

+/// Maximum response size for `AttestedCandidateVStaging`.
+///
+/// This is `MAX_CODE_SIZE` plus some additional space for protocol overhead and
+/// additional backing statements.
+const ATTESTED_CANDIDATE_RESPONSE_SIZE: u64 = MAX_CODE_SIZE as u64 + 100_000;
+
/// We can have relatively large timeouts here; there is no value in hitting a
/// timeout, as we want to get statements through to each node in any case.
pub const DISPUTE_REQUEST_TIMEOUT: Duration = Duration::from_secs(12);
@@ -227,6 +255,14 @@ impl Protocol {
 request_timeout: DISPUTE_REQUEST_TIMEOUT,
 inbound_queue: tx,
 },
+ Protocol::AttestedCandidateVStaging => RequestResponseConfig {
+ name,
+ fallback_names,
+ max_request_size: 1_000,
+ max_response_size: ATTESTED_CANDIDATE_RESPONSE_SIZE,
+ request_timeout: ATTESTED_CANDIDATE_TIMEOUT,
+ inbound_queue: tx,
+ },
 }
 }

@@ -271,24 +307,46 @@ impl Protocol {
 // average, so something in the ballpark of 100 should be fine. Nodes will retry on
 // failure, so having a good value here is mostly about performance tuning.
 Protocol::DisputeSendingV1 => 100,
+
+ Protocol::AttestedCandidateVStaging => {
+ // We assume we can utilize up to 70% of the available bandwidth for statements.
+ // This is just a guess/estimate, with the following considerations: if we are
+ // faster than that, the queue size will stay low anyway; even if not, requesters
+ // will get an immediate error. But if we are slower, requesters will run into a
+ // timeout, wasting precious time.
+ let available_bandwidth = 7 * MIN_BANDWIDTH_BYTES / 10;
+ let size = u64::saturating_sub(
+ ATTESTED_CANDIDATE_TIMEOUT.as_millis() as u64 * available_bandwidth /
+ (1000 * MAX_CODE_SIZE as u64),
+ MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as u64,
+ );
+ debug_assert!(
+ size > 0,
+ "We should have a channel size greater than zero, otherwise we won't accept any requests."
+ );
+ size as usize
+ },
 }
 }

/// Fallback protocol names of this protocol, as understood by substrate networking.
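
To make the queue-size arithmetic above concrete, a rough worked example; the constant values used here (50 MiB/s minimum bandwidth, 3 MiB maximum code size) are assumptions for illustration only and are not taken from this diff.

// Assumed values, for illustration only.
const MIN_BANDWIDTH_BYTES: u64 = 50 * 1024 * 1024; // ~500 Mbit/s
const MAX_CODE_SIZE: u64 = 3 * 1024 * 1024;
const ATTESTED_CANDIDATE_TIMEOUT_MS: u64 = 2_500;
const MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS: u64 = 5;

fn example_queue_size() -> u64 {
    // 70% of the bandwidth assumed usable for statements.
    let available_bandwidth = 7 * MIN_BANDWIDTH_BYTES / 10; // 36_700_160 B/s
    // Worst-case (max-code-size) responses we can serve within one timeout
    // window, minus the requests already being served in parallel:
    // 2_500 * 36_700_160 / 3_145_728_000 = 29, and 29 - 5 = 24.
    u64::saturating_sub(
        ATTESTED_CANDIDATE_TIMEOUT_MS * available_bandwidth / (1000 * MAX_CODE_SIZE),
        MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS,
    )
}
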
fn get_fallback_names(self) -> Vec<ProtocolName> {
- std::iter::once(self.get_legacy_name().into()).collect()
+ self.get_legacy_name().into_iter().map(Into::into).collect()
 }

- /// Legacy protocol name associated with each peer set.
- const fn get_legacy_name(self) -> &'static str {
+ /// Legacy protocol name associated with each peer set, if any.
+ const fn get_legacy_name(self) -> Option<&'static str> {
 match self {
- Protocol::ChunkFetchingV1 => "/polkadot/req_chunk/1",
- Protocol::CollationFetchingV1 => "/polkadot/req_collation/1",
- Protocol::CollationFetchingVStaging => "/polkadot/req_collation/2",
- Protocol::PoVFetchingV1 => "/polkadot/req_pov/1",
- Protocol::AvailableDataFetchingV1 => "/polkadot/req_available_data/1",
- Protocol::StatementFetchingV1 => "/polkadot/req_statement/1",
- Protocol::DisputeSendingV1 => "/polkadot/send_dispute/1",
+ Protocol::ChunkFetchingV1 => Some("/polkadot/req_chunk/1"),
+ Protocol::CollationFetchingV1 => Some("/polkadot/req_collation/1"),
+ Protocol::PoVFetchingV1 => Some("/polkadot/req_pov/1"),
+ Protocol::AvailableDataFetchingV1 => Some("/polkadot/req_available_data/1"),
+ Protocol::StatementFetchingV1 => Some("/polkadot/req_statement/1"),
+ Protocol::DisputeSendingV1 => Some("/polkadot/send_dispute/1"),
+
+ // Introduced after legacy names became legacy.
+ Protocol::AttestedCandidateVStaging => None,
+ Protocol::CollationFetchingVStaging => None,
 }
 }
}
@@ -340,11 +398,13 @@ impl ReqProtocolNames {
 let short_name = match protocol {
 Protocol::ChunkFetchingV1 => "/req_chunk/1",
 Protocol::CollationFetchingV1 => "/req_collation/1",
- Protocol::CollationFetchingVStaging => "/req_collation/2",
 Protocol::PoVFetchingV1 => "/req_pov/1",
 Protocol::AvailableDataFetchingV1 => "/req_available_data/1",
 Protocol::StatementFetchingV1 => "/req_statement/1",
 Protocol::DisputeSendingV1 => "/send_dispute/1",
+
+ Protocol::CollationFetchingVStaging => "/req_collation/2",
+ Protocol::AttestedCandidateVStaging => "/req_attested_candidate/2",
 };

 format!("{}{}", prefix, short_name).into()
diff --git a/node/network/protocol/src/request_response/outgoing.rs b/node/network/protocol/src/request_response/outgoing.rs
index 79e9f37b60fe..e5aa117ff654 100644
--- a/node/network/protocol/src/request_response/outgoing.rs
+++ b/node/network/protocol/src/request_response/outgoing.rs
@@ -41,6 +41,8 @@ pub enum Requests {
 /// Requests for notifying about an ongoing dispute.
 DisputeSendingV1(OutgoingRequest<v1::DisputeRequest>),
+ /// Request a candidate and attestations.
+ AttestedCandidateVStaging(OutgoingRequest<vstaging::AttestedCandidateRequest>),
 /// Fetch a collation from a collator which previously announced it.
 /// Compared to V1 it requires specifying which candidate is requested by its hash.
CollationFetchingVStaging(OutgoingRequest<vstaging::CollationFetchingRequest>),
@@ -57,6 +59,7 @@ impl Requests {
 Self::AvailableDataFetchingV1(_) => Protocol::AvailableDataFetchingV1,
 Self::StatementFetchingV1(_) => Protocol::StatementFetchingV1,
 Self::DisputeSendingV1(_) => Protocol::DisputeSendingV1,
+ Self::AttestedCandidateVStaging(_) => Protocol::AttestedCandidateVStaging,
 }
 }

@@ -76,6 +79,7 @@ impl Requests {
 Self::AvailableDataFetchingV1(r) => r.encode_request(),
 Self::StatementFetchingV1(r) => r.encode_request(),
 Self::DisputeSendingV1(r) => r.encode_request(),
+ Self::AttestedCandidateVStaging(r) => r.encode_request(),
 }
 }
}
diff --git a/node/network/protocol/src/request_response/vstaging.rs b/node/network/protocol/src/request_response/vstaging.rs
index 0b8d223e3aee..f84de9505534 100644
--- a/node/network/protocol/src/request_response/vstaging.rs
+++ b/node/network/protocol/src/request_response/vstaging.rs
@@ -14,10 +14,50 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.

+//! Requests and responses as sent over the wire for the individual protocols.
+
 use parity_scale_codec::{Decode, Encode};
-use polkadot_primitives::v2::{CandidateHash, Hash, Id as ParaId};
+
+use polkadot_primitives::vstaging::{
+ CandidateHash, CommittedCandidateReceipt, Hash, Id as ParaId, PersistedValidationData,
+ UncheckedSignedStatement,
+};

 use super::{IsRequest, Protocol};
+use crate::vstaging::StatementFilter;
+
+/// Request a candidate with statements.
+#[derive(Debug, Clone, Encode, Decode)]
+pub struct AttestedCandidateRequest {
+ /// Hash of the candidate we want to request.
+ pub candidate_hash: CandidateHash,
+ /// Statement filter with 'OR' semantics, indicating the validators
+ /// for which statements should not be sent.
+ ///
+ /// The filter must have exactly the minimum size required to
+ /// fit all validators from the backing group.
+ ///
+ /// The response must not contain any statements masked out by this mask.
+ pub mask: StatementFilter,
+}
+
+/// Response to an `AttestedCandidateRequest`.
+#[derive(Debug, Clone, Encode, Decode)]
+pub struct AttestedCandidateResponse {
+ /// The candidate receipt, with commitments.
+ pub candidate_receipt: CommittedCandidateReceipt,
+ /// The [`PersistedValidationData`] corresponding to the candidate.
+ pub persisted_validation_data: PersistedValidationData,
+ /// All known statements about the candidate, in compact form,
+ /// omitting `Seconded` statements which were intended to be masked
+ /// out.
+ pub statements: Vec<UncheckedSignedStatement>,
+}
+
+impl IsRequest for AttestedCandidateRequest {
+ type Response = AttestedCandidateResponse;
+ const PROTOCOL: Protocol = Protocol::AttestedCandidateVStaging;
+}

/// Responses as sent by collators.
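
A sketch of how a requester might build the request defined above, assuming the two `StatementFilter` bit-fields seen earlier in this patch are publicly constructible; the group size and known-statement position are invented for the example.

use bitvec::prelude::*;

// Hypothetical helper: request a candidate, masking out the `Seconded`
// statement we already hold from the validator at group position 1.
fn make_request(candidate_hash: CandidateHash, group_size: usize) -> AttestedCandidateRequest {
    let mut seconded_in_group = bitvec![u8, Lsb0; 0; group_size];
    seconded_in_group.set(1, true);

    AttestedCandidateRequest {
        candidate_hash,
        mask: StatementFilter {
            seconded_in_group,
            // No `Valid` statements known yet, so nothing is masked out here.
            validated_in_group: bitvec![u8, Lsb0; 0; group_size],
        },
    }
}
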
pub type CollationFetchingResponse = super::v1::CollationFetchingResponse; diff --git a/node/network/statement-distribution/Cargo.toml b/node/network/statement-distribution/Cargo.toml index 5dcb2a75d3f5..dc5d9b15d7b3 100644 --- a/node/network/statement-distribution/Cargo.toml +++ b/node/network/statement-distribution/Cargo.toml @@ -20,10 +20,11 @@ indexmap = "1.9.1" parity-scale-codec = { version = "3.3.0", default-features = false, features = ["derive"] } thiserror = "1.0.31" fatality = "0.0.6" +bitvec = "1" [dev-dependencies] -polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } assert_matches = "1.4.0" +polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } sp-authority-discovery = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } @@ -34,3 +35,5 @@ sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "maste sc-network = { git = "https://github.com/paritytech/substrate", branch = "master" } futures-timer = "3.0.2" polkadot-primitives-test-helpers = { path = "../../../primitives/test-helpers" } +rand_chacha = "0.3" +polkadot-node-subsystem-types = { path = "../../subsystem-types" } diff --git a/node/network/statement-distribution/src/error.rs b/node/network/statement-distribution/src/error.rs index 70dadc43ed8c..c49b208adbfb 100644 --- a/node/network/statement-distribution/src/error.rs +++ b/node/network/statement-distribution/src/error.rs @@ -19,8 +19,10 @@ use polkadot_node_network_protocol::PeerId; use polkadot_node_subsystem::{RuntimeApiError, SubsystemError}; -use polkadot_node_subsystem_util::runtime; -use polkadot_primitives::{CandidateHash, Hash, Id as ParaId}; +use polkadot_node_subsystem_util::{ + backing_implicit_view::FetchError as ImplicitViewFetchError, runtime, +}; +use polkadot_primitives::v2::{CandidateHash, Hash, Id as ParaId}; use futures::channel::oneshot; @@ -64,6 +66,21 @@ pub enum Error { #[error("Fetching persisted validation data for para {0:?}, {1:?}")] FetchPersistedValidationData(ParaId, RuntimeApiError), + #[error("Fetching session index failed {0:?}")] + FetchSessionIndex(RuntimeApiError), + + #[error("Fetching session info failed {0:?}")] + FetchSessionInfo(RuntimeApiError), + + #[error("Fetching availability cores failed {0:?}")] + FetchAvailabilityCores(RuntimeApiError), + + #[error("Fetching validator groups failed {0:?}")] + FetchValidatorGroups(RuntimeApiError), + + #[error("Attempted to share statement when not a validator or not assigned")] + InvalidShare, + #[error("Relay parent could not be found in active heads")] NoSuchHead(Hash), @@ -84,6 +101,10 @@ pub enum Error { // Responder no longer waits for our data. (Should not happen right now.) #[error("Oneshot `GetData` channel closed")] ResponderGetDataCanceled, + + // Failed to activate leaf due to a fetch error. + #[error("Implicit view failure while activating leaf")] + ActivateLeafFailure(ImplicitViewFetchError), } /// Utility for eating top level errors and log them. 
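
The doc comment above refers to the subsystem's split between fatal and non-fatal errors. A minimal sketch of that log-and-continue pattern, with invented stand-in types (the real code uses the `fatality` crate and `gum` for logging):

enum TopLevelError {
    Fatal(FatalError),
    Jfyi(String),
}

struct FatalError;

// Non-fatal ("just for your information") errors are logged and swallowed
// so the subsystem keeps running; fatal errors terminate the main loop.
fn log_error(result: Result<(), TopLevelError>, ctx: &'static str) -> Result<(), FatalError> {
    match result {
        Ok(()) => Ok(()),
        Err(TopLevelError::Fatal(f)) => Err(f),
        Err(TopLevelError::Jfyi(e)) => {
            eprintln!("{ctx}: non-fatal: {e}"); // stand-in for gum::debug!
            Ok(())
        },
    }
}
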
diff --git a/node/network/statement-distribution/src/legacy_v1/mod.rs b/node/network/statement-distribution/src/legacy_v1/mod.rs
index 752385e1aea6..f5c174f28184 100644
--- a/node/network/statement-distribution/src/legacy_v1/mod.rs
+++ b/node/network/statement-distribution/src/legacy_v1/mod.rs
@@ -426,7 +426,7 @@ impl PeerRelayParentKnowledge {
 }
}

-struct PeerData {
+pub struct PeerData {
 view: View,
 protocol_version: ValidationVersion,
 view_knowledge: HashMap<Hash, PeerRelayParentKnowledge>,
@@ -1439,6 +1439,16 @@ async fn handle_incoming_message<'a, Context>(
 Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility(
 m,
 )) => m,
+ Versioned::VStaging(_) => {
+ // The higher-level subsystem code is supposed to filter out
+ // all non-v1 messages.
+ gum::debug!(
+ target: LOG_TARGET,
+ "Legacy statement-distribution code received unintended v2 message"
+ );
+
+ return None
+ },
 };

 let relay_parent = message.get_relay_parent();
diff --git a/node/network/statement-distribution/src/legacy_v1/responder.rs b/node/network/statement-distribution/src/legacy_v1/responder.rs
index 8db38385e581..e9e45f56fe68 100644
--- a/node/network/statement-distribution/src/legacy_v1/responder.rs
+++ b/node/network/statement-distribution/src/legacy_v1/responder.rs
@@ -48,8 +48,8 @@ pub enum ResponderMessage {
 /// A fetching task, taking care of fetching large statements via request/response.
 ///
-/// A fetch task does not know about a particular `Statement` instead it just tries fetching a
-/// `CommittedCandidateReceipt` from peers, whether this can be used to re-assemble one ore
+/// A fetch task does not know about a particular `Statement`, instead it just tries fetching a
+/// `CommittedCandidateReceipt` from peers, whether this can be used to re-assemble one or
 /// many `SignedFullStatement`s needs to be verified by the caller.
pub async fn respond( mut receiver: IncomingRequestReceiver, diff --git a/node/network/statement-distribution/src/legacy_v1/tests.rs b/node/network/statement-distribution/src/legacy_v1/tests.rs index f768a7370363..0764040921cd 100644 --- a/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -762,11 +762,13 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); + let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); let bg = async move { let s = StatementDistributionSubsystem::new( Arc::new(LocalKeystore::in_memory()), statement_req_receiver, + candidate_req_receiver, Default::default(), AlwaysZeroRng, ); @@ -786,6 +788,17 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { ))) .await; + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + ) + if r == hash_a + => { + let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + } + ); + assert_matches!( handle.recv().await, AllMessages::RuntimeApi( @@ -984,11 +997,13 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); let (statement_req_receiver, mut req_cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); + let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); let bg = async move { let s = StatementDistributionSubsystem::new( make_ferdie_keystore(), statement_req_receiver, + candidate_req_receiver, Default::default(), AlwaysZeroRng, ); @@ -1008,6 +1023,17 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( ))) .await; + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + ) + if r == hash_a + => { + let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + } + ); + assert_matches!( handle.recv().await, AllMessages::RuntimeApi( @@ -1512,11 +1538,13 @@ fn share_prioritizes_backing_group() { let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); let (statement_req_receiver, mut req_cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); + let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); let bg = async move { let s = StatementDistributionSubsystem::new( make_ferdie_keystore(), statement_req_receiver, + candidate_req_receiver, Default::default(), AlwaysZeroRng, ); @@ -1536,6 +1564,17 @@ fn share_prioritizes_backing_group() { ))) .await; + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + ) + if r == hash_a + => { + let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + } + ); + assert_matches!( handle.recv().await, AllMessages::RuntimeApi( @@ -1817,10 +1856,12 @@ fn peer_cant_flood_with_large_statements() { let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); let 
(statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); + let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); let bg = async move { let s = StatementDistributionSubsystem::new( make_ferdie_keystore(), statement_req_receiver, + candidate_req_receiver, Default::default(), AlwaysZeroRng, ); @@ -1840,6 +1881,17 @@ fn peer_cant_flood_with_large_statements() { ))) .await; + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + ) + if r == hash_a + => { + let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + } + ); + assert_matches!( handle.recv().await, AllMessages::RuntimeApi( @@ -2023,11 +2075,13 @@ fn handle_multiple_seconded_statements() { let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None); let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); + let (candidate_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names); let virtual_overseer_fut = async move { let s = StatementDistributionSubsystem::new( Arc::new(LocalKeystore::in_memory()), statement_req_receiver, + candidate_req_receiver, Default::default(), AlwaysZeroRng, ); @@ -2047,6 +2101,17 @@ fn handle_multiple_seconded_statements() { ))) .await; + assert_matches!( + handle.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + ) + if r == relay_parent_hash + => { + let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + } + ); + assert_matches!( handle.recv().await, AllMessages::RuntimeApi( diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index c1ab9bd50821..11d765d7aa95 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -25,7 +25,9 @@ use error::{log_error, FatalResult}; use polkadot_node_network_protocol::{ - request_response::{v1 as request_v1, IncomingRequestReceiver}, + request_response::{ + v1 as request_v1, vstaging::AttestedCandidateRequest, IncomingRequestReceiver, + }, vstaging as protocol_vstaging, Versioned, }; use polkadot_node_primitives::StatementWithPVD; @@ -56,6 +58,8 @@ use legacy_v1::{ ResponderMessage as V1ResponderMessage, }; +mod vstaging; + const LOG_TARGET: &str = "parachain::statement-distribution"; /// The statement distribution subsystem. @@ -64,6 +68,8 @@ pub struct StatementDistributionSubsystem { keystore: SyncCryptoStorePtr, /// Receiver for incoming large statement requests. v1_req_receiver: Option>, + /// Receiver for incoming candidate requests. + req_receiver: Option>, /// Prometheus metrics metrics: Metrics, /// Pseudo-random generator for peers selection logic @@ -93,25 +99,41 @@ enum MuxedMessage { V1Requester(Option), /// Messages from spawned v1 (legacy) responder background task. V1Responder(Option), + /// Messages from candidate responder background task. + Responder(Option), + /// Messages from answered requests. 
+ Response(vstaging::UnhandledResponse), } #[overseer::contextbounds(StatementDistribution, prefix = self::overseer)] impl MuxedMessage { async fn receive( ctx: &mut Context, + state: &mut vstaging::State, from_v1_requester: &mut mpsc::Receiver, from_v1_responder: &mut mpsc::Receiver, + from_responder: &mut mpsc::Receiver, ) -> MuxedMessage { // We are only fusing here to make `select` happy, in reality we will quit if one of those // streams end: let from_orchestra = ctx.recv().fuse(); let from_v1_requester = from_v1_requester.next(); let from_v1_responder = from_v1_responder.next(); - futures::pin_mut!(from_orchestra, from_v1_requester, from_v1_responder); + let from_responder = from_responder.next(); + let receive_response = vstaging::receive_response(state).fuse(); + futures::pin_mut!( + from_orchestra, + from_v1_requester, + from_v1_responder, + from_responder, + receive_response + ); futures::select! { msg = from_orchestra => MuxedMessage::Subsystem(msg.map_err(FatalError::SubsystemReceive)), msg = from_v1_requester => MuxedMessage::V1Requester(msg), msg = from_v1_responder => MuxedMessage::V1Responder(msg), + msg = from_responder => MuxedMessage::Responder(msg), + msg = receive_response => MuxedMessage::Response(msg), } } } @@ -122,14 +144,22 @@ impl StatementDistributionSubsystem { pub fn new( keystore: SyncCryptoStorePtr, v1_req_receiver: IncomingRequestReceiver, + req_receiver: IncomingRequestReceiver, metrics: Metrics, rng: R, ) -> Self { - Self { keystore, v1_req_receiver: Some(v1_req_receiver), metrics, rng } + Self { + keystore, + v1_req_receiver: Some(v1_req_receiver), + req_receiver: Some(req_receiver), + metrics, + rng, + } } async fn run(mut self, mut ctx: Context) -> std::result::Result<(), FatalError> { let mut legacy_v1_state = crate::legacy_v1::State::new(self.keystore.clone()); + let mut state = crate::vstaging::State::new(self.keystore.clone()); // Sender/Receiver for getting news from our statement fetching tasks. let (v1_req_sender, mut v1_req_receiver) = mpsc::channel(1); @@ -146,14 +176,34 @@ impl StatementDistributionSubsystem { ) .map_err(FatalError::SpawnTask)?; + // Sender/receiver for getting news from our candidate responder task. + let (res_sender, mut res_receiver) = mpsc::channel(1); + + ctx.spawn( + "candidate-responder", + vstaging::respond_task( + self.req_receiver.take().expect("Mandatory argument to new. 
qed"), + res_sender.clone(), + ) + .boxed(), + ) + .map_err(FatalError::SpawnTask)?; + loop { - let message = - MuxedMessage::receive(&mut ctx, &mut v1_req_receiver, &mut v1_res_receiver).await; + let message = MuxedMessage::receive( + &mut ctx, + &mut state, + &mut v1_req_receiver, + &mut v1_res_receiver, + &mut res_receiver, + ) + .await; match message { MuxedMessage::Subsystem(result) => { let result = self .handle_subsystem_message( &mut ctx, + &mut state, &mut legacy_v1_state, &v1_req_sender, result?, @@ -185,7 +235,18 @@ impl StatementDistributionSubsystem { .await; log_error(result.map_err(From::from), "handle_responder_message")?; }, + MuxedMessage::Responder(result) => { + vstaging::answer_request( + &mut state, + result.ok_or(FatalError::RequesterReceiverFinished)?, + ); + }, + MuxedMessage::Response(result) => { + vstaging::handle_response(&mut ctx, &mut state, result).await; + }, }; + + vstaging::dispatch_requests(&mut ctx, &mut state).await; } Ok(()) } @@ -193,6 +254,7 @@ impl StatementDistributionSubsystem { async fn handle_subsystem_message( &mut self, ctx: &mut Context, + state: &mut vstaging::State, legacy_v1_state: &mut legacy_v1::State, v1_req_sender: &mpsc::Sender, message: FromOrchestra, @@ -206,70 +268,112 @@ impl StatementDistributionSubsystem { })) => { let _timer = metrics.time_active_leaves_update(); - for deactivated in deactivated { - crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, deactivated); - } - - if let Some(activated) = activated { - // Legacy, activate only if no prospective parachains support. + // vstaging should handle activated first because of implicit view. + if let Some(ref activated) = activated { let mode = prospective_parachains_mode(ctx.sender(), activated.hash).await?; - if let ProspectiveParachainsMode::Disabled = mode { - crate::legacy_v1::handle_activated_leaf(ctx, legacy_v1_state, activated) - .await?; + if let ProspectiveParachainsMode::Enabled { .. } = mode { + vstaging::handle_active_leaves_update(ctx, state, activated, mode).await?; + } else if let ProspectiveParachainsMode::Disabled = mode { + for deactivated in &deactivated { + crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated); + } + + crate::legacy_v1::handle_activated_leaf( + ctx, + legacy_v1_state, + activated.clone(), + ) + .await?; + } + } else { + for deactivated in &deactivated { + crate::legacy_v1::handle_deactivate_leaf(legacy_v1_state, *deactivated); } + vstaging::handle_deactivate_leaves(state, &deactivated); } }, FromOrchestra::Signal(OverseerSignal::BlockFinalized(..)) => { // do nothing }, FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(true), - FromOrchestra::Communication { msg } => - match msg { - StatementDistributionMessage::Share(relay_parent, statement) => { - let _timer = metrics.time_share(); - - // pass to legacy if legacy state contains head. - if legacy_v1_state.contains_relay_parent(&relay_parent) { - crate::legacy_v1::share_local_statement( - ctx, - legacy_v1_state, - relay_parent, - StatementWithPVD::drop_pvd_from_signed(statement), - &mut self.rng, - metrics, - ) + FromOrchestra::Communication { msg } => match msg { + StatementDistributionMessage::Share(relay_parent, statement) => { + let _timer = metrics.time_share(); + + // pass to legacy if legacy state contains head. 
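+ // (The legacy state only tracks relay-parents that were activated without
+ // prospective-parachains support, so this check routes each `Share`
+ // to exactly one of the two protocol implementations.)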
+ if legacy_v1_state.contains_relay_parent(&relay_parent) { + crate::legacy_v1::share_local_statement( + ctx, + legacy_v1_state, + relay_parent, + StatementWithPVD::drop_pvd_from_signed(statement), + &mut self.rng, + metrics, + ) + .await?; + } else { + vstaging::share_local_statement(ctx, state, relay_parent, statement) .await?; + } + }, + StatementDistributionMessage::NetworkBridgeUpdate(event) => { + // pass all events to both protocols except for messages, + // which are filtered. + enum VersionTarget { + Legacy, + Current, + Both, + } + + impl VersionTarget { + fn targets_legacy(&self) -> bool { + match self { + &VersionTarget::Legacy | &VersionTarget::Both => true, + _ => false, + } } - }, - StatementDistributionMessage::NetworkBridgeUpdate(event) => { - // pass to legacy, but not if the message isn't - // v1. - let legacy = match &event { - NetworkBridgeEvent::PeerMessage(_, message) => match message { - Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::V1Compatibility(_)) => true, - Versioned::V1(_) => true, - // TODO [now]: _ => false, - }, - _ => true, - }; - - if legacy { - crate::legacy_v1::handle_network_update( - ctx, - legacy_v1_state, - v1_req_sender, - event, - &mut self.rng, - metrics, - ) - .await; + + fn targets_current(&self) -> bool { + match self { + &VersionTarget::Current | &VersionTarget::Both => true, + _ => false, + } } + } - // TODO [now]: pass to vstaging, but not if the message is - // v1 or the connecting peer is v1. - }, - StatementDistributionMessage::Backed(_candidate_hash) => {}, + let target = match &event { + NetworkBridgeEvent::PeerMessage(_, message) => match message { + Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::V1Compatibility(_), + ) => VersionTarget::Legacy, + Versioned::V1(_) => VersionTarget::Legacy, + Versioned::VStaging(_) => VersionTarget::Current, + }, + _ => VersionTarget::Both, + }; + + if target.targets_legacy() { + crate::legacy_v1::handle_network_update( + ctx, + legacy_v1_state, + v1_req_sender, + event.clone(), + &mut self.rng, + metrics, + ) + .await; + } + + if target.targets_current() { + // pass to vstaging. + vstaging::handle_network_update(ctx, state, event).await; + } + }, + StatementDistributionMessage::Backed(candidate_hash) => { + crate::vstaging::handle_backed_candidate_message(ctx, state, candidate_hash) + .await; }, + }, } Ok(false) } diff --git a/node/network/statement-distribution/src/vstaging/candidates.rs b/node/network/statement-distribution/src/vstaging/candidates.rs new file mode 100644 index 000000000000..804da987ba6d --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/candidates.rs @@ -0,0 +1,1297 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The [`Candidates`] store tracks information about advertised candidates +//! as well as which peers have advertised them. +//! +//! 
Due to the request-oriented nature of this protocol, we often learn
+//! about candidates just as a hash, alongside claimed properties that the
+//! receipt would commit to. However, it is only later on that we can
+//! confirm those claimed properties. This store lets us keep track of
+//! all candidates which are currently 'relevant' after spam-protection, and
+//! gives us the ability to detect mis-advertisements after the fact
+//! and punish them accordingly.

+use polkadot_node_network_protocol::PeerId;
+use polkadot_node_subsystem::messages::HypotheticalCandidate;
+use polkadot_primitives::vstaging::{
+ CandidateHash, CommittedCandidateReceipt, GroupIndex, Hash, Id as ParaId,
+ PersistedValidationData,
+};
+
+use std::{
+ collections::{
+ hash_map::{Entry, HashMap},
+ HashSet,
+ },
+ sync::Arc,
+};
+
+/// This encapsulates the correct and incorrect advertisers
+/// post-confirmation of a candidate.
+#[derive(Debug, Default, PartialEq)]
+pub struct PostConfirmationReckoning {
+ /// Peers which advertised correctly.
+ pub correct: HashSet<PeerId>,
+ /// Peers which advertised the candidate incorrectly.
+ pub incorrect: HashSet<PeerId>,
+}
+
+/// Outputs generated by initial confirmation of a candidate.
+#[derive(Debug, PartialEq)]
+pub struct PostConfirmation {
+ /// The hypothetical candidate used to determine importability and membership
+ /// in the hypothetical frontier.
+ pub hypothetical: HypotheticalCandidate,
+ /// A "reckoning" of peers who have advertised the candidate previously,
+ /// either accurately or inaccurately.
+ pub reckoning: PostConfirmationReckoning,
+}
+
+/// A tracker for all known candidates in the view.
+///
+/// See module docs for more info.
+#[derive(Default)]
+pub struct Candidates {
+ candidates: HashMap<CandidateHash, CandidateState>,
+ by_parent: HashMap<(Hash, ParaId), HashSet<CandidateHash>>,
+}
+
+impl Candidates {
+ /// Insert an advertisement.
+ ///
+ /// This should be invoked only after performing
+ /// spam protection and only for advertisements that
+ /// are valid within the current view. [`Candidates`] never prunes
+ /// candidates by peer ID, to avoid peers skirting misbehavior
+ /// reports by disconnecting intermittently. Therefore, this presumes
+ /// that spam protection limits the peers which can send advertisements
+ /// about unconfirmed candidates.
+ ///
+ /// It returns either `Ok(())` or an immediate error in the
+ /// case that the candidate is already known and reality conflicts
+ /// with the advertisement.
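+ /// (For example: if the candidate was already confirmed with relay-parent A,
+ /// an advertisement claiming relay-parent B for the same candidate hash
+ /// returns a `BadAdvertisement` for the advertising peer.)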
+ pub fn insert_unconfirmed( + &mut self, + peer: PeerId, + candidate_hash: CandidateHash, + claimed_relay_parent: Hash, + claimed_group_index: GroupIndex, + claimed_parent_hash_and_id: Option<(Hash, ParaId)>, + ) -> Result<(), BadAdvertisement> { + let entry = self.candidates.entry(candidate_hash).or_insert_with(|| { + CandidateState::Unconfirmed(UnconfirmedCandidate { + claims: Vec::new(), + parent_claims: HashMap::new(), + unconfirmed_importable_under: HashSet::new(), + }) + }); + + match entry { + CandidateState::Confirmed(ref c) => { + if c.relay_parent() != claimed_relay_parent { + return Err(BadAdvertisement) + } + + if c.group_index() != claimed_group_index { + return Err(BadAdvertisement) + } + + if let Some((claimed_parent_hash, claimed_id)) = claimed_parent_hash_and_id { + if c.parent_head_data_hash() != claimed_parent_hash { + return Err(BadAdvertisement) + } + + if c.para_id() != claimed_id { + return Err(BadAdvertisement) + } + } + }, + CandidateState::Unconfirmed(ref mut c) => { + c.add_claims( + peer, + CandidateClaims { + relay_parent: claimed_relay_parent, + group_index: claimed_group_index, + parent_hash_and_id: claimed_parent_hash_and_id, + }, + ); + + if let Some(parent_claims) = claimed_parent_hash_and_id { + self.by_parent.entry(parent_claims).or_default().insert(candidate_hash); + } + }, + } + + Ok(()) + } + + /// Note that a candidate has been confirmed. If the candidate has just been + /// confirmed (previous state was `Unconfirmed`), then this returns `Some`. Otherwise, `None`. + /// + /// If we are confirming for the first time, then remove any outdated claims, and generate a + /// reckoning of which peers advertised correctly and incorrectly. + /// + /// This does no sanity-checking of input data, and will overwrite already-confirmed candidates. + pub fn confirm_candidate( + &mut self, + candidate_hash: CandidateHash, + candidate_receipt: CommittedCandidateReceipt, + persisted_validation_data: PersistedValidationData, + assigned_group: GroupIndex, + ) -> Option { + let parent_hash = persisted_validation_data.parent_head.hash(); + let relay_parent = candidate_receipt.descriptor().relay_parent; + let para_id = candidate_receipt.descriptor().para_id; + + let prev_state = self.candidates.insert( + candidate_hash, + CandidateState::Confirmed(ConfirmedCandidate { + receipt: Arc::new(candidate_receipt), + persisted_validation_data, + assigned_group, + parent_hash, + importable_under: HashSet::new(), + }), + ); + let new_confirmed = + match self.candidates.get_mut(&candidate_hash).expect("just inserted; qed") { + CandidateState::Confirmed(x) => x, + _ => panic!("just inserted as confirmed; qed"), + }; + + self.by_parent.entry((parent_hash, para_id)).or_default().insert(candidate_hash); + + match prev_state { + None => Some(PostConfirmation { + reckoning: Default::default(), + hypothetical: new_confirmed.to_hypothetical(candidate_hash), + }), + Some(CandidateState::Confirmed(_)) => None, + Some(CandidateState::Unconfirmed(u)) => Some({ + let mut reckoning = PostConfirmationReckoning::default(); + + for (leaf_hash, x) in u.unconfirmed_importable_under { + if x.relay_parent == relay_parent && + x.parent_hash == parent_hash && + x.para_id == para_id + { + new_confirmed.importable_under.insert(leaf_hash); + } + } + + for (peer, claims) in u.claims { + // Update the by-parent-hash index not to store any outdated + // claims. 
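+ // (A claim is outdated when the peer advertised a parent head-data hash
+ // or para-id that differs from what the confirmed receipt commits to.)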
+ if let Some((claimed_parent_hash, claimed_id)) = claims.parent_hash_and_id { + if claimed_parent_hash != parent_hash || claimed_id != para_id { + if let Entry::Occupied(mut e) = + self.by_parent.entry((claimed_parent_hash, claimed_id)) + { + e.get_mut().remove(&candidate_hash); + if e.get().is_empty() { + e.remove(); + } + } + } + } + + if claims.check(relay_parent, assigned_group, parent_hash, para_id) { + reckoning.correct.insert(peer); + } else { + reckoning.incorrect.insert(peer); + } + } + + PostConfirmation { + reckoning, + hypothetical: new_confirmed.to_hypothetical(candidate_hash), + } + }), + } + } + + /// Whether a candidate is confirmed. + pub fn is_confirmed(&self, candidate_hash: &CandidateHash) -> bool { + match self.candidates.get(candidate_hash) { + Some(CandidateState::Confirmed(_)) => true, + _ => false, + } + } + + /// Get a reference to the candidate, if it's known and confirmed. + pub fn get_confirmed(&self, candidate_hash: &CandidateHash) -> Option<&ConfirmedCandidate> { + match self.candidates.get(candidate_hash) { + Some(CandidateState::Confirmed(ref c)) => Some(c), + _ => None, + } + } + + /// Whether statements from a candidate are importable. + /// + /// This is only true when the candidate is known, confirmed, + /// and is importable in a fragment tree. + pub fn is_importable(&self, candidate_hash: &CandidateHash) -> bool { + self.get_confirmed(candidate_hash).map_or(false, |c| c.is_importable(None)) + } + + /// Note that a candidate is importable in a fragment tree indicated by the given + /// leaf hash. + pub fn note_importable_under(&mut self, candidate: &HypotheticalCandidate, leaf_hash: Hash) { + match candidate { + HypotheticalCandidate::Incomplete { + candidate_hash, + candidate_para, + parent_head_data_hash, + candidate_relay_parent, + } => { + let u = UnconfirmedImportable { + relay_parent: *candidate_relay_parent, + parent_hash: *parent_head_data_hash, + para_id: *candidate_para, + }; + + if let Some(&mut CandidateState::Unconfirmed(ref mut c)) = + self.candidates.get_mut(&candidate_hash) + { + c.note_maybe_importable_under(leaf_hash, u); + } + }, + HypotheticalCandidate::Complete { candidate_hash, .. } => { + if let Some(&mut CandidateState::Confirmed(ref mut c)) = + self.candidates.get_mut(&candidate_hash) + { + c.importable_under.insert(leaf_hash); + } + }, + } + } + + /// Get all hypothetical candidates which should be tested + /// for inclusion in the frontier. + /// + /// Provide optional parent parablock information to filter hypotheticals to only + /// potential children of that parent. + pub fn frontier_hypotheticals( + &self, + parent: Option<(Hash, ParaId)>, + ) -> Vec { + fn extend_hypotheticals<'a>( + v: &mut Vec, + i: impl IntoIterator, + maybe_required_parent: Option<(Hash, ParaId)>, + ) { + for (c_hash, candidate) in i { + match candidate { + CandidateState::Unconfirmed(u) => + u.extend_hypotheticals(*c_hash, v, maybe_required_parent), + CandidateState::Confirmed(c) => v.push(c.to_hypothetical(*c_hash)), + } + } + } + + let mut v = Vec::new(); + if let Some(parent) = parent { + let maybe_children = self.by_parent.get(&parent); + let i = maybe_children + .into_iter() + .flatten() + .filter_map(|c_hash| self.candidates.get_key_value(c_hash)); + + extend_hypotheticals(&mut v, i, Some(parent)); + } else { + extend_hypotheticals(&mut v, self.candidates.iter(), None); + } + v + } + + /// Prune all candidates according to the relay-parent predicate + /// provided. 
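+ /// Deactivated leaves are also removed from the `importable_under` set of
+ /// confirmed candidates, and unconfirmed candidates which lose all of
+ /// their claims are dropped entirely.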
+ pub fn on_deactivate_leaves(
+ &mut self,
+ leaves: &[Hash],
+ relay_parent_live: impl Fn(&Hash) -> bool,
+ ) {
+ let by_parent = &mut self.by_parent;
+ let mut remove_parent_claims = |c_hash, parent_hash, id| {
+ if let Entry::Occupied(mut e) = by_parent.entry((parent_hash, id)) {
+ e.get_mut().remove(&c_hash);
+ if e.get().is_empty() {
+ e.remove();
+ }
+ }
+ };
+ self.candidates.retain(|c_hash, state| match state {
+ CandidateState::Confirmed(ref mut c) =>
+ if !relay_parent_live(&c.relay_parent()) {
+ remove_parent_claims(*c_hash, c.parent_head_data_hash(), c.para_id());
+ false
+ } else {
+ for leaf_hash in leaves {
+ c.importable_under.remove(leaf_hash);
+ }
+ true
+ },
+ CandidateState::Unconfirmed(ref mut c) => {
+ c.on_deactivate_leaves(
+ leaves,
+ |parent_hash, id| remove_parent_claims(*c_hash, parent_hash, id),
+ &relay_parent_live,
+ );
+ c.has_claims()
+ },
+ })
+ }
+}
+
+/// A bad advertisement was recognized.
+#[derive(Debug, PartialEq)]
+pub struct BadAdvertisement;
+
+#[derive(Debug, PartialEq)]
+enum CandidateState {
+ Unconfirmed(UnconfirmedCandidate),
+ Confirmed(ConfirmedCandidate),
+}
+
+/// Claims made alongside the advertisement of a candidate.
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+struct CandidateClaims {
+ /// The relay-parent committed to by the candidate.
+ relay_parent: Hash,
+ /// The group index assigned to this candidate.
+ group_index: GroupIndex,
+ /// The hash of the parent head-data and the ParaId. This is optional,
+ /// as only some types of advertisements include this data.
+ parent_hash_and_id: Option<(Hash, ParaId)>,
+}
+
+impl CandidateClaims {
+ fn check(
+ &self,
+ relay_parent: Hash,
+ group_index: GroupIndex,
+ parent_hash: Hash,
+ para_id: ParaId,
+ ) -> bool {
+ self.relay_parent == relay_parent &&
+ self.group_index == group_index &&
+ self.parent_hash_and_id.map_or(true, |p| p == (parent_hash, para_id))
+ }
+}
+
+// properties of an unconfirmed but hypothetically importable candidate.
+#[derive(Debug, Hash, PartialEq, Eq)]
+struct UnconfirmedImportable {
+ relay_parent: Hash,
+ parent_hash: Hash,
+ para_id: ParaId,
+}
+
+// An unconfirmed candidate may have been advertised under
+// multiple identifiers. We track here, on the basis of unique identifier,
+// the peers which advertised each candidate in a specific way.
+#[derive(Debug, PartialEq)]
+struct UnconfirmedCandidate {
+ claims: Vec<(PeerId, CandidateClaims)>,
+ // ref-counted
+ parent_claims: HashMap<(Hash, ParaId), Vec<(Hash, usize)>>,
+ unconfirmed_importable_under: HashSet<(Hash, UnconfirmedImportable)>,
+}
+
+impl UnconfirmedCandidate {
+ fn add_claims(&mut self, peer: PeerId, claims: CandidateClaims) {
+ // This does no deduplication, but this is only called after
+ // spam prevention is already done. In practice we expect that
+ // each peer will be able to announce the same candidate about once per live relay-parent,
+ // but in doing so it limits the number of other candidates it can advertise. On balance,
+ // memory consumption is bounded in the same way.
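+ // The per-(parent, para) claims below are reference-counted by
+ // relay-parent, so that `on_deactivate_leaves` above can decrement and
+ // clean up the `parent_claims` index as individual claims expire.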
+ if let Some(parent_claims) = claims.parent_hash_and_id { + let sub_claims = self.parent_claims.entry(parent_claims).or_default(); + match sub_claims.iter().position(|x| x.0 == claims.relay_parent) { + Some(p) => sub_claims[p].1 += 1, + None => sub_claims.push((claims.relay_parent, 1)), + } + } + self.claims.push((peer, claims)); + } + + fn note_maybe_importable_under( + &mut self, + active_leaf: Hash, + unconfirmed_importable: UnconfirmedImportable, + ) { + self.unconfirmed_importable_under.insert((active_leaf, unconfirmed_importable)); + } + + fn on_deactivate_leaves( + &mut self, + leaves: &[Hash], + mut remove_parent_index: impl FnMut(Hash, ParaId), + relay_parent_live: impl Fn(&Hash) -> bool, + ) { + self.claims.retain(|c| { + if relay_parent_live(&c.1.relay_parent) { + true + } else { + if let Some(parent_claims) = c.1.parent_hash_and_id { + if let Entry::Occupied(mut e) = self.parent_claims.entry(parent_claims) { + if let Some(p) = e.get().iter().position(|x| x.0 == c.1.relay_parent) { + let sub_claims = e.get_mut(); + sub_claims[p].1 -= 1; + if sub_claims[p].1 == 0 { + sub_claims.remove(p); + } + }; + + if e.get().is_empty() { + remove_parent_index(parent_claims.0, parent_claims.1); + e.remove(); + } + } + } + + false + } + }); + + self.unconfirmed_importable_under + .retain(|(l, props)| leaves.contains(l) && relay_parent_live(&props.relay_parent)); + } + + fn extend_hypotheticals( + &self, + candidate_hash: CandidateHash, + v: &mut Vec, + required_parent: Option<(Hash, ParaId)>, + ) { + fn extend_hypotheticals_inner<'a>( + candidate_hash: CandidateHash, + v: &mut Vec, + i: impl IntoIterator)>, + ) { + for ((parent_head_hash, para_id), possible_relay_parents) in i { + for (relay_parent, _rc) in possible_relay_parents { + v.push(HypotheticalCandidate::Incomplete { + candidate_hash, + candidate_para: *para_id, + parent_head_data_hash: *parent_head_hash, + candidate_relay_parent: *relay_parent, + }); + } + } + } + + match required_parent { + Some(parent) => extend_hypotheticals_inner( + candidate_hash, + v, + self.parent_claims.get_key_value(&parent), + ), + None => extend_hypotheticals_inner(candidate_hash, v, self.parent_claims.iter()), + } + } + + fn has_claims(&self) -> bool { + !self.claims.is_empty() + } +} + +/// A confirmed candidate. +#[derive(Debug, PartialEq)] +pub struct ConfirmedCandidate { + receipt: Arc, + persisted_validation_data: PersistedValidationData, + assigned_group: GroupIndex, + parent_hash: Hash, + // active leaves statements about this candidate are importable under. + importable_under: HashSet, +} + +impl ConfirmedCandidate { + /// Get the relay-parent of the candidate. + pub fn relay_parent(&self) -> Hash { + self.receipt.descriptor().relay_parent + } + + /// Get the para-id of the candidate. + pub fn para_id(&self) -> ParaId { + self.receipt.descriptor().para_id + } + + /// Get the underlying candidate receipt. + pub fn candidate_receipt(&self) -> &Arc { + &self.receipt + } + + /// Get the persisted validation data. + pub fn persisted_validation_data(&self) -> &PersistedValidationData { + &self.persisted_validation_data + } + + /// Whether the candidate is importable. + pub fn is_importable<'a>(&self, under_active_leaf: impl Into>) -> bool { + match under_active_leaf.into() { + Some(h) => self.importable_under.contains(h), + None => !self.importable_under.is_empty(), + } + } + + /// Get the parent head data hash. + pub fn parent_head_data_hash(&self) -> Hash { + self.parent_hash + } + + /// Get the group index of the assigned group. 
Note that this is in the context + /// of the state of the chain at the candidate's relay parent and its para-id. + pub fn group_index(&self) -> GroupIndex { + self.assigned_group + } + + fn to_hypothetical(&self, candidate_hash: CandidateHash) -> HypotheticalCandidate { + HypotheticalCandidate::Complete { + candidate_hash, + receipt: self.receipt.clone(), + persisted_validation_data: self.persisted_validation_data.clone(), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use polkadot_primitives::HeadData; + use polkadot_primitives_test_helpers::make_candidate; + + #[test] + fn inserting_unconfirmed_rejects_on_incompatible_claims() { + let relay_head_data_a = HeadData(vec![1, 2, 3]); + let relay_head_data_b = HeadData(vec![4, 5, 6]); + let relay_hash_a = relay_head_data_a.hash(); + let relay_hash_b = relay_head_data_b.hash(); + + let para_id_a = 1.into(); + let para_id_b = 2.into(); + + let (candidate_a, pvd_a) = make_candidate( + relay_hash_a, + 1, + para_id_a, + relay_head_data_a, + HeadData(vec![1]), + Hash::from_low_u64_be(1000).into(), + ); + + let candidate_hash_a = candidate_a.hash(); + + let peer = PeerId::random(); + + let group_index_a = 100.into(); + let group_index_b = 200.into(); + + let mut candidates = Candidates::default(); + + // Confirm a candidate first. + candidates.confirm_candidate(candidate_hash_a, candidate_a, pvd_a, group_index_a); + + // Relay parent does not match. + assert_eq!( + candidates.insert_unconfirmed( + peer, + candidate_hash_a, + relay_hash_b, + group_index_a, + Some((relay_hash_a, para_id_a)), + ), + Err(BadAdvertisement) + ); + + // Group index does not match. + assert_eq!( + candidates.insert_unconfirmed( + peer, + candidate_hash_a, + relay_hash_a, + group_index_b, + Some((relay_hash_a, para_id_a)), + ), + Err(BadAdvertisement) + ); + + // Parent head data does not match. + assert_eq!( + candidates.insert_unconfirmed( + peer, + candidate_hash_a, + relay_hash_a, + group_index_a, + Some((relay_hash_b, para_id_a)), + ), + Err(BadAdvertisement) + ); + + // Para ID does not match. + assert_eq!( + candidates.insert_unconfirmed( + peer, + candidate_hash_a, + relay_hash_a, + group_index_a, + Some((relay_hash_a, para_id_b)), + ), + Err(BadAdvertisement) + ); + + // Everything matches. + assert_eq!( + candidates.insert_unconfirmed( + peer, + candidate_hash_a, + relay_hash_a, + group_index_a, + Some((relay_hash_a, para_id_a)), + ), + Ok(()) + ); + } + + // Tests that: + // + // - When the advertisement matches, confirming does not change the parent hash index. + // - When it doesn't match, confirming updates the index. Specifically, confirming should prune + // unconfirmed claims. 
+ #[test] + fn confirming_maintains_parent_hash_index() { + let relay_head_data = HeadData(vec![1, 2, 3]); + let relay_hash = relay_head_data.hash(); + + let candidate_head_data_a = HeadData(vec![1]); + let candidate_head_data_b = HeadData(vec![2]); + let candidate_head_data_c = HeadData(vec![3]); + let candidate_head_data_d = HeadData(vec![4]); + let candidate_head_data_hash_a = candidate_head_data_a.hash(); + let candidate_head_data_hash_b = candidate_head_data_b.hash(); + let candidate_head_data_hash_c = candidate_head_data_c.hash(); + + let (candidate_a, pvd_a) = make_candidate( + relay_hash, + 1, + 1.into(), + relay_head_data, + candidate_head_data_a.clone(), + Hash::from_low_u64_be(1000).into(), + ); + let (candidate_b, pvd_b) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_a, + candidate_head_data_b.clone(), + Hash::from_low_u64_be(2000).into(), + ); + let (candidate_c, _) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_b.clone(), + candidate_head_data_c.clone(), + Hash::from_low_u64_be(3000).into(), + ); + let (candidate_d, pvd_d) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_c.clone(), + candidate_head_data_d, + Hash::from_low_u64_be(4000).into(), + ); + + let candidate_hash_a = candidate_a.hash(); + let candidate_hash_b = candidate_b.hash(); + let candidate_hash_c = candidate_c.hash(); + let candidate_hash_d = candidate_d.hash(); + + let peer = PeerId::random(); + let group_index = 100.into(); + + let mut candidates = Candidates::default(); + + // Insert some unconfirmed candidates. + + // Advertise A without parent hash. + candidates + .insert_unconfirmed(peer, candidate_hash_a, relay_hash, group_index, None) + .ok() + .unwrap(); + assert_eq!(candidates.by_parent, HashMap::default()); + + // Advertise A with parent hash and ID. + candidates + .insert_unconfirmed( + peer, + candidate_hash_a, + relay_hash, + group_index, + Some((relay_hash, 1.into())), + ) + .ok() + .unwrap(); + assert_eq!( + candidates.by_parent, + HashMap::from([((relay_hash, 1.into()), HashSet::from([candidate_hash_a]))]) + ); + + // Advertise B with parent A. + candidates + .insert_unconfirmed( + peer, + candidate_hash_b, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ((candidate_head_data_hash_a, 1.into()), HashSet::from([candidate_hash_b])) + ]) + ); + + // Advertise C with parent A. + candidates + .insert_unconfirmed( + peer, + candidate_hash_c, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ( + (candidate_head_data_hash_a, 1.into()), + HashSet::from([candidate_hash_b, candidate_hash_c]) + ) + ]) + ); + + // Advertise D with parent A. + candidates + .insert_unconfirmed( + peer, + candidate_hash_d, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ( + (candidate_head_data_hash_a, 1.into()), + HashSet::from([candidate_hash_b, candidate_hash_c, candidate_hash_d]) + ) + ]) + ); + + // Insert confirmed candidates and check parent hash index. + + // Confirmation matches advertisement. Index should be unchanged. 
+ candidates.confirm_candidate(candidate_hash_a, candidate_a, pvd_a, group_index); + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ( + (candidate_head_data_hash_a, 1.into()), + HashSet::from([candidate_hash_b, candidate_hash_c, candidate_hash_d]) + ) + ]) + ); + candidates.confirm_candidate(candidate_hash_b, candidate_b, pvd_b, group_index); + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ( + (candidate_head_data_hash_a, 1.into()), + HashSet::from([candidate_hash_b, candidate_hash_c, candidate_hash_d]) + ) + ]) + ); + + // Confirmation does not match advertisement. Index should be updated. + candidates.confirm_candidate(candidate_hash_d, candidate_d, pvd_d, group_index); + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ( + (candidate_head_data_hash_a, 1.into()), + HashSet::from([candidate_hash_b, candidate_hash_c]) + ), + ((candidate_head_data_hash_c, 1.into()), HashSet::from([candidate_hash_d])) + ]) + ); + + // Make a new candidate for C with a different para ID. + let (new_candidate_c, new_pvd_c) = make_candidate( + relay_hash, + 1, + 2.into(), + candidate_head_data_b, + candidate_head_data_c.clone(), + Hash::from_low_u64_be(3000).into(), + ); + candidates.confirm_candidate(candidate_hash_c, new_candidate_c, new_pvd_c, group_index); + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ((candidate_head_data_hash_a, 1.into()), HashSet::from([candidate_hash_b])), + ((candidate_head_data_hash_b, 2.into()), HashSet::from([candidate_hash_c])), + ((candidate_head_data_hash_c, 1.into()), HashSet::from([candidate_hash_d])) + ]) + ); + } + + #[test] + fn test_returned_post_confirmation() { + let relay_head_data = HeadData(vec![1, 2, 3]); + let relay_hash = relay_head_data.hash(); + + let candidate_head_data_a = HeadData(vec![1]); + let candidate_head_data_b = HeadData(vec![2]); + let candidate_head_data_c = HeadData(vec![3]); + let candidate_head_data_d = HeadData(vec![4]); + let candidate_head_data_hash_a = candidate_head_data_a.hash(); + let candidate_head_data_hash_b = candidate_head_data_b.hash(); + + let (candidate_a, pvd_a) = make_candidate( + relay_hash, + 1, + 1.into(), + relay_head_data, + candidate_head_data_a.clone(), + Hash::from_low_u64_be(1000).into(), + ); + let (candidate_b, pvd_b) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_a.clone(), + candidate_head_data_b.clone(), + Hash::from_low_u64_be(2000).into(), + ); + let (candidate_c, _) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_a.clone(), + candidate_head_data_c.clone(), + Hash::from_low_u64_be(3000).into(), + ); + let (candidate_d, pvd_d) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_b.clone(), + candidate_head_data_d, + Hash::from_low_u64_be(4000).into(), + ); + + let candidate_hash_a = candidate_a.hash(); + let candidate_hash_b = candidate_b.hash(); + let candidate_hash_c = candidate_c.hash(); + let candidate_hash_d = candidate_d.hash(); + + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + + let group_index = 100.into(); + + let mut candidates = Candidates::default(); + + // Insert some unconfirmed candidates. + + // Advertise A without parent hash. 
+ candidates + .insert_unconfirmed(peer_a, candidate_hash_a, relay_hash, group_index, None) + .ok() + .unwrap(); + + // Advertise A with parent hash and ID. + candidates + .insert_unconfirmed( + peer_a, + candidate_hash_a, + relay_hash, + group_index, + Some((relay_hash, 1.into())), + ) + .ok() + .unwrap(); + + // (Correctly) advertise B with parent A. Do it from a couple of peers. + candidates + .insert_unconfirmed( + peer_a, + candidate_hash_b, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); + candidates + .insert_unconfirmed( + peer_b, + candidate_hash_b, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); + + // (Wrongly) advertise C with parent A. Do it from a couple peers. + candidates + .insert_unconfirmed( + peer_b, + candidate_hash_c, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); + candidates + .insert_unconfirmed( + peer_c, + candidate_hash_c, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); + + // Advertise D. Do it correctly from one peer (parent B) and wrongly from another (parent A). + candidates + .insert_unconfirmed( + peer_c, + candidate_hash_d, + relay_hash, + group_index, + Some((candidate_head_data_hash_b, 1.into())), + ) + .ok() + .unwrap(); + candidates + .insert_unconfirmed( + peer_d, + candidate_hash_d, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); + + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ( + (candidate_head_data_hash_a, 1.into()), + HashSet::from([candidate_hash_b, candidate_hash_c, candidate_hash_d]) + ), + ((candidate_head_data_hash_b, 1.into()), HashSet::from([candidate_hash_d])) + ]) + ); + + // Insert confirmed candidates and check parent hash index. + + // Confirmation matches advertisement. + let post_confirmation = candidates.confirm_candidate( + candidate_hash_a, + candidate_a.clone(), + pvd_a.clone(), + group_index, + ); + assert_eq!( + post_confirmation, + Some(PostConfirmation { + hypothetical: HypotheticalCandidate::Complete { + candidate_hash: candidate_hash_a, + receipt: Arc::new(candidate_a), + persisted_validation_data: pvd_a, + }, + reckoning: PostConfirmationReckoning { + correct: HashSet::from([peer_a]), + incorrect: HashSet::from([]), + }, + }) + ); + + let post_confirmation = candidates.confirm_candidate( + candidate_hash_b, + candidate_b.clone(), + pvd_b.clone(), + group_index, + ); + assert_eq!( + post_confirmation, + Some(PostConfirmation { + hypothetical: HypotheticalCandidate::Complete { + candidate_hash: candidate_hash_b, + receipt: Arc::new(candidate_b), + persisted_validation_data: pvd_b, + }, + reckoning: PostConfirmationReckoning { + correct: HashSet::from([peer_a, peer_b]), + incorrect: HashSet::from([]), + }, + }) + ); + + // Confirm candidate with two wrong peers (different group index). 
+ let (new_candidate_c, new_pvd_c) = make_candidate( + relay_hash, + 1, + 2.into(), + candidate_head_data_b, + candidate_head_data_c.clone(), + Hash::from_low_u64_be(3000).into(), + ); + let post_confirmation = candidates.confirm_candidate( + candidate_hash_c, + new_candidate_c.clone(), + new_pvd_c.clone(), + group_index, + ); + assert_eq!( + post_confirmation, + Some(PostConfirmation { + hypothetical: HypotheticalCandidate::Complete { + candidate_hash: candidate_hash_c, + receipt: Arc::new(new_candidate_c), + persisted_validation_data: new_pvd_c, + }, + reckoning: PostConfirmationReckoning { + correct: HashSet::from([]), + incorrect: HashSet::from([peer_b, peer_c]), + }, + }) + ); + + // Confirm candidate with one wrong peer (different parent head data). + let post_confirmation = candidates.confirm_candidate( + candidate_hash_d, + candidate_d.clone(), + pvd_d.clone(), + group_index, + ); + assert_eq!( + post_confirmation, + Some(PostConfirmation { + hypothetical: HypotheticalCandidate::Complete { + candidate_hash: candidate_hash_d, + receipt: Arc::new(candidate_d), + persisted_validation_data: pvd_d, + }, + reckoning: PostConfirmationReckoning { + correct: HashSet::from([peer_c]), + incorrect: HashSet::from([peer_d]), + }, + }) + ); + } + + #[test] + fn test_hypothetical_frontiers() { + let relay_head_data = HeadData(vec![1, 2, 3]); + let relay_hash = relay_head_data.hash(); + + let candidate_head_data_a = HeadData(vec![1]); + let candidate_head_data_b = HeadData(vec![2]); + let candidate_head_data_c = HeadData(vec![3]); + let candidate_head_data_d = HeadData(vec![4]); + let candidate_head_data_hash_a = candidate_head_data_a.hash(); + let candidate_head_data_hash_b = candidate_head_data_b.hash(); + let candidate_head_data_hash_d = candidate_head_data_d.hash(); + + let (candidate_a, pvd_a) = make_candidate( + relay_hash, + 1, + 1.into(), + relay_head_data, + candidate_head_data_a.clone(), + Hash::from_low_u64_be(1000).into(), + ); + let (candidate_b, _) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_a.clone(), + candidate_head_data_b.clone(), + Hash::from_low_u64_be(2000).into(), + ); + let (candidate_c, _) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_a.clone(), + candidate_head_data_c.clone(), + Hash::from_low_u64_be(3000).into(), + ); + let (candidate_d, _) = make_candidate( + relay_hash, + 1, + 1.into(), + candidate_head_data_b.clone(), + candidate_head_data_d, + Hash::from_low_u64_be(4000).into(), + ); + + let candidate_hash_a = candidate_a.hash(); + let candidate_hash_b = candidate_b.hash(); + let candidate_hash_c = candidate_c.hash(); + let candidate_hash_d = candidate_d.hash(); + + let peer = PeerId::random(); + let group_index = 100.into(); + + let mut candidates = Candidates::default(); + + // Confirm A. + candidates.confirm_candidate( + candidate_hash_a, + candidate_a.clone(), + pvd_a.clone(), + group_index, + ); + + // Advertise B with parent A. + candidates + .insert_unconfirmed( + peer, + candidate_hash_b, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); + + // Advertise C with parent A. + candidates + .insert_unconfirmed( + peer, + candidate_hash_c, + relay_hash, + group_index, + Some((candidate_head_data_hash_a, 1.into())), + ) + .ok() + .unwrap(); + + // Advertise D with parent B. 
+ candidates + .insert_unconfirmed( + peer, + candidate_hash_d, + relay_hash, + group_index, + Some((candidate_head_data_hash_b, 1.into())), + ) + .ok() + .unwrap(); + + assert_eq!( + candidates.by_parent, + HashMap::from([ + ((relay_hash, 1.into()), HashSet::from([candidate_hash_a])), + ( + (candidate_head_data_hash_a, 1.into()), + HashSet::from([candidate_hash_b, candidate_hash_c]) + ), + ((candidate_head_data_hash_b, 1.into()), HashSet::from([candidate_hash_d])) + ]) + ); + + let hypothetical_a = HypotheticalCandidate::Complete { + candidate_hash: candidate_hash_a, + receipt: Arc::new(candidate_a), + persisted_validation_data: pvd_a, + }; + let hypothetical_b = HypotheticalCandidate::Incomplete { + candidate_hash: candidate_hash_b, + candidate_para: 1.into(), + parent_head_data_hash: candidate_head_data_hash_a, + candidate_relay_parent: relay_hash, + }; + let hypothetical_c = HypotheticalCandidate::Incomplete { + candidate_hash: candidate_hash_c, + candidate_para: 1.into(), + parent_head_data_hash: candidate_head_data_hash_a, + candidate_relay_parent: relay_hash, + }; + let hypothetical_d = HypotheticalCandidate::Incomplete { + candidate_hash: candidate_hash_d, + candidate_para: 1.into(), + parent_head_data_hash: candidate_head_data_hash_b, + candidate_relay_parent: relay_hash, + }; + + let hypotheticals = candidates.frontier_hypotheticals(Some((relay_hash, 1.into()))); + assert_eq!(hypotheticals.len(), 1); + assert!(hypotheticals.contains(&hypothetical_a)); + + let hypotheticals = + candidates.frontier_hypotheticals(Some((candidate_head_data_hash_a, 2.into()))); + assert_eq!(hypotheticals.len(), 0); + + let hypotheticals = + candidates.frontier_hypotheticals(Some((candidate_head_data_hash_a, 1.into()))); + assert_eq!(hypotheticals.len(), 2); + assert!(hypotheticals.contains(&hypothetical_b)); + assert!(hypotheticals.contains(&hypothetical_c)); + + let hypotheticals = + candidates.frontier_hypotheticals(Some((candidate_head_data_hash_d, 1.into()))); + assert_eq!(hypotheticals.len(), 0); + + let hypotheticals = candidates.frontier_hypotheticals(None); + assert_eq!(hypotheticals.len(), 4); + assert!(hypotheticals.contains(&hypothetical_a)); + assert!(hypotheticals.contains(&hypothetical_b)); + assert!(hypotheticals.contains(&hypothetical_c)); + assert!(hypotheticals.contains(&hypothetical_d)); + } +} diff --git a/node/network/statement-distribution/src/vstaging/cluster.rs b/node/network/statement-distribution/src/vstaging/cluster.rs new file mode 100644 index 000000000000..49852a912d2f --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/cluster.rs @@ -0,0 +1,1203 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Direct distribution of statements within a cluster, +//! even those concerning candidates which are not yet backed. +//! +//! Members of a validation group assigned to a para at a given relay-parent +//! 
always distribute statements directly to each other.
+//!
+//! The main way we limit the number of candidates that have to be handled by
+//! the system is to limit the number of `Seconded` messages that we allow
+//! each validator to issue at each relay-parent. Since the number of relay-parents
+//! that we have to deal with at any time is itself bounded, this lets us bound
+//! the memory and work that we have here. Bounding `Seconded` statements is enough
+//! because they imply a bounded number of `Valid` statements about the same candidate
+//! which may follow.
+//!
+//! The motivation for this piece of code is that the statements that each validator
+//! sees may differ. That is, even though a validator is allowed to issue X `Seconded`
+//! statements at a relay-parent, they may in fact issue X*2, sending one set to
+//! one partition of the backing group and one set to another. Of course, in practice
+//! these types of partitions will not exist, but in the worst case each validator in the
+//! group would see an entirely different set of X `Seconded` statements from some validator
+//! and each validator is in its own partition. After that partition resolves, we'd have to
+//! deal with up to `limit*group_size` `Seconded` statements from that validator. And then
+//! if every validator in the group does the same thing, we're dealing with something like
+//! `limit*group_size^2` `Seconded` statements in total.
+//!
+//! Given that both our group sizes and our limits per relay-parent are small, this is
+//! quite manageable, and the utility here lets us deal with it in only a few kilobytes
+//! of memory.
+//!
+//! It's also worth noting that any case where a validator issues more than the legal limit
+//! of `Seconded` statements at a relay-parent is trivially slashable on-chain, which means
+//! the 'worst case' adversary that this code defends against is effectively lighting money
+//! on fire. Nevertheless, we handle the case here to ensure that the behavior of the
+//! system is well-defined even if an adversary is willing to be slashed.
+//!
+//! More concretely, this module exposes a [`ClusterTracker`] utility which allows us to determine
+//! whether to accept or reject messages from other validators in the same group as we
+//! are in, based on _the most charitable possible interpretation of our protocol rules_,
+//! and to keep track of what we have sent to other validators in the group and what we may
+//! continue to send them.
+
+use polkadot_primitives::vstaging::{CandidateHash, CompactStatement, ValidatorIndex};
+
+use std::collections::{HashMap, HashSet};
+
+#[derive(Hash, PartialEq, Eq)]
+struct ValidStatementManifest {
+	remote: ValidatorIndex,
+	originator: ValidatorIndex,
+	candidate_hash: CandidateHash,
+}
+
+// A piece of knowledge about a candidate.
+#[derive(Hash, Clone, PartialEq, Eq)]
+enum Knowledge {
+	// General knowledge.
+	General(CandidateHash),
+	// Specific knowledge of a given statement (with its originator).
+	Specific(CompactStatement, ValidatorIndex),
+}
+
+// Knowledge paired with its source.
+#[derive(Hash, Clone, PartialEq, Eq)]
+enum TaggedKnowledge {
+	// Knowledge we have received from the validator on the p2p layer.
+	IncomingP2P(Knowledge),
+	// Knowledge we have sent to the validator on the p2p layer.
+	OutgoingP2P(Knowledge),
+	// Knowledge of candidates the validator has seconded.
+	// This is limited only to `Seconded` statements we have accepted
+	// _without prejudice_.
+	Seconded(CandidateHash),
+}
+
+/// Utility for keeping track of limits on direct statements within a group.
+///
+/// See module docs for more details.
+pub struct ClusterTracker {
+	validators: Vec<ValidatorIndex>,
+	seconding_limit: usize,
+	knowledge: HashMap<ValidatorIndex, HashSet<TaggedKnowledge>>,
+	// Statements known locally which haven't been sent to particular validators.
+	// Maps target validator to (originator, statement) pairs.
+	pending: HashMap<ValidatorIndex, HashSet<(ValidatorIndex, CompactStatement)>>,
+}
+
+impl ClusterTracker {
+	/// Instantiate a new `ClusterTracker`. Fails if `cluster_validators` is empty.
+	pub fn new(cluster_validators: Vec<ValidatorIndex>, seconding_limit: usize) -> Option<Self> {
+		if cluster_validators.is_empty() {
+			return None
+		}
+		Some(ClusterTracker {
+			validators: cluster_validators,
+			seconding_limit,
+			knowledge: HashMap::new(),
+			pending: HashMap::new(),
+		})
+	}
+
+	/// Query whether we can receive some statement from the given validator.
+	///
+	/// This does no deduplication of `Valid` statements.
+	pub fn can_receive(
+		&self,
+		sender: ValidatorIndex,
+		originator: ValidatorIndex,
+		statement: CompactStatement,
+	) -> Result<Accept, RejectIncoming> {
+		if !self.is_in_group(sender) || !self.is_in_group(originator) {
+			return Err(RejectIncoming::NotInGroup)
+		}
+
+		if self.they_sent(sender, Knowledge::Specific(statement.clone(), originator)) {
+			return Err(RejectIncoming::Duplicate)
+		}
+
+		match statement {
+			CompactStatement::Seconded(candidate_hash) => {
+				// Check whether the sender has already sent the maximum number of `Seconded`
+				// statements for this originator. We know by the duplicate check above that
+				// this iterator doesn't include the statement itself.
+				let other_seconded_for_orig_from_remote = self
+					.knowledge
+					.get(&sender)
+					.into_iter()
+					.flat_map(|v_knowledge| v_knowledge.iter())
+					.filter(|k| match k {
+						TaggedKnowledge::IncomingP2P(Knowledge::Specific(
+							CompactStatement::Seconded(_),
+							orig,
+						)) if orig == &originator => true,
+						_ => false,
+					})
+					.count();
+
+				if other_seconded_for_orig_from_remote == self.seconding_limit {
+					return Err(RejectIncoming::ExcessiveSeconded)
+				}
+
+				// At this point, it doesn't seem like the remote has done anything wrong.
+				if self.seconded_already_or_within_limit(originator, candidate_hash) {
+					Ok(Accept::Ok)
+				} else {
+					Ok(Accept::WithPrejudice)
+				}
+			},
+			CompactStatement::Valid(candidate_hash) => {
+				if !self.knows_candidate(sender, candidate_hash) {
+					return Err(RejectIncoming::CandidateUnknown)
+				}
+
+				Ok(Accept::Ok)
+			},
+		}
+	}
+
+	/// Note that we issued a statement. This updates internal structures.
+	pub fn note_issued(&mut self, originator: ValidatorIndex, statement: CompactStatement) {
+		for cluster_member in &self.validators {
+			if !self.they_know_statement(*cluster_member, originator, statement.clone()) {
+				// Add the statement to pending knowledge for all peers
+				// which don't know the statement.
+				self.pending
+					.entry(*cluster_member)
+					.or_default()
+					.insert((originator, statement.clone()));
+			}
+		}
+	}
+
+	/// Note that we accepted an incoming statement. This updates internal structures.
+	///
+	/// Should only be called after a successful `can_receive` call.
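+	///
+	/// For illustration only, a typical receive path might look like this
+	/// (hypothetical surrounding code; `tracker` is a `ClusterTracker`):
+	///
+	/// ```ignore
+	/// match tracker.can_receive(sender, originator, statement.clone()) {
+	/// 	Ok(_accept) => tracker.note_received(sender, originator, statement),
+	/// 	Err(_reject) => { /* report or ignore the peer */ },
+	/// }
+	/// ```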
+ pub fn note_received( + &mut self, + sender: ValidatorIndex, + originator: ValidatorIndex, + statement: CompactStatement, + ) { + for cluster_member in &self.validators { + if cluster_member == &sender { + if let Some(pending) = self.pending.get_mut(&sender) { + pending.remove(&(originator, statement.clone())); + } + } else if !self.they_know_statement(*cluster_member, originator, statement.clone()) { + // add the statement to pending knowledge for all peers + // which don't know the statement. + self.pending + .entry(*cluster_member) + .or_default() + .insert((originator, statement.clone())); + } + } + + { + let sender_knowledge = self.knowledge.entry(sender).or_default(); + sender_knowledge.insert(TaggedKnowledge::IncomingP2P(Knowledge::Specific( + statement.clone(), + originator, + ))); + + if let CompactStatement::Seconded(candidate_hash) = statement.clone() { + sender_knowledge + .insert(TaggedKnowledge::IncomingP2P(Knowledge::General(candidate_hash))); + } + } + + if let CompactStatement::Seconded(candidate_hash) = statement { + // since we accept additional `Seconded` statements beyond the limits + // 'with prejudice', we must respect the limit here. + if self.seconded_already_or_within_limit(originator, candidate_hash) { + let originator_knowledge = self.knowledge.entry(originator).or_default(); + originator_knowledge.insert(TaggedKnowledge::Seconded(candidate_hash)); + } + } + } + + /// Query whether we can send a statement to a given validator. + pub fn can_send( + &self, + target: ValidatorIndex, + originator: ValidatorIndex, + statement: CompactStatement, + ) -> Result<(), RejectOutgoing> { + if !self.is_in_group(target) || !self.is_in_group(originator) { + return Err(RejectOutgoing::NotInGroup) + } + + if self.they_know_statement(target, originator, statement.clone()) { + return Err(RejectOutgoing::Known) + } + + match statement { + CompactStatement::Seconded(candidate_hash) => { + // we send the same `Seconded` statements to all our peers, and only the first `k` from + // each originator. + if !self.seconded_already_or_within_limit(originator, candidate_hash) { + return Err(RejectOutgoing::ExcessiveSeconded) + } + + Ok(()) + }, + CompactStatement::Valid(candidate_hash) => { + if !self.knows_candidate(target, candidate_hash) { + return Err(RejectOutgoing::CandidateUnknown) + } + + Ok(()) + }, + } + } + + /// Note that we sent an outgoing statement to a peer in the group. + /// This must be preceded by a successful `can_send` call. + pub fn note_sent( + &mut self, + target: ValidatorIndex, + originator: ValidatorIndex, + statement: CompactStatement, + ) { + { + let target_knowledge = self.knowledge.entry(target).or_default(); + target_knowledge.insert(TaggedKnowledge::OutgoingP2P(Knowledge::Specific( + statement.clone(), + originator, + ))); + + if let CompactStatement::Seconded(candidate_hash) = statement.clone() { + target_knowledge + .insert(TaggedKnowledge::OutgoingP2P(Knowledge::General(candidate_hash))); + } + } + + if let CompactStatement::Seconded(candidate_hash) = statement { + let originator_knowledge = self.knowledge.entry(originator).or_default(); + originator_knowledge.insert(TaggedKnowledge::Seconded(candidate_hash)); + } + + if let Some(pending) = self.pending.get_mut(&target) { + pending.remove(&(originator, statement)); + } + } + + /// Get all targets as validator-indices. This doesn't attempt to filter + /// out the local validator index. 
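+	///
+	/// For illustration only, a send loop over the cluster might look like this
+	/// (hypothetical surrounding code; the network send itself is elided):
+	///
+	/// ```ignore
+	/// let targets = tracker.targets().to_vec();
+	/// for target in targets {
+	/// 	for (originator, statement) in tracker.pending_statements_for(target) {
+	/// 		if tracker.can_send(target, originator, statement.clone()).is_ok() {
+	/// 			// ... send the statement over the network, then:
+	/// 			tracker.note_sent(target, originator, statement);
+	/// 		}
+	/// 	}
+	/// }
+	/// ```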
+	pub fn targets(&self) -> &[ValidatorIndex] {
+		&self.validators
+	}
+
+	/// Get all possible senders for the given originator.
+	/// Returns the empty slice in the case that the originator
+	/// is not part of the cluster.
+	// note: this API is future-proofing for a case where we may
+	// extend clusters beyond just the assigned group, for optimization
+	// purposes.
+	pub fn senders_for_originator(&self, originator: ValidatorIndex) -> &[ValidatorIndex] {
+		if self.validators.contains(&originator) {
+			&self.validators[..]
+		} else {
+			&[]
+		}
+	}
+
+	/// Whether a validator knows the candidate is `Seconded`.
+	pub fn knows_candidate(
+		&self,
+		validator: ValidatorIndex,
+		candidate_hash: CandidateHash,
+	) -> bool {
+		// we sent, they sent, or they signed and we received from someone else.
+
+		self.we_sent_seconded(validator, candidate_hash) ||
+			self.they_sent_seconded(validator, candidate_hash) ||
+			self.validator_seconded(validator, candidate_hash)
+	}
+
+	/// Returns a Vec of pending statements to be sent to a particular validator
+	/// index. `Seconded` statements are sorted to the front of the vector.
+	///
+	/// Pending statements have the form (originator, compact statement).
+	pub fn pending_statements_for(
+		&self,
+		target: ValidatorIndex,
+	) -> Vec<(ValidatorIndex, CompactStatement)> {
+		let mut v = self
+			.pending
+			.get(&target)
+			.map(|x| x.iter().cloned().collect::<Vec<_>>())
+			.unwrap_or_default();
+
+		v.sort_by_key(|(_, s)| match s {
+			CompactStatement::Seconded(_) => 0u8,
+			CompactStatement::Valid(_) => 1u8,
+		});
+
+		v
+	}
+
+	// Returns true if it's legal to accept a new `Seconded` message from this validator.
+	// This is either:
+	// 1. because we've already accepted it, or
+	// 2. because there's space for more seconding.
+	fn seconded_already_or_within_limit(
+		&self,
+		validator: ValidatorIndex,
+		candidate_hash: CandidateHash,
+	) -> bool {
+		let seconded_other_candidates = self
+			.knowledge
+			.get(&validator)
+			.into_iter()
+			.flat_map(|v_knowledge| v_knowledge.iter())
+			.filter(|k| match k {
+				TaggedKnowledge::Seconded(c) if c != &candidate_hash => true,
+				_ => false,
+			})
+			.count();
+
+		// This fulfills both properties by under-counting when the validator is at the limit
+		// but _has_ seconded the candidate already.
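+		//
+		// For example, with `seconding_limit = 2`: if this validator has already
+		// seconded candidates {A, B}, then a query for A counts only B (1 < 2) and
+		// is accepted, while a query for a new candidate C counts both A and B
+		// (2 == 2) and is rejected.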
+ seconded_other_candidates < self.seconding_limit + } + + fn they_know_statement( + &self, + validator: ValidatorIndex, + originator: ValidatorIndex, + statement: CompactStatement, + ) -> bool { + let knowledge = Knowledge::Specific(statement, originator); + self.we_sent(validator, knowledge.clone()) || self.they_sent(validator, knowledge) + } + + fn they_sent(&self, validator: ValidatorIndex, knowledge: Knowledge) -> bool { + self.knowledge + .get(&validator) + .map_or(false, |k| k.contains(&TaggedKnowledge::IncomingP2P(knowledge))) + } + + fn we_sent(&self, validator: ValidatorIndex, knowledge: Knowledge) -> bool { + self.knowledge + .get(&validator) + .map_or(false, |k| k.contains(&TaggedKnowledge::OutgoingP2P(knowledge))) + } + + fn we_sent_seconded(&self, validator: ValidatorIndex, candidate_hash: CandidateHash) -> bool { + self.we_sent(validator, Knowledge::General(candidate_hash)) + } + + fn they_sent_seconded(&self, validator: ValidatorIndex, candidate_hash: CandidateHash) -> bool { + self.they_sent(validator, Knowledge::General(candidate_hash)) + } + + fn validator_seconded(&self, validator: ValidatorIndex, candidate_hash: CandidateHash) -> bool { + self.knowledge + .get(&validator) + .map_or(false, |k| k.contains(&TaggedKnowledge::Seconded(candidate_hash))) + } + + fn is_in_group(&self, validator: ValidatorIndex) -> bool { + self.validators.contains(&validator) + } +} + +/// Incoming statement was accepted. +#[derive(Debug, PartialEq)] +pub enum Accept { + /// Neither the peer nor the originator have apparently exceeded limits. + /// Candidate or statement may already be known. + Ok, + /// Accept the message; the peer hasn't exceeded limits but the originator has. + WithPrejudice, +} + +/// Incoming statement was rejected. +#[derive(Debug, PartialEq)] +pub enum RejectIncoming { + /// Peer sent excessive `Seconded` statements. + ExcessiveSeconded, + /// Sender or originator is not in the group. + NotInGroup, + /// Candidate is unknown to us. Only applies to `Valid` statements. + CandidateUnknown, + /// Statement is duplicate. + Duplicate, +} + +/// Outgoing statement was rejected. +#[derive(Debug, PartialEq)] +pub enum RejectOutgoing { + /// Candidate was unknown. Only applies to `Valid` statements. + CandidateUnknown, + /// We attempted to send excessive `Seconded` statements. + /// indicates a bug on the local node's code. + ExcessiveSeconded, + /// The statement was already known to the peer. + Known, + /// Target or originator not in the group. 
+ NotInGroup, +} + +#[cfg(test)] +mod tests { + use super::*; + use polkadot_primitives::vstaging::Hash; + + #[test] + fn rejects_incoming_outside_of_group() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 2; + + let tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(100), + ValidatorIndex(5), + CompactStatement::Seconded(CandidateHash(Hash::repeat_byte(1))), + ), + Err(RejectIncoming::NotInGroup), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(100), + CompactStatement::Seconded(CandidateHash(Hash::repeat_byte(1))), + ), + Err(RejectIncoming::NotInGroup), + ); + } + + #[test] + fn begrudgingly_accepts_too_many_seconded_from_multiple_peers() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 2; + let hash_a = CandidateHash(Hash::repeat_byte(1)); + let hash_b = CandidateHash(Hash::repeat_byte(2)); + let hash_c = CandidateHash(Hash::repeat_byte(3)); + + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_b), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_b), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_c), + ), + Err(RejectIncoming::ExcessiveSeconded), + ); + } + + #[test] + fn rejects_too_many_seconded_from_sender() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 2; + let hash_a = CandidateHash(Hash::repeat_byte(1)); + let hash_b = CandidateHash(Hash::repeat_byte(2)); + let hash_c = CandidateHash(Hash::repeat_byte(3)); + + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_b), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_b), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_c), + ), + Ok(Accept::WithPrejudice), + ); + } + + #[test] + fn rejects_duplicates() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 2; + let hash_a = CandidateHash(Hash::repeat_byte(1)); + + let mut tracker = ClusterTracker::new(group, seconding_limit).expect("not empty"); + + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(200), + 
CompactStatement::Valid(hash_a), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Err(RejectIncoming::Duplicate), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Valid(hash_a), + ), + Err(RejectIncoming::Duplicate), + ); + } + + #[test] + fn rejects_incoming_valid_without_seconded() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 2; + + let tracker = ClusterTracker::new(group, seconding_limit).expect("not empty"); + + let hash_a = CandidateHash(Hash::repeat_byte(1)); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Valid(hash_a), + ), + Err(RejectIncoming::CandidateUnknown), + ); + } + + #[test] + fn accepts_incoming_valid_after_receiving_seconded() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 2; + + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Valid(hash_a), + ), + Ok(Accept::Ok) + ); + } + + #[test] + fn accepts_incoming_valid_after_outgoing_seconded() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 2; + + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + + tracker.note_sent( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Valid(hash_a), + ), + Ok(Accept::Ok) + ); + } + + #[test] + fn cannot_send_too_many_seconded_even_to_multiple_peers() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 2; + + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + let hash_b = CandidateHash(Hash::repeat_byte(2)); + let hash_c = CandidateHash(Hash::repeat_byte(3)); + + tracker.note_sent( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + tracker.note_sent( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_b), + ); + + assert_eq!( + tracker.can_send( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_c), + ), + Err(RejectOutgoing::ExcessiveSeconded), + ); + + assert_eq!( + tracker.can_send( + ValidatorIndex(24), + ValidatorIndex(5), + CompactStatement::Seconded(hash_c), + ), + Err(RejectOutgoing::ExcessiveSeconded), + ); + } + + #[test] + fn cannot_send_duplicate() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 2; + + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + + tracker.note_sent( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + 
tracker.can_send( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Err(RejectOutgoing::Known), + ); + } + + #[test] + fn cannot_send_what_was_received() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 2; + + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + + tracker.note_received( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + tracker.can_send( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Err(RejectOutgoing::Known), + ); + } + + // Ensure statements received with prejudice don't prevent sending later. + #[test] + fn can_send_statements_received_with_prejudice() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 1; + + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + let hash_b = CandidateHash(Hash::repeat_byte(2)); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Ok(Accept::Ok), + ); + + tracker.note_received( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + tracker.can_receive( + ValidatorIndex(24), + ValidatorIndex(5), + CompactStatement::Seconded(hash_b), + ), + Ok(Accept::WithPrejudice), + ); + + tracker.note_received( + ValidatorIndex(24), + ValidatorIndex(5), + CompactStatement::Seconded(hash_b), + ); + + assert_eq!( + tracker.can_send( + ValidatorIndex(24), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Ok(()), + ); + } + + // Test that the `pending_statements` are set whenever we receive a fresh statement. + // + // Also test that pending statements are sorted, with `Seconded` statements in the front. + #[test] + fn pending_statements_set_when_receiving_fresh_statements() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 1; + + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + let hash_b = CandidateHash(Hash::repeat_byte(2)); + + // Receive a 'Seconded' statement for candidate A. + { + assert_eq!( + tracker.can_receive( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(5)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!(tracker.pending_statements_for(ValidatorIndex(200)), vec![]); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(24)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(146)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + } + + // Receive a 'Valid' statement for candidate A. + { + // First, send a `Seconded` statement for the candidate. 
+ assert_eq!( + tracker.can_send( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Seconded(hash_a) + ), + Ok(()) + ); + tracker.note_sent( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Seconded(hash_a), + ); + + // We have to see that the candidate is known by the sender, e.g. we sent them 'Seconded' + // above. + assert_eq!( + tracker.can_receive( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Valid(hash_a), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Valid(hash_a), + ); + + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(5)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(200), CompactStatement::Valid(hash_a)) + ] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(200)), + vec![(ValidatorIndex(200), CompactStatement::Valid(hash_a))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(24)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(146)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(200), CompactStatement::Valid(hash_a)) + ] + ); + } + + // Receive a 'Seconded' statement for candidate B. + { + assert_eq!( + tracker.can_receive( + ValidatorIndex(5), + ValidatorIndex(146), + CompactStatement::Seconded(hash_b), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(5), + ValidatorIndex(146), + CompactStatement::Seconded(hash_b), + ); + + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(5)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(200), CompactStatement::Valid(hash_a)) + ] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(200)), + vec![ + (ValidatorIndex(146), CompactStatement::Seconded(hash_b)), + (ValidatorIndex(200), CompactStatement::Valid(hash_a)), + ] + ); + { + let mut pending_statements = tracker.pending_statements_for(ValidatorIndex(24)); + pending_statements.sort(); + assert_eq!( + pending_statements, + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(146), CompactStatement::Seconded(hash_b)) + ], + ); + } + { + let mut pending_statements = tracker.pending_statements_for(ValidatorIndex(146)); + pending_statements.sort(); + assert_eq!( + pending_statements, + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(146), CompactStatement::Seconded(hash_b)), + (ValidatorIndex(200), CompactStatement::Valid(hash_a)), + ] + ); + } + } + } + + // Test that the `pending_statements` are updated when we send or receive statements from others + // in the cluster. + #[test] + fn pending_statements_updated_when_sending_statements() { + let group = + vec![ValidatorIndex(5), ValidatorIndex(200), ValidatorIndex(24), ValidatorIndex(146)]; + + let seconding_limit = 1; + + let mut tracker = ClusterTracker::new(group.clone(), seconding_limit).expect("not empty"); + let hash_a = CandidateHash(Hash::repeat_byte(1)); + let hash_b = CandidateHash(Hash::repeat_byte(2)); + + // Receive a 'Seconded' statement for candidate A. + { + assert_eq!( + tracker.can_receive( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(200), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + // Pending statements should be updated. 
+ assert_eq!( + tracker.pending_statements_for(ValidatorIndex(5)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!(tracker.pending_statements_for(ValidatorIndex(200)), vec![]); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(24)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(146)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + } + + // Receive a 'Valid' statement for candidate B. + { + // First, send a `Seconded` statement for the candidate. + assert_eq!( + tracker.can_send( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Seconded(hash_b) + ), + Ok(()) + ); + tracker.note_sent( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Seconded(hash_b), + ); + + // We have to see the candidate is known by the sender, e.g. we sent them 'Seconded'. + assert_eq!( + tracker.can_receive( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Valid(hash_b), + ), + Ok(Accept::Ok), + ); + tracker.note_received( + ValidatorIndex(24), + ValidatorIndex(200), + CompactStatement::Valid(hash_b), + ); + + // Pending statements should be updated. + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(5)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(200), CompactStatement::Valid(hash_b)) + ] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(200)), + vec![(ValidatorIndex(200), CompactStatement::Valid(hash_b))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(24)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(146)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(200), CompactStatement::Valid(hash_b)) + ] + ); + } + + // Send a 'Seconded' statement. + { + assert_eq!( + tracker.can_send( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a) + ), + Ok(()) + ); + tracker.note_sent( + ValidatorIndex(5), + ValidatorIndex(5), + CompactStatement::Seconded(hash_a), + ); + + // Pending statements should be updated. + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(5)), + vec![(ValidatorIndex(200), CompactStatement::Valid(hash_b))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(200)), + vec![(ValidatorIndex(200), CompactStatement::Valid(hash_b))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(24)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(146)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(200), CompactStatement::Valid(hash_b)) + ] + ); + } + + // Send a 'Valid' statement. + { + // First, send a `Seconded` statement for the candidate. + assert_eq!( + tracker.can_send( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Seconded(hash_b) + ), + Ok(()) + ); + tracker.note_sent( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Seconded(hash_b), + ); + + // We have to see that the candidate is known by the sender, e.g. we sent them 'Seconded' + // above. 
+ assert_eq!( + tracker.can_send( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Valid(hash_b) + ), + Ok(()) + ); + tracker.note_sent( + ValidatorIndex(5), + ValidatorIndex(200), + CompactStatement::Valid(hash_b), + ); + + // Pending statements should be updated. + assert_eq!(tracker.pending_statements_for(ValidatorIndex(5)), vec![]); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(200)), + vec![(ValidatorIndex(200), CompactStatement::Valid(hash_b))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(24)), + vec![(ValidatorIndex(5), CompactStatement::Seconded(hash_a))] + ); + assert_eq!( + tracker.pending_statements_for(ValidatorIndex(146)), + vec![ + (ValidatorIndex(5), CompactStatement::Seconded(hash_a)), + (ValidatorIndex(200), CompactStatement::Valid(hash_b)) + ] + ); + } + } +} diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs new file mode 100644 index 000000000000..bdcbabffd3e5 --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -0,0 +1,2248 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Utilities for handling distribution of backed candidates along the grid (outside the group to +//! the rest of the network). +//! +//! The grid uses the gossip topology defined in [`polkadot_node_network_protocol::grid_topology`]. +//! It defines how messages and statements are forwarded between validators. +//! +//! # Protocol +//! +//! - Once the candidate is backed, produce a 'backed candidate packet' +//! `(CommittedCandidateReceipt, Statements)`. +//! - Members of a backing group produce an announcement of a fully-backed candidate +//! (aka "full manifest") when they are finished. +//! - `BackedCandidateManifest` +//! - Manifests are sent along the grid topology to peers who have the relay-parent +//! in their implicit view. +//! - Only sent by 1st-hop nodes after downloading the backed candidate packet. +//! - The grid topology is a 2-dimensional grid that provides either a 1 +//! or 2-hop path from any originator to any recipient - 1st-hop nodes +//! are those which share either a row or column with the originator, +//! and 2nd-hop nodes are those which share a column or row with that +//! 1st-hop node. +//! - Note that for the purposes of statement distribution, we actually +//! take the union of the routing paths from each validator in a group +//! to the local node to determine the sending and receiving paths. +//! - Ignored when received out-of-topology +//! - On every local view change, members of the backing group rebroadcast the +//! manifest for all candidates under every new relay-parent across the grid. +//! - Nodes should send a `BackedCandidateAcknowledgement(CandidateHash, +//! StatementFilter)` notification to any peer which has sent a manifest, and +//! 
the candidate has been acquired by other means.
+//!   - Request/response for the candidate + votes.
+//!     - Ignore if they are inconsistent with the manifest.
+//!     - A malicious backing group is capable of producing an unbounded number of
+//!       backed candidates.
+//!       - We request the candidate only if the candidate has a hypothetical depth in
+//!         any of our fragment trees, and:
+//!       - the seconding validators have not seconded any other candidates at that
+//!         depth in any of those fragment trees
+//!   - All members of the group attempt to circulate all statements (in compact form)
+//!     from the rest of the group on candidates that have already been backed.
+//!     - They do this via the grid topology.
+//!     - They add the statements to their backed candidate packet for future
+//!       requestors, and also:
+//!     - send the statement to any peer, which:
+//!       - we advertised the backed candidate to (sent manifest), and:
+//!       - has previously & successfully requested the backed candidate packet,
+//!         or:
+//!       - which has sent a `BackedCandidateAcknowledgement`
+//!     - 1st-hop nodes do the same thing
+
+use polkadot_node_network_protocol::{
+	grid_topology::SessionGridTopology, vstaging::StatementFilter,
+};
+use polkadot_primitives::vstaging::{
+	CandidateHash, CompactStatement, GroupIndex, Hash, ValidatorIndex,
+};
+
+use std::collections::{
+	hash_map::{Entry, HashMap},
+	HashSet,
+};
+
+use bitvec::{order::Lsb0, slice::BitSlice};
+
+use super::{groups::Groups, LOG_TARGET};
+
+/// Our local view of a subset of the grid topology organized around a specific validator
+/// group.
+///
+/// This tracks which authorities we expect to communicate with concerning
+/// candidates from the group. This includes both the authorities we are
+/// expected to send to as well as the authorities we expect to receive from.
+///
+/// In the case that this group is the group that we are locally assigned to,
+/// the 'receiving' side will be empty.
+#[derive(Debug, PartialEq)]
+struct GroupSubView {
+	// validators we are 'sending' to.
+	sending: HashSet<ValidatorIndex>,
+	// validators we are 'receiving' from.
+	receiving: HashSet<ValidatorIndex>,
+}
+
+/// Our local view of the topology for a session, as it pertains to backed
+/// candidate distribution.
+#[derive(Debug)]
+pub struct SessionTopologyView {
+	group_views: HashMap<GroupIndex, GroupSubView>,
+}
+
+impl SessionTopologyView {
+	/// Returns an iterator over all validator indices from the group who are allowed to
+	/// send us manifests of the given kind.
+	pub fn iter_sending_for_group(
+		&self,
+		group: GroupIndex,
+		kind: ManifestKind,
+	) -> impl Iterator<Item = ValidatorIndex> + '_ {
+		self.group_views.get(&group).into_iter().flat_map(move |sub| match kind {
+			ManifestKind::Full => sub.receiving.iter().cloned(),
+			ManifestKind::Acknowledgement => sub.sending.iter().cloned(),
+		})
+	}
+}
+
+/// Build a view of the topology for the session.
+/// For groups that we are part of: we receive from nobody and send to our X/Y peers.
+/// For groups that we are not part of: we receive from any validator in the group we share a slice with
+/// and send to the corresponding X/Y slice in the other dimension.
+/// For any validators we don't share a slice with, we receive from the nodes
+/// which share a slice with them.
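+///
+/// For illustration, consider a hypothetical 3x3 row-major grid of nine validators
+/// (0 through 8), where validator 0 shares one slice with {1, 2} and the other with
+/// {3, 6}. For a group consisting only of validator 4 (slices {3, 5} and {1, 7}),
+/// we share no slice with the group, so we send nothing for it and receive from
+/// validators 3 and 1, each of which shares a slice with both us and validator 4.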
+pub fn build_session_topology<'a>(
+	groups: impl IntoIterator<Item = &'a Vec<ValidatorIndex>>,
+	topology: &SessionGridTopology,
+	our_index: Option<ValidatorIndex>,
+) -> SessionTopologyView {
+	let mut view = SessionTopologyView { group_views: HashMap::new() };
+
+	let our_index = match our_index {
+		None => return view,
+		Some(i) => i,
+	};
+
+	let our_neighbors = match topology.compute_grid_neighbors_for(our_index) {
+		None => {
+			gum::warn!(target: LOG_TARGET, ?our_index, "our index unrecognized in topology?");
+
+			return view
+		},
+		Some(n) => n,
+	};
+
+	for (i, group) in groups.into_iter().enumerate() {
+		let mut sub_view = GroupSubView { sending: HashSet::new(), receiving: HashSet::new() };
+
+		if group.contains(&our_index) {
+			sub_view.sending.extend(our_neighbors.validator_indices_x.iter().cloned());
+			sub_view.sending.extend(our_neighbors.validator_indices_y.iter().cloned());
+
+			// remove all other same-group validators from this set, they are
+			// in the cluster.
+			// TODO [now]: test this behavior.
+			for v in group {
+				sub_view.sending.remove(v);
+			}
+		} else {
+			for &group_val in group {
+				// If the validator shares a slice with us, we expect to
+				// receive from them and send to our neighbors in the other
+				// dimension.
+
+				if our_neighbors.validator_indices_x.contains(&group_val) {
+					sub_view.receiving.insert(group_val);
+					sub_view.sending.extend(
+						our_neighbors
+							.validator_indices_y
+							.iter()
+							.filter(|v| !group.contains(v))
+							.cloned(),
+					);
+
+					continue
+				}
+
+				if our_neighbors.validator_indices_y.contains(&group_val) {
+					sub_view.receiving.insert(group_val);
+					sub_view.sending.extend(
+						our_neighbors
+							.validator_indices_x
+							.iter()
+							.filter(|v| !group.contains(v))
+							.cloned(),
+					);
+
+					continue
+				}
+
+				// If they don't share a slice with us, we don't send to anybody
+				// but receive from any peers sharing a dimension with both of us
+				let their_neighbors = match topology.compute_grid_neighbors_for(group_val) {
+					None => {
+						gum::warn!(
+							target: LOG_TARGET,
+							index = ?group_val,
+							"validator index unrecognized in topology?"
+						);
+
+						continue
+					},
+					Some(n) => n,
+				};
+
+				// their X, our Y
+				for potential_link in &their_neighbors.validator_indices_x {
+					if our_neighbors.validator_indices_y.contains(potential_link) {
+						sub_view.receiving.insert(*potential_link);
+						break // one max
+					}
+				}
+
+				// their Y, our X
+				for potential_link in &their_neighbors.validator_indices_y {
+					if our_neighbors.validator_indices_x.contains(potential_link) {
+						sub_view.receiving.insert(*potential_link);
+						break // one max
+					}
+				}
+			}
+		}
+
+		view.group_views.insert(GroupIndex(i as _), sub_view);
+	}
+
+	view
+}
+
+/// The kind of backed candidate manifest we should send to a remote peer.
+#[derive(Debug, Clone, Copy, PartialEq)]
+pub enum ManifestKind {
+	/// Full manifests contain information about the candidate and should be sent
+	/// to peers which aren't guaranteed to have the candidate already.
+	Full,
+	/// Acknowledgement manifests omit information which is implicit in the candidate
+	/// itself, and should be sent to peers which are guaranteed to have the candidate
+	/// already.
+	Acknowledgement,
+}
+
+/// A tracker of knowledge from authorities within the grid for a particular
+/// relay-parent.
+#[derive(Default)]
+pub struct GridTracker {
+	received: HashMap<ValidatorIndex, ReceivedManifests>,
+	confirmed_backed: HashMap<CandidateHash, KnownBackedCandidate>,
+	unconfirmed: HashMap<CandidateHash, Vec<(ValidatorIndex, GroupIndex)>>,
+	pending_manifests: HashMap<ValidatorIndex, HashMap<CandidateHash, ManifestKind>>,
+
+	// maps target to (originator, statement) pairs.
+	pending_statements: HashMap<ValidatorIndex, HashSet<(ValidatorIndex, CompactStatement)>>,
+}
+
+impl GridTracker {
+	/// Attempt to import a manifest advertised by a remote peer.
+	///
+	/// This checks whether the peer is allowed to send us manifests
+	/// about this group at this relay-parent. This also does sanity
+	/// checks on the format of the manifest and the number of votes
+	/// it contains. It has effects on the stored state only when successful.
+	///
+	/// This returns a `bool` on success, which if true indicates that an acknowledgement is
+	/// to be sent in response to the received manifest. This only occurs when the
+	/// candidate is already known to be confirmed and backed.
+	pub fn import_manifest(
+		&mut self,
+		session_topology: &SessionTopologyView,
+		groups: &Groups,
+		candidate_hash: CandidateHash,
+		seconding_limit: usize,
+		manifest: ManifestSummary,
+		kind: ManifestKind,
+		sender: ValidatorIndex,
+	) -> Result<bool, ManifestImportError> {
+		let claimed_group_index = manifest.claimed_group_index;
+
+		let group_topology = match session_topology.group_views.get(&manifest.claimed_group_index) {
+			None => return Err(ManifestImportError::Disallowed),
+			Some(g) => g,
+		};
+
+		let receiving_from = group_topology.receiving.contains(&sender);
+		let sending_to = group_topology.sending.contains(&sender);
+		let manifest_allowed = match kind {
+			// Peers can send manifests _if_:
+			//   * They are in the receiving set for the group AND the manifest is full OR
+			//   * They are in the sending set for the group AND we have sent them
+			//     a manifest AND the received manifest is partial.
+			ManifestKind::Full => receiving_from,
+			ManifestKind::Acknowledgement =>
+				sending_to &&
+					self.confirmed_backed
+						.get(&candidate_hash)
+						.map_or(false, |c| c.has_sent_manifest_to(sender)),
+		};
+
+		if !manifest_allowed {
+			return Err(ManifestImportError::Disallowed)
+		}
+
+		let (group_size, backing_threshold) =
+			match groups.get_size_and_backing_threshold(manifest.claimed_group_index) {
+				Some(x) => x,
+				None => return Err(ManifestImportError::Malformed),
+			};
+
+		let remote_knowledge = manifest.statement_knowledge.clone();
+
+		if !remote_knowledge.has_len(group_size) {
+			return Err(ManifestImportError::Malformed)
+		}
+
+		if !remote_knowledge.has_seconded() {
+			return Err(ManifestImportError::Malformed)
+		}
+
+		// ensure votes are sufficient to back.
+		let votes = remote_knowledge.backing_validators();
+
+		if votes < backing_threshold {
+			return Err(ManifestImportError::Insufficient)
+		}
+
+		self.received.entry(sender).or_default().import_received(
+			group_size,
+			seconding_limit,
+			candidate_hash,
+			manifest,
+		)?;
+
+		let mut ack = false;
+		if let Some(confirmed) = self.confirmed_backed.get_mut(&candidate_hash) {
+			if receiving_from && !confirmed.has_sent_manifest_to(sender) {
+				// due to checks above, the manifest `kind` is guaranteed to be `Full`
+				self.pending_manifests
+					.entry(sender)
+					.or_default()
+					.insert(candidate_hash, ManifestKind::Acknowledgement);
+
+				ack = true;
+			}
+
+			// add all statements in local_knowledge & !remote_knowledge
+			// to `pending_statements` for this validator.
+			confirmed.manifest_received_from(sender, remote_knowledge);
+			if let Some(pending_statements) = confirmed.pending_statements(sender) {
+				self.pending_statements.entry(sender).or_default().extend(
+					decompose_statement_filter(
+						groups,
+						claimed_group_index,
+						candidate_hash,
+						&pending_statements,
+					),
+				);
+			}
+		} else {
+			// `received` prevents conflicting manifests so this is max 1 per validator.
+			self.unconfirmed
+				.entry(candidate_hash)
+				.or_default()
+				.push((sender, claimed_group_index))
+		}
+
+		Ok(ack)
+	}
+
+	/// Add a new backed candidate to the tracker. 
This yields
+	/// a list of validators which we should either advertise to
+	/// or signal that we know the candidate, along with the corresponding
+	/// type of manifest we should send.
+	pub fn add_backed_candidate(
+		&mut self,
+		session_topology: &SessionTopologyView,
+		candidate_hash: CandidateHash,
+		group_index: GroupIndex,
+		local_knowledge: StatementFilter,
+	) -> Vec<(ValidatorIndex, ManifestKind)> {
+		let c = match self.confirmed_backed.entry(candidate_hash) {
+			Entry::Occupied(_) => return Vec::new(),
+			Entry::Vacant(v) => v.insert(KnownBackedCandidate {
+				group_index,
+				mutual_knowledge: HashMap::new(),
+				local_knowledge,
+			}),
+		};
+
+		// Populate the entry with previously unconfirmed manifests.
+		for (v, claimed_group_index) in
+			self.unconfirmed.remove(&candidate_hash).into_iter().flatten()
+		{
+			if claimed_group_index != group_index {
+				// This is misbehavior, but is handled more comprehensively elsewhere.
+				continue
+			}
+
+			let statement_filter = self
+				.received
+				.get(&v)
+				.and_then(|r| r.candidate_statement_filter(&candidate_hash))
+				.expect("unconfirmed is only populated by validators who have sent manifest; qed");
+
+			// No need to send direct statements, because our local knowledge is `None`.
+			c.manifest_received_from(v, statement_filter);
+		}
+
+		let group_topology = match session_topology.group_views.get(&group_index) {
+			None => return Vec::new(),
+			Some(g) => g,
+		};
+
+		// advertise onwards and accept received advertisements
+
+		let sending_group_manifests =
+			group_topology.sending.iter().map(|v| (*v, ManifestKind::Full));
+
+		let receiving_group_manifests = group_topology.receiving.iter().filter_map(|v| {
+			if c.has_received_manifest_from(*v) {
+				Some((*v, ManifestKind::Acknowledgement))
+			} else {
+				None
+			}
+		});
+
+		// Note that order is important: if a validator is part of both the sending
+		// and receiving groups, we may overwrite a `Full` manifest with an `Acknowledgement`
+		// one.
+		for (v, manifest_mode) in sending_group_manifests.chain(receiving_group_manifests) {
+			self.pending_manifests
+				.entry(v)
+				.or_default()
+				.insert(candidate_hash, manifest_mode);
+		}
+
+		self.pending_manifests
+			.iter()
+			.filter_map(|(v, x)| x.get(&candidate_hash).map(|k| (*v, *k)))
+			.collect()
+	}
+
+	/// Note that a backed candidate has been advertised to a
+	/// given validator.
+	pub fn manifest_sent_to(
+		&mut self,
+		groups: &Groups,
+		validator_index: ValidatorIndex,
+		candidate_hash: CandidateHash,
+		local_knowledge: StatementFilter,
+	) {
+		if let Some(c) = self.confirmed_backed.get_mut(&candidate_hash) {
+			c.manifest_sent_to(validator_index, local_knowledge);
+
+			if let Some(pending_statements) = c.pending_statements(validator_index) {
+				self.pending_statements.entry(validator_index).or_default().extend(
+					decompose_statement_filter(
+						groups,
+						c.group_index,
+						candidate_hash,
+						&pending_statements,
+					),
+				);
+			}
+		}
+
+		if let Some(x) = self.pending_manifests.get_mut(&validator_index) {
+			x.remove(&candidate_hash);
+		}
+	}
+
+	/// Returns a vector of all candidates with pending manifests for the specific validator, and
+	/// the type of manifest we should send.
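+	///
+	/// For illustration only, a dispatch loop might look like this (hypothetical
+	/// surrounding code; building and sending the manifest itself is elided):
+	///
+	/// ```ignore
+	/// for (candidate_hash, kind) in tracker.pending_manifests_for(validator_index) {
+	/// 	// ... build and send a manifest of the given `kind`, then:
+	/// 	tracker.manifest_sent_to(
+	/// 		&groups, validator_index, candidate_hash, local_knowledge.clone(),
+	/// 	);
+	/// }
+	/// ```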
+	pub fn pending_manifests_for(
+		&self,
+		validator_index: ValidatorIndex,
+	) -> Vec<(CandidateHash, ManifestKind)> {
+		self.pending_manifests
+			.get(&validator_index)
+			.into_iter()
+			.flat_map(|pending| pending.iter().map(|(c, m)| (*c, *m)))
+			.collect()
+	}
+
+	/// Returns a statement filter indicating statements that a given peer
+	/// is awaiting concerning the given candidate, constrained by the statements
+	/// we have ourselves.
+	pub fn pending_statements_for(
+		&self,
+		validator_index: ValidatorIndex,
+		candidate_hash: CandidateHash,
+	) -> Option<StatementFilter> {
+		self.confirmed_backed
+			.get(&candidate_hash)
+			.and_then(|x| x.pending_statements(validator_index))
+	}
+
+	/// Returns a vector of all pending statements to the validator, sorted with
+	/// `Seconded` statements at the front.
+	///
+	/// Statements are in the form `(Originator, Statement Kind)`.
+	pub fn all_pending_statements_for(
+		&self,
+		validator_index: ValidatorIndex,
+	) -> Vec<(ValidatorIndex, CompactStatement)> {
+		let mut v = self
+			.pending_statements
+			.get(&validator_index)
+			.map(|x| x.iter().cloned().collect())
+			.unwrap_or(Vec::new());
+
+		v.sort_by_key(|(_, s)| match s {
+			CompactStatement::Seconded(_) => 0u32,
+			CompactStatement::Valid(_) => 1u32,
+		});
+
+		v
+	}
+
+	/// Whether a validator can request a manifest from us.
+	pub fn can_request(&self, validator: ValidatorIndex, candidate_hash: CandidateHash) -> bool {
+		self.confirmed_backed.get(&candidate_hash).map_or(false, |c| {
+			c.has_sent_manifest_to(validator) && !c.has_received_manifest_from(validator)
+		})
+	}
+
+	/// Determine the validators which can send a statement to us by direct broadcast.
+	pub fn direct_statement_providers(
+		&self,
+		groups: &Groups,
+		originator: ValidatorIndex,
+		statement: &CompactStatement,
+	) -> Vec<ValidatorIndex> {
+		let (g, c_h, kind, in_group) =
+			match extract_statement_and_group_info(groups, originator, statement) {
+				None => return Vec::new(),
+				Some(x) => x,
+			};
+
+		self.confirmed_backed
+			.get(&c_h)
+			.map(|k| k.direct_statement_senders(g, in_group, kind))
+			.unwrap_or_default()
+	}
+
+	/// Determine the validators which can receive a statement from us by direct
+	/// broadcast.
+	pub fn direct_statement_targets(
+		&self,
+		groups: &Groups,
+		originator: ValidatorIndex,
+		statement: &CompactStatement,
+	) -> Vec<ValidatorIndex> {
+		let (g, c_h, kind, in_group) =
+			match extract_statement_and_group_info(groups, originator, statement) {
+				None => return Vec::new(),
+				Some(x) => x,
+			};
+
+		self.confirmed_backed
+			.get(&c_h)
+			.map(|k| k.direct_statement_recipients(g, in_group, kind))
+			.unwrap_or_default()
+	}
+
+	/// Note that we have learned about a statement. This will update
+	/// `pending_statements_for` for any relevant validators if actually
+	/// fresh.
+	pub fn learned_fresh_statement(
+		&mut self,
+		groups: &Groups,
+		session_topology: &SessionTopologyView,
+		originator: ValidatorIndex,
+		statement: &CompactStatement,
+	) {
+		let (g, c_h, kind, in_group) =
+			match extract_statement_and_group_info(groups, originator, statement) {
+				None => return,
+				Some(x) => x,
+			};
+
+		let known = match self.confirmed_backed.get_mut(&c_h) {
+			None => return,
+			Some(x) => x,
+		};
+
+		if !known.note_fresh_statement(in_group, kind) {
+			return
+		}
+
+		// Add to `pending_statements` for all validators we communicate with
+		// who have exchanged manifests.
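+		// Both the sending and receiving sides of the group view are considered
+		// here, since fresh statements flow in both directions once manifests
+		// have been exchanged.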
+ let all_group_validators = session_topology + .group_views + .get(&g) + .into_iter() + .flat_map(|g| g.sending.iter().chain(g.receiving.iter())); + + for v in all_group_validators { + if known.is_pending_statement(*v, in_group, kind) { + self.pending_statements + .entry(*v) + .or_default() + .insert((originator, statement.clone())); + } + } + } + + /// Note that a direct statement about a given candidate was sent to or + /// received from the given validator. + pub fn sent_or_received_direct_statement( + &mut self, + groups: &Groups, + originator: ValidatorIndex, + counterparty: ValidatorIndex, + statement: &CompactStatement, + ) { + if let Some((_, c_h, kind, in_group)) = + extract_statement_and_group_info(groups, originator, statement) + { + if let Some(known) = self.confirmed_backed.get_mut(&c_h) { + known.sent_or_received_direct_statement(counterparty, in_group, kind); + + if let Some(pending) = self.pending_statements.get_mut(&counterparty) { + pending.remove(&(originator, statement.clone())); + } + } + } + } + + /// Get the advertised statement filter of a validator for a candidate. + pub fn advertised_statements( + &self, + validator: ValidatorIndex, + candidate_hash: &CandidateHash, + ) -> Option { + self.received.get(&validator)?.candidate_statement_filter(candidate_hash) + } + + #[cfg(test)] + fn is_manifest_pending_for( + &self, + validator: ValidatorIndex, + candidate_hash: &CandidateHash, + ) -> Option { + self.pending_manifests + .get(&validator) + .and_then(|m| m.get(candidate_hash)) + .map(|x| *x) + } +} + +fn extract_statement_and_group_info( + groups: &Groups, + originator: ValidatorIndex, + statement: &CompactStatement, +) -> Option<(GroupIndex, CandidateHash, StatementKind, usize)> { + let (statement_kind, candidate_hash) = match statement { + CompactStatement::Seconded(h) => (StatementKind::Seconded, h), + CompactStatement::Valid(h) => (StatementKind::Valid, h), + }; + + let group = match groups.by_validator_index(originator) { + None => return None, + Some(g) => g, + }; + + let index_in_group = groups.get(group)?.iter().position(|v| v == &originator)?; + + Some((group, *candidate_hash, statement_kind, index_in_group)) +} + +fn decompose_statement_filter<'a>( + groups: &'a Groups, + group_index: GroupIndex, + candidate_hash: CandidateHash, + statement_filter: &'a StatementFilter, +) -> impl Iterator + 'a { + groups.get(group_index).into_iter().flat_map(move |g| { + let s = statement_filter + .seconded_in_group + .iter_ones() + .map(|i| g[i]) + .map(move |i| (i, CompactStatement::Seconded(candidate_hash))); + + let v = statement_filter + .validated_in_group + .iter_ones() + .map(|i| g[i]) + .map(move |i| (i, CompactStatement::Valid(candidate_hash))); + + s.chain(v) + }) +} + +/// A summary of a manifest being sent by a counterparty. +#[derive(Clone)] +pub struct ManifestSummary { + /// The claimed parent head data hash of the candidate. + pub claimed_parent_hash: Hash, + /// The claimed group index assigned to the candidate. + pub claimed_group_index: GroupIndex, + /// A statement filter sent alongisde the candidate, communicating + /// knowledge. + pub statement_knowledge: StatementFilter, +} + +/// Errors in importing a manifest. +#[derive(Debug, Clone)] +pub enum ManifestImportError { + /// The manifest conflicts with another, previously sent manifest. + Conflicting, + /// The manifest has overflowed beyond the limits of what the + /// counterparty was allowed to send us. 
+     Overflow,
+     /// The manifest claims insufficient attestations to achieve the backing
+     /// threshold.
+     Insufficient,
+     /// The manifest is malformed.
+     Malformed,
+     /// The manifest was not allowed to be sent.
+     Disallowed,
+}
+
+/// The knowledge we are aware of counterparties having of manifests.
+#[derive(Default)]
+struct ReceivedManifests {
+     received: HashMap<CandidateHash, ManifestSummary>,
+     // group -> seconded counts.
+     seconded_counts: HashMap<GroupIndex, Vec<usize>>,
+}
+
+impl ReceivedManifests {
+     fn candidate_statement_filter(
+         &self,
+         candidate_hash: &CandidateHash,
+     ) -> Option<StatementFilter> {
+         self.received.get(candidate_hash).map(|m| m.statement_knowledge.clone())
+     }
+
+     /// Attempt to import a received manifest from a counterparty.
+     ///
+     /// This will reject manifests which are duplicate, conflicting,
+     /// or imply an excessive number of `Seconded` statements.
+     ///
+     /// This assumes that the manifest has already been checked for
+     /// validity - i.e. that the bitvecs match the claimed group in size
+     /// and that the manifest includes at least one `Seconded`
+     /// attestation and includes enough attestations for the candidate
+     /// to be backed.
+     ///
+     /// This also should only be invoked when we intend to track
+     /// the knowledge of this peer as determined by the [`SessionTopology`].
+     fn import_received(
+         &mut self,
+         group_size: usize,
+         seconding_limit: usize,
+         candidate_hash: CandidateHash,
+         manifest_summary: ManifestSummary,
+     ) -> Result<(), ManifestImportError> {
+         match self.received.entry(candidate_hash) {
+             Entry::Occupied(mut e) => {
+                 // occupied entry.
+
+                 // filter out clearly conflicting data.
+                 {
+                     let prev = e.get();
+                     if prev.claimed_group_index != manifest_summary.claimed_group_index {
+                         return Err(ManifestImportError::Conflicting)
+                     }
+
+                     if prev.claimed_parent_hash != manifest_summary.claimed_parent_hash {
+                         return Err(ManifestImportError::Conflicting)
+                     }
+
+                     if !manifest_summary
+                         .statement_knowledge
+                         .seconded_in_group
+                         .contains(&prev.statement_knowledge.seconded_in_group)
+                     {
+                         return Err(ManifestImportError::Conflicting)
+                     }
+
+                     if !manifest_summary
+                         .statement_knowledge
+                         .validated_in_group
+                         .contains(&prev.statement_knowledge.validated_in_group)
+                     {
+                         return Err(ManifestImportError::Conflicting)
+                     }
+
+                     let mut fresh_seconded =
+                         manifest_summary.statement_knowledge.seconded_in_group.clone();
+                     fresh_seconded |= &prev.statement_knowledge.seconded_in_group;
+
+                     let within_limits = updating_ensure_within_seconding_limit(
+                         &mut self.seconded_counts,
+                         manifest_summary.claimed_group_index,
+                         group_size,
+                         seconding_limit,
+                         &fresh_seconded,
+                     );
+
+                     if !within_limits {
+                         return Err(ManifestImportError::Overflow)
+                     }
+                 }
+
+                 // All checks passed. Overwrite: guaranteed to be
+                 // superset.
+                 *e.get_mut() = manifest_summary;
+                 Ok(())
+             },
+             Entry::Vacant(e) => {
+                 let within_limits = updating_ensure_within_seconding_limit(
+                     &mut self.seconded_counts,
+                     manifest_summary.claimed_group_index,
+                     group_size,
+                     seconding_limit,
+                     &manifest_summary.statement_knowledge.seconded_in_group,
+                 );
+
+                 if within_limits {
+                     e.insert(manifest_summary);
+                     Ok(())
+                 } else {
+                     Err(ManifestImportError::Overflow)
+                 }
+             },
+         }
+     }
+}
+
+// updates validator-seconded records but only if the new statements
+// are OK. returns `true` if alright and `false` otherwise.
+//
+// The seconding limit is a per-validator limit. It ensures an upper bound on the total number of
+// candidates entering the system.
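+//
+// An illustrative sketch (values assumed, not part of this patch): with a
+// group of size 3 and a seconding limit of 1, the first manifest claiming a
+// `Seconded` statement from the validator at group position 0 fits, and a
+// second claim against the same position is an overflow:
+//
+// let mut counts = HashMap::new();
+// let seconded = bitvec::bits![u8, Lsb0; 1, 0, 0];
+// assert!(updating_ensure_within_seconding_limit(
+//     &mut counts, GroupIndex(0), 3, 1, seconded));
+// assert!(!updating_ensure_within_seconding_limit(
+//     &mut counts, GroupIndex(0), 3, 1, seconded));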
+fn updating_ensure_within_seconding_limit(
+     seconded_counts: &mut HashMap<GroupIndex, Vec<usize>>,
+     group_index: GroupIndex,
+     group_size: usize,
+     seconding_limit: usize,
+     new_seconded: &BitSlice<u8, Lsb0>,
+) -> bool {
+     if seconding_limit == 0 {
+         return false
+     }
+
+     // Due to the check above, if the entry was non-existent this function will
+     // always return `true`: the fresh counts start at zero and `seconding_limit`
+     // is at least 1.
+     let counts = seconded_counts.entry(group_index).or_insert_with(|| vec![0; group_size]);
+
+     for i in new_seconded.iter_ones() {
+         if counts[i] == seconding_limit {
+             return false
+         }
+     }
+
+     for i in new_seconded.iter_ones() {
+         counts[i] += 1;
+     }
+
+     true
+}
+
+#[derive(Debug, Clone, Copy)]
+enum StatementKind {
+     Seconded,
+     Valid,
+}
+
+trait FilterQuery {
+     fn contains(&self, index: usize, statement_kind: StatementKind) -> bool;
+     fn set(&mut self, index: usize, statement_kind: StatementKind);
+}
+
+impl FilterQuery for StatementFilter {
+     fn contains(&self, index: usize, statement_kind: StatementKind) -> bool {
+         match statement_kind {
+             StatementKind::Seconded => self.seconded_in_group.get(index).map_or(false, |x| *x),
+             StatementKind::Valid => self.validated_in_group.get(index).map_or(false, |x| *x),
+         }
+     }
+
+     fn set(&mut self, index: usize, statement_kind: StatementKind) {
+         let b = match statement_kind {
+             StatementKind::Seconded => self.seconded_in_group.get_mut(index),
+             StatementKind::Valid => self.validated_in_group.get_mut(index),
+         };
+
+         if let Some(mut b) = b {
+             *b = true;
+         }
+     }
+}
+
+/// Knowledge that we have about a remote peer concerning a candidate, and that they have about us
+/// concerning the candidate.
+#[derive(Debug, Clone)]
+struct MutualKnowledge {
+     /// Knowledge the remote peer has about the candidate, as far as we're aware.
+     /// `Some` only if they have advertised, acknowledged, or requested the candidate.
+     remote_knowledge: Option<StatementFilter>,
+     /// Knowledge we have indicated to the remote peer about the candidate.
+     /// `Some` only if we have advertised, acknowledged, or requested the candidate
+     /// from them.
+     local_knowledge: Option<StatementFilter>,
+}
+
+// A utility struct for keeping track of metadata about candidates
+// we have confirmed as having been backed.
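+//
+// A rough lifecycle sketch (assumed flow, not part of this patch):
+//
+// let mut c = KnownBackedCandidate {
+//     group_index: GroupIndex(0),
+//     local_knowledge: StatementFilter::blank(3),
+//     mutual_knowledge: HashMap::new(),
+// };
+// c.manifest_sent_to(ValidatorIndex(1), StatementFilter::blank(3));
+// c.manifest_received_from(ValidatorIndex(1), StatementFilter::blank(3));
+// // Both halves of the mutual knowledge are now `Some`, so
+// // `c.pending_statements(ValidatorIndex(1))` yields a filter of
+// // statements we know and the counterparty does not.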
+#[derive(Debug, Clone)]
+struct KnownBackedCandidate {
+     group_index: GroupIndex,
+     local_knowledge: StatementFilter,
+     mutual_knowledge: HashMap<ValidatorIndex, MutualKnowledge>,
+}
+
+impl KnownBackedCandidate {
+     fn has_received_manifest_from(&self, validator: ValidatorIndex) -> bool {
+         self.mutual_knowledge
+             .get(&validator)
+             .map_or(false, |k| k.remote_knowledge.is_some())
+     }
+
+     fn has_sent_manifest_to(&self, validator: ValidatorIndex) -> bool {
+         self.mutual_knowledge
+             .get(&validator)
+             .map_or(false, |k| k.local_knowledge.is_some())
+     }
+
+     fn manifest_sent_to(&mut self, validator: ValidatorIndex, local_knowledge: StatementFilter) {
+         let k = self
+             .mutual_knowledge
+             .entry(validator)
+             .or_insert_with(|| MutualKnowledge { remote_knowledge: None, local_knowledge: None });
+
+         k.local_knowledge = Some(local_knowledge);
+     }
+
+     fn manifest_received_from(
+         &mut self,
+         validator: ValidatorIndex,
+         remote_knowledge: StatementFilter,
+     ) {
+         let k = self
+             .mutual_knowledge
+             .entry(validator)
+             .or_insert_with(|| MutualKnowledge { remote_knowledge: None, local_knowledge: None });
+
+         k.remote_knowledge = Some(remote_knowledge);
+     }
+
+     fn direct_statement_senders(
+         &self,
+         group_index: GroupIndex,
+         originator_index_in_group: usize,
+         statement_kind: StatementKind,
+     ) -> Vec<ValidatorIndex> {
+         if group_index != self.group_index {
+             return Vec::new()
+         }
+
+         self.mutual_knowledge
+             .iter()
+             .filter(|(_, k)| k.remote_knowledge.is_some())
+             .filter(|(_, k)| {
+                 k.local_knowledge
+                     .as_ref()
+                     .map_or(false, |r| !r.contains(originator_index_in_group, statement_kind))
+             })
+             .map(|(v, _)| *v)
+             .collect()
+     }
+
+     fn direct_statement_recipients(
+         &self,
+         group_index: GroupIndex,
+         originator_index_in_group: usize,
+         statement_kind: StatementKind,
+     ) -> Vec<ValidatorIndex> {
+         if group_index != self.group_index {
+             return Vec::new()
+         }
+
+         self.mutual_knowledge
+             .iter()
+             .filter(|(_, k)| k.local_knowledge.is_some())
+             .filter(|(_, k)| {
+                 k.remote_knowledge
+                     .as_ref()
+                     .map_or(false, |r| !r.contains(originator_index_in_group, statement_kind))
+             })
+             .map(|(v, _)| *v)
+             .collect()
+     }
+
+     fn note_fresh_statement(
+         &mut self,
+         statement_index_in_group: usize,
+         statement_kind: StatementKind,
+     ) -> bool {
+         let really_fresh = !self.local_knowledge.contains(statement_index_in_group, statement_kind);
+         self.local_knowledge.set(statement_index_in_group, statement_kind);
+
+         really_fresh
+     }
+
+     fn sent_or_received_direct_statement(
+         &mut self,
+         validator: ValidatorIndex,
+         statement_index_in_group: usize,
+         statement_kind: StatementKind,
+     ) {
+         if let Some(k) = self.mutual_knowledge.get_mut(&validator) {
+             if let (Some(r), Some(l)) = (k.remote_knowledge.as_mut(), k.local_knowledge.as_mut()) {
+                 r.set(statement_index_in_group, statement_kind);
+                 l.set(statement_index_in_group, statement_kind);
+             }
+         }
+     }
+
+     fn is_pending_statement(
+         &self,
+         validator: ValidatorIndex,
+         statement_index_in_group: usize,
+         statement_kind: StatementKind,
+     ) -> bool {
+         // existence of both remote & local knowledge indicates we have exchanged
+         // manifests.
+         // then, everything that is not in the remote knowledge is pending
+         self.mutual_knowledge
+             .get(&validator)
+             .filter(|k| k.local_knowledge.is_some())
+             .and_then(|k| k.remote_knowledge.as_ref())
+             .map(|k| !k.contains(statement_index_in_group, statement_kind))
+             .unwrap_or(false)
+     }
+
+     fn pending_statements(&self, validator: ValidatorIndex) -> Option<StatementFilter> {
+         // existence of both remote & local knowledge indicates we have exchanged
+         // manifests.
+         // then, everything that is not in the remote knowledge is pending, and we
+         // further limit this by what is in the local knowledge itself. we use the
+         // full local knowledge, as the local knowledge stored here may be outdated.
+         let full_local = &self.local_knowledge;
+
+         self.mutual_knowledge
+             .get(&validator)
+             .filter(|k| k.local_knowledge.is_some())
+             .and_then(|k| k.remote_knowledge.as_ref())
+             .map(|remote| StatementFilter {
+                 seconded_in_group: full_local.seconded_in_group.clone() &
+                     !remote.seconded_in_group.clone(),
+                 validated_in_group: full_local.validated_in_group.clone() &
+                     !remote.validated_in_group.clone(),
+             })
+     }
+}
+
+#[cfg(test)]
+mod tests {
+     use super::*;
+     use assert_matches::assert_matches;
+     use polkadot_node_network_protocol::grid_topology::TopologyPeerInfo;
+     use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair;
+     use sp_core::crypto::Pair as PairT;
+
+     fn dummy_groups(group_size: usize) -> Groups {
+         let groups = vec![(0..(group_size as u32)).map(ValidatorIndex).collect()].into();
+
+         Groups::new(groups)
+     }
+
+     #[test]
+     fn topology_empty_for_no_index() {
+         let base_topology = SessionGridTopology::new(
+             vec![0, 1, 2],
+             vec![
+                 TopologyPeerInfo {
+                     peer_ids: Vec::new(),
+                     validator_index: ValidatorIndex(0),
+                     discovery_id: AuthorityDiscoveryPair::generate().0.public(),
+                 },
+                 TopologyPeerInfo {
+                     peer_ids: Vec::new(),
+                     validator_index: ValidatorIndex(1),
+                     discovery_id: AuthorityDiscoveryPair::generate().0.public(),
+                 },
+                 TopologyPeerInfo {
+                     peer_ids: Vec::new(),
+                     validator_index: ValidatorIndex(2),
+                     discovery_id: AuthorityDiscoveryPair::generate().0.public(),
+                 },
+             ],
+         );
+
+         let t = build_session_topology(
+             &[vec![ValidatorIndex(0)], vec![ValidatorIndex(1)], vec![ValidatorIndex(2)]],
+             &base_topology,
+             None,
+         );
+
+         assert!(t.group_views.is_empty());
+     }
+
+     #[test]
+     fn topology_setup() {
+         let base_topology = SessionGridTopology::new(
+             (0..9).collect(),
+             (0..9)
+                 .map(|i| TopologyPeerInfo {
+                     peer_ids: Vec::new(),
+                     validator_index: ValidatorIndex(i),
+                     discovery_id: AuthorityDiscoveryPair::generate().0.public(),
+                 })
+                 .collect(),
+         );
+
+         let t = build_session_topology(
+             &[
+                 vec![ValidatorIndex(0), ValidatorIndex(3), ValidatorIndex(6)],
+                 vec![ValidatorIndex(4), ValidatorIndex(2), ValidatorIndex(7)],
+                 vec![ValidatorIndex(8), ValidatorIndex(5), ValidatorIndex(1)],
+             ],
+             &base_topology,
+             Some(ValidatorIndex(0)),
+         );
+
+         assert_eq!(t.group_views.len(), 3);
+
+         // 0 1 2
+         // 3 4 5
+         // 6 7 8
+
+         // our group: we send to all row/column neighbors which are not in our
+         // group and receive nothing.
+         assert_eq!(
+             t.group_views.get(&GroupIndex(0)).unwrap().sending,
+             vec![1, 2].into_iter().map(ValidatorIndex).collect::<HashSet<_>>(),
+         );
+         assert_eq!(t.group_views.get(&GroupIndex(0)).unwrap().receiving, HashSet::new(),);
+
+         // we share a row with '2' and have indirect connections to '4' and '7'.
+
+         assert_eq!(
+             t.group_views.get(&GroupIndex(1)).unwrap().sending,
+             vec![3, 6].into_iter().map(ValidatorIndex).collect::<HashSet<_>>(),
+         );
+         assert_eq!(
+             t.group_views.get(&GroupIndex(1)).unwrap().receiving,
+             vec![1, 2, 3, 6].into_iter().map(ValidatorIndex).collect::<HashSet<_>>(),
+         );
+
+         // we share a row with '1' and have indirect connections to '5' and '8'.
+
+         assert_eq!(
+             t.group_views.get(&GroupIndex(2)).unwrap().sending,
+             vec![3, 6].into_iter().map(ValidatorIndex).collect::<HashSet<_>>(),
+         );
+         assert_eq!(
+             t.group_views.get(&GroupIndex(2)).unwrap().receiving,
+             vec![1, 2, 3, 6].into_iter().map(ValidatorIndex).collect::<HashSet<_>>(),
+         );
+     }
+
+     #[test]
+     fn knowledge_rejects_conflicting_manifest() {
+         let mut knowledge = ReceivedManifests::default();
+
+         let expected_manifest_summary = ManifestSummary {
+             claimed_parent_hash: Hash::repeat_byte(2),
+             claimed_group_index: GroupIndex(0),
+             statement_knowledge: StatementFilter {
+                 seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0],
+                 validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1],
+             },
+         };
+
+         knowledge
+             .import_received(
+                 3,
+                 2,
+                 CandidateHash(Hash::repeat_byte(1)),
+                 expected_manifest_summary.clone(),
+             )
+             .unwrap();
+
+         // conflicting group
+
+         let mut s = expected_manifest_summary.clone();
+         s.claimed_group_index = GroupIndex(1);
+         assert_matches!(
+             knowledge.import_received(3, 2, CandidateHash(Hash::repeat_byte(1)), s,),
+             Err(ManifestImportError::Conflicting)
+         );
+
+         // conflicting parent hash
+
+         let mut s = expected_manifest_summary.clone();
+         s.claimed_parent_hash = Hash::repeat_byte(3);
+         assert_matches!(
+             knowledge.import_received(3, 2, CandidateHash(Hash::repeat_byte(1)), s,),
+             Err(ManifestImportError::Conflicting)
+         );
+
+         // conflicting seconded statements bitfield
+
+         let mut s = expected_manifest_summary.clone();
+         s.statement_knowledge.seconded_in_group = bitvec::bitvec![u8, Lsb0; 0, 1, 0];
+         assert_matches!(
+             knowledge.import_received(3, 2, CandidateHash(Hash::repeat_byte(1)), s,),
+             Err(ManifestImportError::Conflicting)
+         );
+
+         // conflicting valid statements bitfield
+
+         let mut s = expected_manifest_summary.clone();
+         s.statement_knowledge.validated_in_group = bitvec::bitvec![u8, Lsb0; 0, 1, 0];
+         assert_matches!(
+             knowledge.import_received(3, 2, CandidateHash(Hash::repeat_byte(1)), s,),
+             Err(ManifestImportError::Conflicting)
+         );
+     }
+
+     // Make sure we don't import manifests that would put a validator in a group over the limit of
+     // candidates they are allowed to second (aka seconding limit).
+     #[test]
+     fn reject_overflowing_manifests() {
+         let mut knowledge = ReceivedManifests::default();
+         knowledge
+             .import_received(
+                 3,
+                 2,
+                 CandidateHash(Hash::repeat_byte(1)),
+                 ManifestSummary {
+                     claimed_parent_hash: Hash::repeat_byte(0xA),
+                     claimed_group_index: GroupIndex(0),
+                     statement_knowledge: StatementFilter {
+                         seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0],
+                         validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1],
+                     },
+                 },
+             )
+             .unwrap();
+
+         knowledge
+             .import_received(
+                 3,
+                 2,
+                 CandidateHash(Hash::repeat_byte(2)),
+                 ManifestSummary {
+                     claimed_parent_hash: Hash::repeat_byte(0xB),
+                     claimed_group_index: GroupIndex(0),
+                     statement_knowledge: StatementFilter {
+                         seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1],
+                         validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1],
+                     },
+                 },
+             )
+             .unwrap();
+
+         // Reject a seconding validator that is already at the seconding limit. Seconding counts for
+         // the validators should not be applied.
+ assert_matches!( + knowledge.import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(3)), + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0xC), + claimed_group_index: GroupIndex(0), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + } + }, + ), + Err(ManifestImportError::Overflow) + ); + + // Don't reject validators that have seconded less than the limit so far. + knowledge + .import_received( + 3, + 2, + CandidateHash(Hash::repeat_byte(3)), + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0xC), + claimed_group_index: GroupIndex(0), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + }, + }, + ) + .unwrap(); + } + + #[test] + fn reject_disallowed_manifest() { + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::new(), + receiving: vec![ValidatorIndex(0)].into_iter().collect(), + }, + )] + .into_iter() + .collect(), + }; + + let groups = dummy_groups(3); + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + assert_eq!(groups.get_size_and_backing_threshold(GroupIndex(0)), Some((3, 2)),); + + // Known group, disallowed receiving validator. + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(0), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + } + }, + ManifestKind::Full, + ValidatorIndex(1), + ), + Err(ManifestImportError::Disallowed) + ); + + // Unknown group + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(1), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + } + }, + ManifestKind::Full, + ValidatorIndex(0), + ), + Err(ManifestImportError::Disallowed) + ); + } + + #[test] + fn reject_malformed_wrong_group_size() { + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::new(), + receiving: vec![ValidatorIndex(0)].into_iter().collect(), + }, + )] + .into_iter() + .collect(), + }; + + let groups = dummy_groups(3); + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + assert_eq!(groups.get_size_and_backing_threshold(GroupIndex(0)), Some((3, 2)),); + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(0), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + } + }, + ManifestKind::Full, + ValidatorIndex(0), + ), + Err(ManifestImportError::Malformed) + ); + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: 
GroupIndex(0), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1, 0], + } + }, + ManifestKind::Full, + ValidatorIndex(0), + ), + Err(ManifestImportError::Malformed) + ); + } + + #[test] + fn reject_malformed_no_seconders() { + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::new(), + receiving: vec![ValidatorIndex(0)].into_iter().collect(), + }, + )] + .into_iter() + .collect(), + }; + + let groups = dummy_groups(3); + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + assert_eq!(groups.get_size_and_backing_threshold(GroupIndex(0)), Some((3, 2)),); + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(0), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + } + }, + ManifestKind::Full, + ValidatorIndex(0), + ), + Err(ManifestImportError::Malformed) + ); + } + + #[test] + fn reject_insufficient_below_threshold() { + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::new(), + receiving: HashSet::from([ValidatorIndex(0)]), + }, + )] + .into_iter() + .collect(), + }; + + let groups = dummy_groups(3); + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + assert_eq!(groups.get_size_and_backing_threshold(GroupIndex(0)), Some((3, 2)),); + + // only one vote + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(0), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + } + }, + ManifestKind::Full, + ValidatorIndex(0), + ), + Err(ManifestImportError::Insufficient) + ); + + // seconding + validating still not enough to reach '2' threshold + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(0), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + } + }, + ManifestKind::Full, + ValidatorIndex(0), + ), + Err(ManifestImportError::Insufficient) + ); + + // finally good. + + assert_matches!( + tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: GroupIndex(0), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + } + }, + ManifestKind::Full, + ValidatorIndex(0), + ), + Ok(false) + ); + } + + // Test that when we add a candidate as backed and advertise it to the sending group, they can + // provide an acknowledgement manifest in response. 
+ #[test] + fn senders_can_provide_manifests_in_acknowledgement() { + let validator_index = ValidatorIndex(0); + + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::from([validator_index]), + receiving: HashSet::from([ValidatorIndex(1)]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = dummy_groups(group_size); + + // Add the candidate as backed. + let receivers = tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + local_knowledge.clone(), + ); + // Validator 0 is in the sending group. Advertise onward to it. + // + // Validator 1 is in the receiving group, but we have not received from it, so we're not + // expected to send it an acknowledgement. + assert_eq!(receivers, vec![(validator_index, ManifestKind::Full)]); + + // Note the manifest as 'sent' to validator 0. + tracker.manifest_sent_to(&groups, validator_index, candidate_hash, local_knowledge); + + // Import manifest of kind `Acknowledgement` from validator 0. + let ack = tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }, + ManifestKind::Acknowledgement, + validator_index, + ); + assert_matches!(ack, Ok(false)); + } + + // Check that pending communication is set correctly when receiving a manifest on a confirmed candidate. + // + // It should also overwrite any existing `Full` ManifestKind. + #[test] + fn pending_communication_receiving_manifest_on_confirmed_candidate() { + let validator_index = ValidatorIndex(0); + + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::from([validator_index]), + receiving: HashSet::from([ValidatorIndex(1)]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = dummy_groups(group_size); + + // Manifest should not be pending yet. + let pending_manifest = tracker.is_manifest_pending_for(validator_index, &candidate_hash); + assert_eq!(pending_manifest, None); + + // Add the candidate as backed. + tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + local_knowledge.clone(), + ); + + // Manifest should be pending as `Full`. + let pending_manifest = tracker.is_manifest_pending_for(validator_index, &candidate_hash); + assert_eq!(pending_manifest, Some(ManifestKind::Full)); + + // Note the manifest as 'sent' to validator 0. + tracker.manifest_sent_to(&groups, validator_index, candidate_hash, local_knowledge); + + // Import manifest. + // + // Should overwrite existing `Full` manifest. 
+         let ack = tracker.import_manifest(
+             &session_topology,
+             &groups,
+             candidate_hash,
+             3,
+             ManifestSummary {
+                 claimed_parent_hash: Hash::repeat_byte(0),
+                 claimed_group_index: group_index,
+                 statement_knowledge: StatementFilter {
+                     seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0],
+                     validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1],
+                 },
+             },
+             ManifestKind::Acknowledgement,
+             validator_index,
+         );
+         assert_matches!(ack, Ok(false));
+
+         let pending_manifest = tracker.is_manifest_pending_for(validator_index, &candidate_hash);
+         assert_eq!(pending_manifest, None);
+     }
+
+     // Check that pending communication is cleared correctly in `manifest_sent_to`
+     //
+     // Also test a scenario where manifest import returns `Ok(true)` (should acknowledge).
+     #[test]
+     fn pending_communication_is_cleared() {
+         let validator_index = ValidatorIndex(0);
+
+         let mut tracker = GridTracker::default();
+         let session_topology = SessionTopologyView {
+             group_views: vec![(
+                 GroupIndex(0),
+                 GroupSubView {
+                     sending: HashSet::new(),
+                     receiving: HashSet::from([validator_index]),
+                 },
+             )]
+             .into_iter()
+             .collect(),
+         };
+
+         let candidate_hash = CandidateHash(Hash::repeat_byte(42));
+         let group_index = GroupIndex(0);
+         let group_size = 3;
+         let local_knowledge = StatementFilter::blank(group_size);
+
+         let groups = dummy_groups(group_size);
+
+         // Add the candidate as backed.
+         tracker.add_backed_candidate(
+             &session_topology,
+             candidate_hash,
+             group_index,
+             local_knowledge.clone(),
+         );
+
+         // Manifest should not be pending yet.
+         let pending_manifest = tracker.is_manifest_pending_for(validator_index, &candidate_hash);
+         assert_eq!(pending_manifest, None);
+
+         // Import manifest. The candidate is confirmed backed and we are expected to receive from
+         // validator 0, so send it an acknowledgement.
+         let ack = tracker.import_manifest(
+             &session_topology,
+             &groups,
+             candidate_hash,
+             3,
+             ManifestSummary {
+                 claimed_parent_hash: Hash::repeat_byte(0),
+                 claimed_group_index: group_index,
+                 statement_knowledge: StatementFilter {
+                     seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0],
+                     validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1],
+                 },
+             },
+             ManifestKind::Full,
+             validator_index,
+         );
+         assert_matches!(ack, Ok(true));
+
+         // Acknowledgement manifest should be pending.
+         let pending_manifest = tracker.is_manifest_pending_for(validator_index, &candidate_hash);
+         assert_eq!(pending_manifest, Some(ManifestKind::Acknowledgement));
+
+         // Note the candidate as advertised.
+         tracker.manifest_sent_to(&groups, validator_index, candidate_hash, local_knowledge);
+
+         // Pending manifest should be cleared.
+         let pending_manifest = tracker.is_manifest_pending_for(validator_index, &candidate_hash);
+         assert_eq!(pending_manifest, None);
+     }
+
+     /// A manifest exchange means that both `manifest_sent_to` and `manifest_received_from` have
+     /// been invoked.
+     ///
+     /// In practice, it means that one of three things has happened:
+     ///
+     /// - They announced, we acknowledged
+     ///
+     /// - We announced, they acknowledged
+     ///
+     /// - We announced, they announced (not sure if this can actually happen; it would happen if 2
+     ///   nodes had each other in their sending set and they sent manifests at the same time.
The + /// code accounts for this anyway) + #[test] + fn pending_statements_are_updated_after_manifest_exchange() { + let send_to = ValidatorIndex(0); + let receive_from = ValidatorIndex(1); + + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::from([send_to]), + receiving: HashSet::from([receive_from]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = dummy_groups(group_size); + + // Confirm the candidate. + let receivers = tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + local_knowledge.clone(), + ); + assert_eq!(receivers, vec![(send_to, ManifestKind::Full)]); + + // Learn a statement from a different validator. + tracker.learned_fresh_statement( + &groups, + &session_topology, + ValidatorIndex(2), + &CompactStatement::Seconded(candidate_hash), + ); + + // Test receiving followed by sending an ack. + { + // Should start with no pending statements. + assert_eq!(tracker.pending_statements_for(receive_from, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(receive_from), vec![]); + let ack = tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }, + ManifestKind::Full, + receive_from, + ); + assert_matches!(ack, Ok(true)); + + // Send ack now. + tracker.manifest_sent_to( + &groups, + receive_from, + candidate_hash, + local_knowledge.clone(), + ); + + // There should be pending statements now. + assert_eq!( + tracker.pending_statements_for(receive_from, candidate_hash), + Some(StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }) + ); + assert_eq!( + tracker.all_pending_statements_for(receive_from), + vec![(ValidatorIndex(2), CompactStatement::Seconded(candidate_hash))] + ); + } + + // Test sending followed by receiving an ack. + { + // Should start with no pending statements. + assert_eq!(tracker.pending_statements_for(send_to, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(send_to), vec![]); + + tracker.manifest_sent_to(&groups, send_to, candidate_hash, local_knowledge.clone()); + let ack = tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + }, + }, + ManifestKind::Acknowledgement, + send_to, + ); + assert_matches!(ack, Ok(false)); + + // There should be pending statements now. 
+ assert_eq!( + tracker.pending_statements_for(send_to, candidate_hash), + Some(StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }) + ); + assert_eq!( + tracker.all_pending_statements_for(send_to), + vec![(ValidatorIndex(2), CompactStatement::Seconded(candidate_hash))] + ); + } + } + + #[test] + fn invalid_fresh_statement_import() { + let validator_index = ValidatorIndex(0); + + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::new(), + receiving: HashSet::from([validator_index]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = dummy_groups(group_size); + + // Should start with no pending statements. + assert_eq!(tracker.pending_statements_for(validator_index, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(validator_index), vec![]); + + // Try to import fresh statement. Candidate not backed. + let statement = CompactStatement::Seconded(candidate_hash); + tracker.learned_fresh_statement(&groups, &session_topology, validator_index, &statement); + + assert_eq!(tracker.pending_statements_for(validator_index, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(validator_index), vec![]); + + // Add the candidate as backed. + tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + local_knowledge.clone(), + ); + + // Try to import fresh statement. Unknown group for validator index. + let statement = CompactStatement::Seconded(candidate_hash); + tracker.learned_fresh_statement(&groups, &session_topology, ValidatorIndex(1), &statement); + + assert_eq!(tracker.pending_statements_for(validator_index, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(validator_index), vec![]); + } + + #[test] + fn pending_statements_updated_when_importing_fresh_statement() { + let validator_index = ValidatorIndex(0); + + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::new(), + receiving: HashSet::from([validator_index]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = dummy_groups(group_size); + + // Should start with no pending statements. + assert_eq!(tracker.pending_statements_for(validator_index, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(validator_index), vec![]); + + // Add the candidate as backed. + tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + local_knowledge.clone(), + ); + + // Import fresh statement. 
+ + let ack = tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }, + ManifestKind::Full, + validator_index, + ); + assert_matches!(ack, Ok(true)); + tracker.manifest_sent_to(&groups, validator_index, candidate_hash, local_knowledge); + let statement = CompactStatement::Seconded(candidate_hash); + tracker.learned_fresh_statement(&groups, &session_topology, validator_index, &statement); + + // There should be pending statements now. + let statements = StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }; + assert_eq!( + tracker.pending_statements_for(validator_index, candidate_hash), + Some(statements.clone()) + ); + assert_eq!( + tracker.all_pending_statements_for(validator_index), + vec![(ValidatorIndex(0), CompactStatement::Seconded(candidate_hash))] + ); + + // After successful import, try importing again. Nothing should change. + + tracker.learned_fresh_statement(&groups, &session_topology, validator_index, &statement); + assert_eq!( + tracker.pending_statements_for(validator_index, candidate_hash), + Some(statements) + ); + assert_eq!( + tracker.all_pending_statements_for(validator_index), + vec![(ValidatorIndex(0), CompactStatement::Seconded(candidate_hash))] + ); + } + + // After learning fresh statements, we should not generate pending statements for knowledge that + // the validator already has. + #[test] + fn pending_statements_respect_remote_knowledge() { + let validator_index = ValidatorIndex(0); + + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::new(), + receiving: HashSet::from([validator_index]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = dummy_groups(group_size); + + // Should start with no pending statements. + assert_eq!(tracker.pending_statements_for(validator_index, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(validator_index), vec![]); + + // Add the candidate as backed. + tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + local_knowledge.clone(), + ); + + // Import fresh statement. 
+ let ack = tracker.import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }, + ManifestKind::Full, + validator_index, + ); + assert_matches!(ack, Ok(true)); + tracker.manifest_sent_to(&groups, validator_index, candidate_hash, local_knowledge); + tracker.learned_fresh_statement( + &groups, + &session_topology, + validator_index, + &CompactStatement::Seconded(candidate_hash), + ); + tracker.learned_fresh_statement( + &groups, + &session_topology, + validator_index, + &CompactStatement::Valid(candidate_hash), + ); + + // The pending statements should respect the remote knowledge (meaning the Seconded + // statement is ignored, but not the Valid statement). + let statements = StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 0], + }; + assert_eq!( + tracker.pending_statements_for(validator_index, candidate_hash), + Some(statements.clone()) + ); + assert_eq!( + tracker.all_pending_statements_for(validator_index), + vec![(ValidatorIndex(0), CompactStatement::Valid(candidate_hash))] + ); + } + + #[test] + fn pending_statements_cleared_when_sending() { + let validator_index = ValidatorIndex(0); + let counterparty = ValidatorIndex(1); + + let mut tracker = GridTracker::default(); + let session_topology = SessionTopologyView { + group_views: vec![( + GroupIndex(0), + GroupSubView { + sending: HashSet::new(), + receiving: HashSet::from([validator_index, counterparty]), + }, + )] + .into_iter() + .collect(), + }; + + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + let group_index = GroupIndex(0); + let group_size = 3; + let local_knowledge = StatementFilter::blank(group_size); + + let groups = dummy_groups(group_size); + + // Should start with no pending statements. + assert_eq!(tracker.pending_statements_for(validator_index, candidate_hash), None); + assert_eq!(tracker.all_pending_statements_for(validator_index), vec![]); + + // Add the candidate as backed. + tracker.add_backed_candidate( + &session_topology, + candidate_hash, + group_index, + local_knowledge.clone(), + ); + + // Import statement for originator. + tracker + .import_manifest( + &session_topology, + &groups, + candidate_hash, + 3, + ManifestSummary { + claimed_parent_hash: Hash::repeat_byte(0), + claimed_group_index: group_index, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + }, + }, + ManifestKind::Full, + validator_index, + ) + .ok() + .unwrap(); + tracker.manifest_sent_to(&groups, validator_index, candidate_hash, local_knowledge.clone()); + let statement = CompactStatement::Seconded(candidate_hash); + tracker.learned_fresh_statement(&groups, &session_topology, validator_index, &statement); + + // Import statement for counterparty. 
+         tracker
+             .import_manifest(
+                 &session_topology,
+                 &groups,
+                 candidate_hash,
+                 3,
+                 ManifestSummary {
+                     claimed_parent_hash: Hash::repeat_byte(0),
+                     claimed_group_index: group_index,
+                     statement_knowledge: StatementFilter {
+                         seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 0],
+                         validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1],
+                     },
+                 },
+                 ManifestKind::Full,
+                 counterparty,
+             )
+             .ok()
+             .unwrap();
+         tracker.manifest_sent_to(&groups, counterparty, candidate_hash, local_knowledge);
+         let statement = CompactStatement::Seconded(candidate_hash);
+         tracker.learned_fresh_statement(&groups, &session_topology, counterparty, &statement);
+
+         // There should be pending statements now.
+         let statements = StatementFilter {
+             seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 0],
+             validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0],
+         };
+         assert_eq!(
+             tracker.pending_statements_for(validator_index, candidate_hash),
+             Some(statements.clone())
+         );
+         assert_eq!(
+             tracker.all_pending_statements_for(validator_index),
+             vec![(ValidatorIndex(0), CompactStatement::Seconded(candidate_hash))]
+         );
+         assert_eq!(
+             tracker.pending_statements_for(counterparty, candidate_hash),
+             Some(statements.clone())
+         );
+         assert_eq!(
+             tracker.all_pending_statements_for(counterparty),
+             vec![(ValidatorIndex(0), CompactStatement::Seconded(candidate_hash))]
+         );
+
+         tracker.learned_fresh_statement(&groups, &session_topology, validator_index, &statement);
+         tracker.sent_or_received_direct_statement(
+             &groups,
+             validator_index,
+             counterparty,
+             &statement,
+         );
+
+         // There should be no pending statements now (for the counterparty).
+         assert_eq!(
+             tracker.pending_statements_for(counterparty, candidate_hash),
+             Some(StatementFilter::blank(group_size))
+         );
+         assert_eq!(tracker.all_pending_statements_for(counterparty), vec![]);
+     }
+}
diff --git a/node/network/statement-distribution/src/vstaging/groups.rs b/node/network/statement-distribution/src/vstaging/groups.rs
new file mode 100644
index 000000000000..86321b30f220
--- /dev/null
+++ b/node/network/statement-distribution/src/vstaging/groups.rs
@@ -0,0 +1,70 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! A utility for tracking groups and their members within a session.
+
+use polkadot_node_primitives::minimum_votes;
+use polkadot_primitives::vstaging::{GroupIndex, IndexedVec, ValidatorIndex};
+
+use std::collections::HashMap;
+
+/// Validator groups within a session, plus some helpful indexing for
+/// looking up groups by validator index.
+#[derive(Debug, Clone)]
+pub struct Groups {
+     groups: IndexedVec<GroupIndex, Vec<ValidatorIndex>>,
+     by_validator_index: HashMap<ValidatorIndex, GroupIndex>,
+}
+
+impl Groups {
+     /// Create a new [`Groups`] tracker with the groups from the session.
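+     ///
+     /// A small usage sketch (assumed session data, not part of this patch):
+     /// ```ignore
+     /// let groups = Groups::new(session_info.validator_groups.clone());
+     /// if let Some(group) = groups.by_validator_index(ValidatorIndex(0)) {
+     ///     let _members = groups.get(group);
+     ///     let _size_and_threshold = groups.get_size_and_backing_threshold(group);
+     /// }
+     /// ```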
+     pub fn new(groups: IndexedVec<GroupIndex, Vec<ValidatorIndex>>) -> Self {
+         let mut by_validator_index = HashMap::new();
+
+         for (i, group) in groups.iter().enumerate() {
+             let index = GroupIndex(i as _);
+             for v in group {
+                 by_validator_index.insert(*v, index);
+             }
+         }
+
+         Groups { groups, by_validator_index }
+     }
+
+     /// Access all the underlying groups.
+     pub fn all(&self) -> &IndexedVec<GroupIndex, Vec<ValidatorIndex>> {
+         &self.groups
+     }
+
+     /// Get the underlying group validators by group index.
+     pub fn get(&self, group_index: GroupIndex) -> Option<&[ValidatorIndex]> {
+         self.groups.get(group_index).map(|x| &x[..])
+     }
+
+     /// Get the backing group size and backing threshold.
+     pub fn get_size_and_backing_threshold(
+         &self,
+         group_index: GroupIndex,
+     ) -> Option<(usize, usize)> {
+         self.get(group_index).map(|g| (g.len(), minimum_votes(g.len())))
+     }
+
+     /// Get the group index for a validator by index.
+     pub fn by_validator_index(&self, validator_index: ValidatorIndex) -> Option<GroupIndex> {
+         self.by_validator_index.get(&validator_index).map(|x| *x)
+     }
+}
diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs
new file mode 100644
index 000000000000..a562668627a1
--- /dev/null
+++ b/node/network/statement-distribution/src/vstaging/mod.rs
@@ -0,0 +1,2658 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! Implementation of the v2 statement distribution protocol,
+//! designed for asynchronous backing.
+
+use polkadot_node_network_protocol::{
+     self as net_protocol,
+     grid_topology::SessionGridTopology,
+     peer_set::ValidationVersion,
+     request_response::{
+         incoming::OutgoingResponse,
+         vstaging::{AttestedCandidateRequest, AttestedCandidateResponse},
+         IncomingRequest, IncomingRequestReceiver, Requests,
+         MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS,
+     },
+     vstaging::{self as protocol_vstaging, StatementFilter},
+     IfDisconnected, PeerId, UnifiedReputationChange as Rep, Versioned, View,
+};
+use polkadot_node_primitives::{
+     SignedFullStatementWithPVD, StatementWithPVD as FullStatementWithPVD,
+};
+use polkadot_node_subsystem::{
+     messages::{
+         CandidateBackingMessage, HypotheticalCandidate, HypotheticalFrontierRequest,
+         NetworkBridgeEvent, NetworkBridgeTxMessage, ProspectiveParachainsMessage,
+     },
+     overseer, ActivatedLeaf,
+};
+use polkadot_node_subsystem_util::{
+     backing_implicit_view::View as ImplicitView, runtime::ProspectiveParachainsMode,
+};
+use polkadot_primitives::vstaging::{
+     AuthorityDiscoveryId, CandidateHash, CompactStatement, CoreIndex, CoreState, GroupIndex,
+     GroupRotationInfo, Hash, Id as ParaId, IndexedVec, SessionIndex, SessionInfo, SignedStatement,
+     SigningContext, UncheckedSignedStatement, ValidatorId, ValidatorIndex,
+};
+
+use sp_keystore::SyncCryptoStorePtr;
+
+use fatality::Nested;
+use futures::{
+     channel::{mpsc, oneshot},
+     stream::FuturesUnordered,
+     SinkExt, StreamExt,
+};
+
+use std::collections::{
+     hash_map::{Entry, HashMap},
+     HashSet,
+};
+
+use crate::{
+     error::{JfyiError, JfyiErrorResult},
+     LOG_TARGET,
+};
+use candidates::{BadAdvertisement, Candidates, PostConfirmation};
+use cluster::{Accept as ClusterAccept, ClusterTracker, RejectIncoming as ClusterRejectIncoming};
+use grid::GridTracker;
+use groups::Groups;
+use requests::{CandidateIdentifier, RequestProperties};
+use statement_store::{StatementOrigin, StatementStore};
+
+pub use requests::{RequestManager, UnhandledResponse};
+
+mod candidates;
+mod cluster;
+mod grid;
+mod groups;
+mod requests;
+mod statement_store;
+
+#[cfg(test)]
+mod tests;
+
+const COST_UNEXPECTED_STATEMENT: Rep = Rep::CostMinor("Unexpected Statement");
+const COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE: Rep =
+     Rep::CostMinor("Unexpected Statement, missing knowledge for relay parent");
+const COST_EXCESSIVE_SECONDED: Rep = Rep::CostMinor("Sent Excessive `Seconded` Statements");
+
+const COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE: Rep =
+     Rep::CostMinor("Unexpected Manifest, missing knowledge for relay parent");
+const COST_UNEXPECTED_MANIFEST_DISALLOWED: Rep =
+     Rep::CostMinor("Unexpected Manifest, Peer Disallowed");
+const COST_CONFLICTING_MANIFEST: Rep = Rep::CostMajor("Manifest conflicts with previous");
+const COST_INSUFFICIENT_MANIFEST: Rep =
+     Rep::CostMajor("Manifest statements insufficient to back candidate");
+const COST_MALFORMED_MANIFEST: Rep = Rep::CostMajor("Manifest is malformed");
+const COST_UNEXPECTED_ACKNOWLEDGEMENT_UNKNOWN_CANDIDATE: Rep =
+     Rep::CostMinor("Unexpected acknowledgement, unknown candidate");
+
+const COST_INVALID_SIGNATURE: Rep = Rep::CostMajor("Invalid Statement Signature");
+const COST_IMPROPERLY_DECODED_RESPONSE: Rep =
+     Rep::CostMajor("Improperly Encoded Candidate Response");
+const COST_INVALID_RESPONSE: Rep = Rep::CostMajor("Invalid Candidate Response");
+const COST_UNREQUESTED_RESPONSE_STATEMENT: Rep =
+     Rep::CostMajor("Un-requested Statement In Response");
+const COST_INACCURATE_ADVERTISEMENT: Rep =
+     Rep::CostMajor("Peer advertised a candidate inaccurately");
+
+const COST_INVALID_REQUEST: Rep = Rep::CostMajor("Peer sent unparsable request");
+const COST_INVALID_REQUEST_BITFIELD_SIZE: Rep =
+     Rep::CostMajor("Attested candidate request bitfields have wrong size");
+const COST_UNEXPECTED_REQUEST: Rep = Rep::CostMajor("Unexpected attested candidate request");
+
+const BENEFIT_VALID_RESPONSE: Rep = Rep::BenefitMajor("Peer Answered Candidate Request");
+const BENEFIT_VALID_STATEMENT: Rep = Rep::BenefitMajor("Peer provided a valid statement");
+const BENEFIT_VALID_STATEMENT_FIRST: Rep =
+     Rep::BenefitMajorFirst("Peer was the first to provide a valid statement");
+
+struct PerRelayParentState {
+     local_validator: Option<LocalValidatorState>,
+     statement_store: StatementStore,
+     availability_cores: Vec<CoreState>,
+     group_rotation_info: GroupRotationInfo,
+     seconding_limit: usize,
+     session: SessionIndex,
+}
+
+// per-relay-parent local validator state.
+struct LocalValidatorState {
+     // The index of the validator.
+     index: ValidatorIndex,
+     // our validator group
+     group: GroupIndex,
+     // the assignment of our validator group, if any.
+     assignment: Option<ParaId>,
+     // the 'direct-in-group' communication at this relay-parent.
+     cluster_tracker: ClusterTracker,
+     // the grid-level communication at this relay-parent.
+     grid_tracker: GridTracker,
+}
+
+struct PerSessionState {
+     session_info: SessionInfo,
+     groups: Groups,
+     authority_lookup: HashMap<AuthorityDiscoveryId, ValidatorIndex>,
+     // is only `None` in the time between seeing a session and
+     // getting the topology from the gossip-support subsystem
+     grid_view: Option<grid::SessionTopologyView>,
+     local_validator: Option<ValidatorIndex>,
+}
+
+impl PerSessionState {
+     async fn new(session_info: SessionInfo, keystore: &SyncCryptoStorePtr) -> Self {
+         let groups = Groups::new(session_info.validator_groups.clone());
+         let mut authority_lookup = HashMap::new();
+         for (i, ad) in session_info.discovery_keys.iter().cloned().enumerate() {
+             authority_lookup.insert(ad, ValidatorIndex(i as _));
+         }
+
+         let local_validator = polkadot_node_subsystem_util::signing_key_and_index(
+             session_info.validators.iter(),
+             keystore,
+         )
+         .await;
+
+         PerSessionState {
+             session_info,
+             groups,
+             authority_lookup,
+             grid_view: None,
+             local_validator: local_validator.map(|(_key, index)| index),
+         }
+     }
+
+     fn supply_topology(&mut self, topology: &SessionGridTopology) {
+         let grid_view = grid::build_session_topology(
+             self.session_info.validator_groups.iter(),
+             topology,
+             self.local_validator,
+         );
+
+         self.grid_view = Some(grid_view);
+     }
+}
+
+pub(crate) struct State {
+     /// The utility for managing the implicit and explicit views in a consistent way.
+     ///
+     /// We only feed leaves which have prospective parachains enabled to this view.
+     implicit_view: ImplicitView,
+     candidates: Candidates,
+     per_relay_parent: HashMap<Hash, PerRelayParentState>,
+     per_session: HashMap<SessionIndex, PerSessionState>,
+     peers: HashMap<PeerId, PeerState>,
+     keystore: SyncCryptoStorePtr,
+     authorities: HashMap<AuthorityDiscoveryId, PeerId>,
+     request_manager: RequestManager,
+}
+
+impl State {
+     /// Create a new state.
+     pub(crate) fn new(keystore: SyncCryptoStorePtr) -> Self {
+         State {
+             implicit_view: Default::default(),
+             candidates: Default::default(),
+             per_relay_parent: HashMap::new(),
+             per_session: HashMap::new(),
+             peers: HashMap::new(),
+             keystore,
+             authorities: HashMap::new(),
+             request_manager: RequestManager::new(),
+         }
+     }
+}
+
+// For the provided validator index, if there is a connected peer controlling the given authority
+// ID, then return that peer's `PeerId`.
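+//
+// A usage sketch (assumed `state` and `per_session` bindings, not part of this
+// patch):
+//
+// if let Some(peer) = connected_validator_peer(&state.authorities, per_session, ValidatorIndex(5)) {
+//     // `peer` is the connected `PeerId` bearing that validator's discovery key.
+// }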
+fn connected_validator_peer(
+	authorities: &HashMap<AuthorityDiscoveryId, PeerId>,
+	per_session: &PerSessionState,
+	validator_index: ValidatorIndex,
+) -> Option<PeerId> {
+	per_session
+		.session_info
+		.discovery_keys
+		.get(validator_index.0 as usize)
+		.and_then(|k| authorities.get(k))
+		.map(|p| *p)
+}
+
+struct PeerState {
+	view: View,
+	implicit_view: HashSet<Hash>,
+	discovery_ids: Option<HashSet<AuthorityDiscoveryId>>,
+}
+
+impl PeerState {
+	// Update the view, returning a vector of implicit relay-parents which weren't previously
+	// part of the view.
+	fn update_view(&mut self, new_view: View, local_implicit: &ImplicitView) -> Vec<Hash> {
+		let next_implicit = new_view
+			.iter()
+			.flat_map(|x| local_implicit.known_allowed_relay_parents_under(x, None))
+			.flatten()
+			.cloned()
+			.collect::<HashSet<_>>();
+
+		let fresh_implicit = next_implicit
+			.iter()
+			.filter(|x| !self.implicit_view.contains(x))
+			.cloned()
+			.collect();
+
+		self.view = new_view;
+		self.implicit_view = next_implicit;
+
+		fresh_implicit
+	}
+
+	// Attempt to reconcile the view with new information about the implicit relay parents
+	// under an active leaf.
+	fn reconcile_active_leaf(&mut self, leaf_hash: Hash, implicit: &[Hash]) -> Vec<Hash> {
+		if !self.view.contains(&leaf_hash) {
+			return Vec::new()
+		}
+
+		let mut v = Vec::with_capacity(implicit.len());
+		for i in implicit {
+			if self.implicit_view.insert(*i) {
+				v.push(*i);
+			}
+		}
+		v
+	}
+
+	// Whether we know that a peer knows a relay-parent.
+	// The peer knows the relay-parent if it is either implicit or explicit
+	// in their view. However, if it is implicit via an active-leaf we don't
+	// recognize, we will not accurately be able to recognize them as 'knowing'
+	// the relay-parent.
+	fn knows_relay_parent(&self, relay_parent: &Hash) -> bool {
+		self.implicit_view.contains(relay_parent) || self.view.contains(relay_parent)
+	}
+
+	fn is_authority(&self, authority_id: &AuthorityDiscoveryId) -> bool {
+		self.discovery_ids.as_ref().map_or(false, |x| x.contains(authority_id))
+	}
+
+	fn iter_known_discovery_ids(&self) -> impl Iterator<Item = &AuthorityDiscoveryId> {
+		self.discovery_ids.as_ref().into_iter().flatten()
+	}
+}
+
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
+pub(crate) async fn handle_network_update<Context>(
+	ctx: &mut Context,
+	state: &mut State,
+	update: NetworkBridgeEvent<net_protocol::VersionedValidationProtocol>,
+) {
+	match update {
+		NetworkBridgeEvent::PeerConnected(peer_id, role, protocol_version, mut authority_ids) => {
+			gum::trace!(target: LOG_TARGET, ?peer_id, ?role, ?protocol_version, "Peer connected");
+
+			if protocol_version != ValidationVersion::VStaging.into() {
+				return
+			}
+
+			if let Some(ref mut authority_ids) = authority_ids {
+				authority_ids.retain(|a| match state.authorities.entry(a.clone()) {
+					Entry::Vacant(e) => {
+						e.insert(peer_id);
+						true
+					},
+					Entry::Occupied(e) => {
+						gum::trace!(
+							target: LOG_TARGET,
+							authority_id = ?a,
+							existing_peer = ?e.get(),
+							new_peer = ?peer_id,
+							"Ignoring new peer with duplicate authority ID as a bearer of that identity"
+						);
+
+						false
+					},
+				});
+			}
+
+			state.peers.insert(
+				peer_id,
+				PeerState {
+					view: View::default(),
+					implicit_view: HashSet::new(),
+					discovery_ids: authority_ids,
+				},
+			);
+		},
+		NetworkBridgeEvent::PeerDisconnected(peer_id) => {
+			if let Some(p) = state.peers.remove(&peer_id) {
+				for discovery_key in p.discovery_ids.into_iter().flatten() {
+					state.authorities.remove(&discovery_key);
+				}
+			}
+		},
+		NetworkBridgeEvent::NewGossipTopology(topology) => {
+			let new_session_index = topology.session;
+			let new_topology = topology.topology;
+
+			if let Some(per_session) = state.per_session.get_mut(&new_session_index) {
+				per_session.supply_topology(&new_topology);
+			}
+
+			// TODO [https://github.com/paritytech/polkadot/issues/6194]
+			// technically, we should account for the fact that the session topology might
+			// come late, and for all relay-parents with this session, send all grid peers
+			// any `BackedCandidateInv` messages they might need.
+			//
+			// in practice, this is a small issue & the API of receiving topologies could
+			// be altered to fix it altogether.
+		},
+		NetworkBridgeEvent::PeerMessage(peer_id, message) => match message {
+			net_protocol::StatementDistributionMessage::V1(_) => return,
+			net_protocol::StatementDistributionMessage::VStaging(
+				protocol_vstaging::StatementDistributionMessage::V1Compatibility(_),
+			) => return,
+			net_protocol::StatementDistributionMessage::VStaging(
+				protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement),
+			) => handle_incoming_statement(ctx, state, peer_id, relay_parent, statement).await,
+			net_protocol::StatementDistributionMessage::VStaging(
+				protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(inner),
+			) => handle_incoming_manifest(ctx, state, peer_id, inner).await,
+			net_protocol::StatementDistributionMessage::VStaging(
+				protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(inner),
+			) => handle_incoming_acknowledgement(ctx, state, peer_id, inner).await,
+		},
+		NetworkBridgeEvent::PeerViewChange(peer_id, view) =>
+			handle_peer_view_update(ctx, state, peer_id, view).await,
+		NetworkBridgeEvent::OurViewChange(_view) => {
+			// handled by `handle_active_leaves_update`
+		},
+	}
+}
+
+/// Handle a new active leaf. This should only be called for leaves which support
+/// prospective parachains.
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
+pub(crate) async fn handle_active_leaves_update<Context>(
+	ctx: &mut Context,
+	state: &mut State,
+	activated: &ActivatedLeaf,
+	leaf_mode: ProspectiveParachainsMode,
+) -> JfyiErrorResult<()> {
+	let seconding_limit = match leaf_mode {
+		ProspectiveParachainsMode::Disabled => return Ok(()),
+		ProspectiveParachainsMode::Enabled { max_candidate_depth, .. } => max_candidate_depth + 1,
+	};
+
+	state
+		.implicit_view
+		.activate_leaf(ctx.sender(), activated.hash)
+		.await
+		.map_err(JfyiError::ActivateLeafFailure)?;
+
+	let new_relay_parents =
+		state.implicit_view.all_allowed_relay_parents().cloned().collect::<Vec<_>>();
+	for new_relay_parent in new_relay_parents.iter().cloned() {
+		if state.per_relay_parent.contains_key(&new_relay_parent) {
+			continue
+		}
+
+		// New leaf: fetch info from runtime API and initialize
+		// `per_relay_parent`.
+
+		let session_index = polkadot_node_subsystem_util::request_session_index_for_child(
+			new_relay_parent,
+			ctx.sender(),
+		)
+		.await
+		.await
+		.map_err(JfyiError::RuntimeApiUnavailable)?
+		.map_err(JfyiError::FetchSessionIndex)?;
+
+		let availability_cores = polkadot_node_subsystem_util::request_availability_cores(
+			new_relay_parent,
+			ctx.sender(),
+		)
+		.await
+		.await
+		.map_err(JfyiError::RuntimeApiUnavailable)?
+		.map_err(JfyiError::FetchAvailabilityCores)?;
+
+		let group_rotation_info =
+			polkadot_node_subsystem_util::request_validator_groups(new_relay_parent, ctx.sender())
+				.await
+				.await
+				.map_err(JfyiError::RuntimeApiUnavailable)?
+				.map_err(JfyiError::FetchValidatorGroups)?
+ .1; + + if !state.per_session.contains_key(&session_index) { + let session_info = polkadot_node_subsystem_util::request_session_info( + new_relay_parent, + session_index, + ctx.sender(), + ) + .await + .await + .map_err(JfyiError::RuntimeApiUnavailable)? + .map_err(JfyiError::FetchSessionInfo)?; + + let session_info = match session_info { + None => { + gum::warn!( + target: LOG_TARGET, + relay_parent = ?new_relay_parent, + "No session info available for current session" + ); + + continue + }, + Some(s) => s, + }; + + state + .per_session + .insert(session_index, PerSessionState::new(session_info, &state.keystore).await); + } + + let per_session = state + .per_session + .get(&session_index) + .expect("either existed or just inserted; qed"); + + let local_validator = per_session.local_validator.and_then(|v| { + find_local_validator_state( + v, + &per_session.groups, + &availability_cores, + &group_rotation_info, + seconding_limit, + ) + }); + + state.per_relay_parent.insert( + new_relay_parent, + PerRelayParentState { + local_validator, + statement_store: StatementStore::new(&per_session.groups), + availability_cores, + group_rotation_info, + seconding_limit, + session: session_index, + }, + ); + } + + // Reconcile all peers' views with the active leaf and any relay parents + // it implies. If they learned about the block before we did, this reconciliation will give non-empty + // results and we should send them messages concerning all activated relay-parents. + { + let mut update_peers = Vec::new(); + for (peer, peer_state) in state.peers.iter_mut() { + let fresh = peer_state.reconcile_active_leaf(activated.hash, &new_relay_parents); + if !fresh.is_empty() { + update_peers.push((*peer, fresh)); + } + } + + for (peer, fresh) in update_peers { + for fresh_relay_parent in fresh { + send_peer_messages_for_relay_parent(ctx, state, peer, fresh_relay_parent).await; + } + } + } + + new_leaf_fragment_tree_updates(ctx, state, activated.hash).await; + + Ok(()) +} + +fn find_local_validator_state( + validator_index: ValidatorIndex, + groups: &Groups, + availability_cores: &[CoreState], + group_rotation_info: &GroupRotationInfo, + seconding_limit: usize, +) -> Option { + if groups.all().is_empty() { + return None + } + + let our_group = groups.by_validator_index(validator_index)?; + + // note: this won't work well for parathreads because it only works + // when core assignments to paras are static throughout the session. + + let core = group_rotation_info.core_for_group(our_group, availability_cores.len()); + let para = availability_cores.get(core.0 as usize).and_then(|c| c.para_id()); + let group_validators = groups.get(our_group)?.to_owned(); + + Some(LocalValidatorState { + index: validator_index, + group: our_group, + assignment: para, + cluster_tracker: ClusterTracker::new(group_validators, seconding_limit) + .expect("group is non-empty because we are in it; qed"), + grid_tracker: GridTracker::default(), + }) +} + +pub(crate) fn handle_deactivate_leaves(state: &mut State, leaves: &[Hash]) { + // deactivate the leaf in the implicit view. + for leaf in leaves { + state.implicit_view.deactivate_leaf(*leaf); + } + + let relay_parents = state.implicit_view.all_allowed_relay_parents().collect::>(); + + // fast exit for no-op. + if relay_parents.len() == state.per_relay_parent.len() { + return + } + + // clean up per-relay-parent data based on everything removed. 
+	state.per_relay_parent.retain(|r, _| relay_parents.contains(r));
+
+	// Clean up all requests
+	for leaf in leaves {
+		state.request_manager.remove_by_relay_parent(*leaf);
+	}
+
+	state.candidates.on_deactivate_leaves(&leaves, |h| relay_parents.contains(h));
+
+	// clean up sessions based on everything remaining.
+	let sessions: HashSet<_> = state.per_relay_parent.values().map(|r| r.session).collect();
+	state.per_session.retain(|s, _| sessions.contains(s));
+}
+
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)]
+async fn handle_peer_view_update<Context>(
+	ctx: &mut Context,
+	state: &mut State,
+	peer: PeerId,
+	new_view: View,
+) {
+	let fresh_implicit = {
+		let peer_data = match state.peers.get_mut(&peer) {
+			None => return,
+			Some(p) => p,
+		};
+
+		peer_data.update_view(new_view, &state.implicit_view)
+	};
+
+	for new_relay_parent in fresh_implicit {
+		send_peer_messages_for_relay_parent(ctx, state, peer, new_relay_parent).await;
+	}
+}
+
+// Returns an iterator over known validator indices, given an iterator over discovery IDs
+// and a mapping from discovery IDs to validator indices.
+fn find_validator_ids<'a>(
+	known_discovery_ids: impl IntoIterator<Item = &'a AuthorityDiscoveryId>,
+	discovery_mapping: impl Fn(&AuthorityDiscoveryId) -> Option<&'a ValidatorIndex>,
+) -> impl Iterator<Item = ValidatorIndex> {
+	known_discovery_ids.into_iter().filter_map(discovery_mapping).cloned()
+}
+
+/// Send a peer, apparently just becoming aware of a relay-parent, all messages
+/// concerning that relay-parent.
+///
+/// In particular, we send all statements pertaining to our common cluster,
+/// as well as all manifests, acknowledgements, or other grid statements.
+///
+/// Note that due to the way we handle views, our knowledge of peers' relay parents
+/// may "oscillate" with relay parents repeatedly leaving and entering the
+/// view of a peer based on the implicit view of active leaves.
+///
+/// This function is designed to be cheap and not to send duplicate messages in repeated
+/// cases.
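+///
+/// A minimal sketch of the intended call pattern, as in `handle_peer_view_update`
+/// above (illustrative only):
+///
+/// ```ignore
+/// for new_relay_parent in fresh_implicit {
+/// 	send_peer_messages_for_relay_parent(ctx, state, peer, new_relay_parent).await;
+/// }
+/// ```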
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn send_peer_messages_for_relay_parent( + ctx: &mut Context, + state: &mut State, + peer: PeerId, + relay_parent: Hash, +) { + let peer_data = match state.peers.get_mut(&peer) { + None => return, + Some(p) => p, + }; + + let relay_parent_state = match state.per_relay_parent.get_mut(&relay_parent) { + None => return, + Some(s) => s, + }; + + let per_session_state = match state.per_session.get(&relay_parent_state.session) { + None => return, + Some(s) => s, + }; + + for validator_id in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { + per_session_state.authority_lookup.get(a) + }) { + if let Some(local_validator_state) = relay_parent_state.local_validator.as_mut() { + send_pending_cluster_statements( + ctx, + relay_parent, + &peer, + validator_id, + &mut local_validator_state.cluster_tracker, + &state.candidates, + &relay_parent_state.statement_store, + ) + .await; + } + + send_pending_grid_messages( + ctx, + relay_parent, + &peer, + validator_id, + &per_session_state.groups, + relay_parent_state, + &state.candidates, + ) + .await; + } +} + +fn pending_statement_network_message( + statement_store: &StatementStore, + relay_parent: Hash, + peer: &PeerId, + originator: ValidatorIndex, + compact: CompactStatement, +) -> Option<(Vec, net_protocol::VersionedValidationProtocol)> { + statement_store + .validator_statement(originator, compact) + .map(|s| s.as_unchecked().clone()) + .map(|signed| { + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, signed) + }) + .map(|msg| (vec![*peer], Versioned::VStaging(msg).into())) +} + +/// Send a peer all pending cluster statements for a relay parent. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn send_pending_cluster_statements( + ctx: &mut Context, + relay_parent: Hash, + peer_id: &PeerId, + peer_validator_id: ValidatorIndex, + cluster_tracker: &mut ClusterTracker, + candidates: &Candidates, + statement_store: &StatementStore, +) { + let pending_statements = cluster_tracker.pending_statements_for(peer_validator_id); + let network_messages = pending_statements + .into_iter() + .filter_map(|(originator, compact)| { + if !candidates.is_confirmed(compact.candidate_hash()) { + return None + } + + let res = pending_statement_network_message( + &statement_store, + relay_parent, + peer_id, + originator, + compact.clone(), + ); + + if res.is_some() { + cluster_tracker.note_sent(peer_validator_id, originator, compact); + } + + res + }) + .collect::>(); + + if network_messages.is_empty() { + return + } + + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(network_messages)) + .await; +} + +/// Send a peer all pending grid messages / acknowledgements / follow up statements +/// upon learning about a new relay parent. 
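+///
+/// For each pending full manifest, the message sent has the shape below
+/// (mirroring its construction in the function body; the bindings are assumed
+/// to be in scope):
+///
+/// ```ignore
+/// let manifest = protocol_vstaging::BackedCandidateManifest {
+/// 	relay_parent,
+/// 	candidate_hash,
+/// 	group_index,
+/// 	para_id: confirmed_candidate.para_id(),
+/// 	parent_head_data_hash: confirmed_candidate.parent_head_data_hash(),
+/// 	statement_knowledge: local_knowledge.clone(),
+/// };
+/// ```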
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn send_pending_grid_messages( + ctx: &mut Context, + relay_parent: Hash, + peer_id: &PeerId, + peer_validator_id: ValidatorIndex, + groups: &Groups, + relay_parent_state: &mut PerRelayParentState, + candidates: &Candidates, +) { + let pending_manifests = { + let local_validator = match relay_parent_state.local_validator.as_mut() { + None => return, + Some(l) => l, + }; + + let grid_tracker = &mut local_validator.grid_tracker; + grid_tracker.pending_manifests_for(peer_validator_id) + }; + + let mut messages: Vec<(Vec, net_protocol::VersionedValidationProtocol)> = Vec::new(); + for (candidate_hash, kind) in pending_manifests { + let confirmed_candidate = match candidates.get_confirmed(&candidate_hash) { + None => continue, // sanity + Some(c) => c, + }; + + let group_index = confirmed_candidate.group_index(); + + let local_knowledge = { + let group_size = match groups.get(group_index) { + None => return, // sanity + Some(x) => x.len(), + }; + + local_knowledge_filter( + group_size, + group_index, + candidate_hash, + &relay_parent_state.statement_store, + ) + }; + + match kind { + grid::ManifestKind::Full => { + let manifest = protocol_vstaging::BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index, + para_id: confirmed_candidate.para_id(), + parent_head_data_hash: confirmed_candidate.parent_head_data_hash(), + statement_knowledge: local_knowledge.clone(), + }; + + let grid = &mut relay_parent_state + .local_validator + .as_mut() + .expect("determined to be some earlier in this function; qed") + .grid_tracker; + + grid.manifest_sent_to( + groups, + peer_validator_id, + candidate_hash, + local_knowledge.clone(), + ); + + messages.push(( + vec![*peer_id], + Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest, + ), + ) + .into(), + )); + }, + grid::ManifestKind::Acknowledgement => { + messages.extend(acknowledgement_and_statement_messages( + *peer_id, + peer_validator_id, + groups, + relay_parent_state, + relay_parent, + group_index, + candidate_hash, + local_knowledge, + )); + }, + } + } + + // Send all remaining pending grid statements for a validator, not just + // those for the acknowledgements we've sent. + // + // otherwise, we might receive statements while the grid peer is "out of view" and then + // not send them when they get back "in view". problem! + { + let grid_tracker = &mut relay_parent_state + .local_validator + .as_mut() + .expect("checked earlier; qed") + .grid_tracker; + + let pending_statements = grid_tracker.all_pending_statements_for(peer_validator_id); + + let extra_statements = + pending_statements.into_iter().filter_map(|(originator, compact)| { + let res = pending_statement_network_message( + &relay_parent_state.statement_store, + relay_parent, + peer_id, + originator, + compact.clone(), + ); + + if res.is_some() { + grid_tracker.sent_or_received_direct_statement( + groups, + originator, + peer_validator_id, + &compact, + ); + } + + res + }); + + messages.extend(extra_statements); + } + + if messages.is_empty() { + return + } + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(messages)).await; +} + +// Imports a locally originating statement and distributes it to peers. 
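+//
+// A hedged sketch of the expected entry point: candidate backing shares a
+// locally-signed statement with this subsystem, which imports and circulates
+// it (the `Share` message name is an assumption about the surrounding
+// plumbing):
+//
+//   // On `StatementDistributionMessage::Share(relay_parent, statement)`:
+//   share_local_statement(ctx, state, relay_parent, statement).await?;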
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +pub(crate) async fn share_local_statement( + ctx: &mut Context, + state: &mut State, + relay_parent: Hash, + statement: SignedFullStatementWithPVD, +) -> JfyiErrorResult<()> { + let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { + None => return Err(JfyiError::InvalidShare), + Some(x) => x, + }; + + let per_session = match state.per_session.get(&per_relay_parent.session) { + Some(s) => s, + None => return Ok(()), + }; + + let (local_index, local_assignment, local_group) = + match per_relay_parent.local_validator.as_ref() { + None => return Err(JfyiError::InvalidShare), + Some(l) => (l.index, l.assignment, l.group), + }; + + // Two possibilities: either the statement is `Seconded` or we already + // have the candidate. Sanity: check the para-id is valid. + let expected = match statement.payload() { + FullStatementWithPVD::Seconded(ref c, _) => + Some((c.descriptor().para_id, c.descriptor().relay_parent)), + FullStatementWithPVD::Valid(hash) => + state.candidates.get_confirmed(&hash).map(|c| (c.para_id(), c.relay_parent())), + }; + + let is_seconded = match statement.payload() { + FullStatementWithPVD::Seconded(_, _) => true, + FullStatementWithPVD::Valid(_) => false, + }; + + let (expected_para, expected_relay_parent) = match expected { + None => return Err(JfyiError::InvalidShare), + Some(x) => x, + }; + + if local_index != statement.validator_index() { + return Err(JfyiError::InvalidShare) + } + + if is_seconded && + per_relay_parent.statement_store.seconded_count(&local_index) == + per_relay_parent.seconding_limit + { + gum::warn!( + target: LOG_TARGET, + limit = ?per_relay_parent.seconding_limit, + "Local node has issued too many `Seconded` statements", + ); + return Err(JfyiError::InvalidShare) + } + + if local_assignment != Some(expected_para) || relay_parent != expected_relay_parent { + return Err(JfyiError::InvalidShare) + } + + let mut post_confirmation = None; + + // Insert candidate if unknown + more sanity checks. + let compact_statement = { + let compact_statement = FullStatementWithPVD::signed_to_compact(statement.clone()); + let candidate_hash = CandidateHash(*statement.payload().candidate_hash()); + + if let FullStatementWithPVD::Seconded(ref c, ref pvd) = statement.payload() { + post_confirmation = state.candidates.confirm_candidate( + candidate_hash, + c.clone(), + pvd.clone(), + local_group, + ); + }; + + match per_relay_parent.statement_store.insert( + &per_session.groups, + compact_statement.clone(), + StatementOrigin::Local, + ) { + Ok(false) | Err(_) => { + gum::warn!( + target: LOG_TARGET, + statement = ?compact_statement.payload(), + "Candidate backing issued redundant statement?", + ); + return Err(JfyiError::InvalidShare) + }, + Ok(true) => {}, + } + + { + let l = per_relay_parent.local_validator.as_mut().expect("checked above; qed"); + l.cluster_tracker.note_issued(local_index, compact_statement.payload().clone()); + } + + if let Some(ref session_topology) = per_session.grid_view { + let l = per_relay_parent.local_validator.as_mut().expect("checked above; qed"); + l.grid_tracker.learned_fresh_statement( + &per_session.groups, + session_topology, + local_index, + &compact_statement.payload(), + ); + } + + compact_statement + }; + + // send the compact version of the statement to any peers which need it. 
+ circulate_statement( + ctx, + relay_parent, + per_relay_parent, + per_session, + &state.candidates, + &state.authorities, + &state.peers, + compact_statement, + ) + .await; + + if let Some(post_confirmation) = post_confirmation { + apply_post_confirmation(ctx, state, post_confirmation).await; + } + + Ok(()) +} + +// two kinds of targets: those in our 'cluster' (currently just those in the same group), +// and those we are propagating to through the grid. +enum DirectTargetKind { + Cluster, + Grid, +} + +// Circulates a compact statement to all peers who need it: those in the current group of the +// local validator and grid peers which have already indicated that they know the candidate as backed. +// +// We only circulate statements for which we have the confirmed candidate, even to the local group. +// +// The group index which is _canonically assigned_ to this parachain must be +// specified already. This function should not be used when the candidate receipt and +// therefore the canonical group for the parachain is unknown. +// +// preconditions: the candidate entry exists in the state under the relay parent +// and the statement has already been imported into the entry. If this is a `Valid` +// statement, then there must be at least one `Seconded` statement. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn circulate_statement( + ctx: &mut Context, + relay_parent: Hash, + relay_parent_state: &mut PerRelayParentState, + per_session: &PerSessionState, + candidates: &Candidates, + authorities: &HashMap, + peers: &HashMap, + statement: SignedStatement, +) { + let session_info = &per_session.session_info; + + let candidate_hash = *statement.payload().candidate_hash(); + + let compact_statement = statement.payload().clone(); + let is_confirmed = candidates.is_confirmed(&candidate_hash); + + let originator = statement.validator_index(); + let (local_validator, targets) = { + let local_validator = match relay_parent_state.local_validator.as_mut() { + Some(v) => v, + None => return, // sanity: nothing to propagate if not a validator. + }; + + let statement_group = per_session.groups.by_validator_index(originator); + + // We're not meant to circulate statements in the cluster until we have the confirmed candidate. + let cluster_relevant = Some(local_validator.group) == statement_group; + let cluster_targets = if is_confirmed && cluster_relevant { + Some( + local_validator + .cluster_tracker + .targets() + .iter() + .filter(|&&v| { + local_validator + .cluster_tracker + .can_send(v, originator, compact_statement.clone()) + .is_ok() + }) + .filter(|&v| v != &local_validator.index) + .map(|v| (*v, DirectTargetKind::Cluster)), + ) + } else { + None + }; + + let grid_targets = local_validator + .grid_tracker + .direct_statement_targets(&per_session.groups, originator, &compact_statement) + .into_iter() + .filter(|v| !cluster_relevant || !local_validator.cluster_tracker.targets().contains(v)) + .map(|v| (v, DirectTargetKind::Grid)); + + let targets = cluster_targets + .into_iter() + .flatten() + .chain(grid_targets) + .filter_map(|(v, k)| { + session_info.discovery_keys.get(v.0 as usize).map(|a| (v, a.clone(), k)) + }) + .collect::>(); + + (local_validator, targets) + }; + + let mut statement_to = Vec::new(); + for (target, authority_id, kind) in targets { + // Find peer ID based on authority ID, and also filter to connected. 
+ let peer_id: PeerId = match authorities.get(&authority_id) { + Some(p) if peers.get(p).map_or(false, |p| p.knows_relay_parent(&relay_parent)) => *p, + None | Some(_) => continue, + }; + + match kind { + DirectTargetKind::Cluster => { + // At this point, all peers in the cluster should 'know' + // the candidate, so we don't expect for this to fail. + if let Ok(()) = local_validator.cluster_tracker.can_send( + target, + originator, + compact_statement.clone(), + ) { + local_validator.cluster_tracker.note_sent( + target, + originator, + compact_statement.clone(), + ); + statement_to.push(peer_id); + } + }, + DirectTargetKind::Grid => { + statement_to.push(peer_id); + local_validator.grid_tracker.sent_or_received_direct_statement( + &per_session.groups, + originator, + target, + &compact_statement, + ); + }, + } + } + + // ship off the network messages to the network bridge. + + if !statement_to.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + statement_to, + Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + statement.as_unchecked().clone(), + )) + .into(), + )) + .await; + } +} +/// Check a statement signature under this parent hash. +fn check_statement_signature( + session_index: SessionIndex, + validators: &IndexedVec, + relay_parent: Hash, + statement: UncheckedSignedStatement, +) -> std::result::Result { + let signing_context = SigningContext { session_index, parent_hash: relay_parent }; + + validators + .get(statement.unchecked_validator_index()) + .ok_or_else(|| statement.clone()) + .and_then(|v| statement.try_into_checked(&signing_context, v)) +} + +async fn report_peer( + sender: &mut impl overseer::StatementDistributionSenderTrait, + peer: PeerId, + rep: Rep, +) { + sender.send_message(NetworkBridgeTxMessage::ReportPeer(peer, rep)).await +} + +/// Handle an incoming statement. +/// +/// This checks whether the sender is allowed to send the statement, +/// either via the cluster or the grid. +/// +/// This also checks the signature of the statement. +/// If the statement is fresh, this function guarantees that after completion +/// - The statement is re-circulated to all relevant peers in both the cluster +/// and the grid +/// - If the candidate is out-of-cluster and is backable and importable, +/// all statements about the candidate have been sent to backing +/// - If the candidate is in-cluster and is importable, +/// the statement has been sent to backing +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn handle_incoming_statement( + ctx: &mut Context, + state: &mut State, + peer: PeerId, + relay_parent: Hash, + statement: UncheckedSignedStatement, +) { + let peer_state = match state.peers.get(&peer) { + None => { + // sanity: should be impossible. + return + }, + Some(p) => p, + }; + + // Ensure we know the relay parent. 
+ let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { + None => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT_MISSING_KNOWLEDGE).await; + return + }, + Some(p) => p, + }; + + let per_session = match state.per_session.get(&per_relay_parent.session) { + None => { + gum::warn!( + target: LOG_TARGET, + session = ?per_relay_parent.session, + "Missing expected session info.", + ); + + return + }, + Some(s) => s, + }; + let session_info = &per_session.session_info; + + let local_validator = match per_relay_parent.local_validator.as_mut() { + None => { + // we shouldn't be receiving statements unless we're a validator + // this session. + report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + }, + Some(l) => l, + }; + + let originator_group = + match per_session.groups.by_validator_index(statement.unchecked_validator_index()) { + Some(g) => g, + None => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + }, + }; + + let cluster_sender_index = { + // This block of code only returns `Some` when both the originator and + // the sending peer are in the cluster. + + let allowed_senders = local_validator + .cluster_tracker + .senders_for_originator(statement.unchecked_validator_index()); + + allowed_senders + .iter() + .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (*i, ad))) + .filter(|(_, ad)| peer_state.is_authority(ad)) + .map(|(i, _)| i) + .next() + }; + + let checked_statement = if let Some(cluster_sender_index) = cluster_sender_index { + match handle_cluster_statement( + relay_parent, + &mut local_validator.cluster_tracker, + per_relay_parent.session, + &per_session.session_info, + statement, + cluster_sender_index, + ) { + Ok(Some(s)) => s, + Ok(None) => return, + Err(rep) => { + report_peer(ctx.sender(), peer, rep).await; + return + }, + } + } else { + let grid_sender_index = local_validator + .grid_tracker + .direct_statement_providers( + &per_session.groups, + statement.unchecked_validator_index(), + statement.unchecked_payload(), + ) + .into_iter() + .filter_map(|i| session_info.discovery_keys.get(i.0 as usize).map(|ad| (i, ad))) + .filter(|(_, ad)| peer_state.is_authority(ad)) + .map(|(i, _)| i) + .next(); + + if let Some(grid_sender_index) = grid_sender_index { + match handle_grid_statement( + relay_parent, + &mut local_validator.grid_tracker, + per_relay_parent.session, + &per_session, + statement, + grid_sender_index, + ) { + Ok(s) => s, + Err(rep) => { + report_peer(ctx.sender(), peer, rep).await; + return + }, + } + } else { + // Not a cluster or grid peer. + report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + } + }; + + let statement = checked_statement.payload().clone(); + let originator_index = checked_statement.validator_index(); + let candidate_hash = *checked_statement.payload().candidate_hash(); + + // Insert an unconfirmed candidate entry if needed. Note that if the candidate is already confirmed, + // this ensures that the assigned group of the originator matches the expected group of the + // parachain. 
+ { + let res = state.candidates.insert_unconfirmed( + peer, + candidate_hash, + relay_parent, + originator_group, + None, + ); + + if let Err(BadAdvertisement) = res { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_STATEMENT).await; + return + } + } + + let confirmed = state.candidates.get_confirmed(&candidate_hash); + let is_confirmed = state.candidates.is_confirmed(&candidate_hash); + if !is_confirmed { + // If the candidate is not confirmed, note that we should attempt + // to request it from the given peer. + let mut request_entry = + state + .request_manager + .get_or_insert(relay_parent, candidate_hash, originator_group); + + request_entry.add_peer(peer); + + // We only successfully accept statements from the grid on confirmed + // candidates, therefore this check only passes if the statement is from the cluster + request_entry.set_cluster_priority(); + } + + let was_fresh = match per_relay_parent.statement_store.insert( + &per_session.groups, + checked_statement.clone(), + StatementOrigin::Remote, + ) { + Err(statement_store::ValidatorUnknown) => { + // sanity: should never happen. + gum::warn!( + target: LOG_TARGET, + ?relay_parent, + validator_index = ?originator_index, + "Error - accepted message from unknown validator." + ); + + return + }, + Ok(known) => known, + }; + + if was_fresh { + report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT_FIRST).await; + let is_importable = state.candidates.is_importable(&candidate_hash); + + if let Some(ref session_topology) = per_session.grid_view { + local_validator.grid_tracker.learned_fresh_statement( + &per_session.groups, + session_topology, + local_validator.index, + &statement, + ); + } + + if let (true, &Some(confirmed)) = (is_importable, &confirmed) { + send_backing_fresh_statements( + ctx, + candidate_hash, + originator_group, + &relay_parent, + &mut *per_relay_parent, + confirmed, + per_session, + ) + .await; + } + + // We always circulate statements at this point. + circulate_statement( + ctx, + relay_parent, + per_relay_parent, + per_session, + &state.candidates, + &state.authorities, + &state.peers, + checked_statement, + ) + .await; + } else { + report_peer(ctx.sender(), peer, BENEFIT_VALID_STATEMENT).await; + } +} + +/// Checks whether a statement is allowed, whether the signature is accurate, +/// and importing into the cluster tracker if successful. +/// +/// if successful, this returns a checked signed statement if it should be imported +/// or otherwise an error indicating a reputational fault. +fn handle_cluster_statement( + relay_parent: Hash, + cluster_tracker: &mut ClusterTracker, + session: SessionIndex, + session_info: &SessionInfo, + statement: UncheckedSignedStatement, + cluster_sender_index: ValidatorIndex, +) -> Result, Rep> { + // additional cluster checks. + let should_import = { + match cluster_tracker.can_receive( + cluster_sender_index, + statement.unchecked_validator_index(), + statement.unchecked_payload().clone(), + ) { + Ok(ClusterAccept::Ok) => true, + Ok(ClusterAccept::WithPrejudice) => false, + Err(ClusterRejectIncoming::ExcessiveSeconded) => return Err(COST_EXCESSIVE_SECONDED), + Err(ClusterRejectIncoming::CandidateUnknown | ClusterRejectIncoming::Duplicate) => + return Err(COST_UNEXPECTED_STATEMENT), + Err(ClusterRejectIncoming::NotInGroup) => { + // sanity: shouldn't be possible; we already filtered this + // out above. + return Err(COST_UNEXPECTED_STATEMENT) + }, + } + }; + + // Ensure the statement is correctly signed. 
+ let checked_statement = + match check_statement_signature(session, &session_info.validators, relay_parent, statement) + { + Ok(s) => s, + Err(_) => return Err(COST_INVALID_SIGNATURE), + }; + + cluster_tracker.note_received( + cluster_sender_index, + checked_statement.validator_index(), + checked_statement.payload().clone(), + ); + + Ok(if should_import { Some(checked_statement) } else { None }) +} + +/// Checks whether the signature is accurate, +/// importing into the grid tracker if successful. +/// +/// if successful, this returns a checked signed statement if it should be imported +/// or otherwise an error indicating a reputational fault. +fn handle_grid_statement( + relay_parent: Hash, + grid_tracker: &mut GridTracker, + session: SessionIndex, + per_session: &PerSessionState, + statement: UncheckedSignedStatement, + grid_sender_index: ValidatorIndex, +) -> Result { + // Ensure the statement is correctly signed. + let checked_statement = match check_statement_signature( + session, + &per_session.session_info.validators, + relay_parent, + statement, + ) { + Ok(s) => s, + Err(_) => return Err(COST_INVALID_SIGNATURE), + }; + + grid_tracker.sent_or_received_direct_statement( + &per_session.groups, + checked_statement.validator_index(), + grid_sender_index, + &checked_statement.payload(), + ); + + Ok(checked_statement) +} + +/// Send backing fresh statements. This should only be performed on importable & confirmed +/// candidates. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn send_backing_fresh_statements( + ctx: &mut Context, + candidate_hash: CandidateHash, + group_index: GroupIndex, + relay_parent: &Hash, + relay_parent_state: &mut PerRelayParentState, + confirmed: &candidates::ConfirmedCandidate, + per_session: &PerSessionState, +) { + let group_validators = per_session.groups.get(group_index).unwrap_or(&[]); + let mut imported = Vec::new(); + + for statement in relay_parent_state + .statement_store + .fresh_statements_for_backing(group_validators, candidate_hash) + { + let v = statement.validator_index(); + let compact = statement.payload().clone(); + imported.push((v, compact)); + let carrying_pvd = statement + .clone() + .convert_to_superpayload_with(|statement| match statement { + CompactStatement::Seconded(_) => FullStatementWithPVD::Seconded( + (&**confirmed.candidate_receipt()).clone(), + confirmed.persisted_validation_data().clone(), + ), + CompactStatement::Valid(c_hash) => FullStatementWithPVD::Valid(c_hash), + }) + .expect("statements refer to same candidate; qed"); + + ctx.send_message(CandidateBackingMessage::Statement(*relay_parent, carrying_pvd)) + .await; + } + + for (v, s) in imported { + relay_parent_state.statement_store.note_known_by_backing(v, s); + } +} + +fn local_knowledge_filter( + group_size: usize, + group_index: GroupIndex, + candidate_hash: CandidateHash, + statement_store: &StatementStore, +) -> StatementFilter { + let mut f = StatementFilter::blank(group_size); + statement_store.fill_statement_filter(group_index, candidate_hash, &mut f); + f +} + +// This provides a backable candidate to the grid and dispatches backable candidate announcements +// and acknowledgements via the grid topology. If the session topology is not yet +// available, this will be a no-op. 
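+//
+// For intuition: the `StatementFilter` produced by `local_knowledge_filter`
+// above is a pair of per-group bitfields. A hypothetical group of size 3,
+// where we have seen a `Seconded` statement only from the first validator
+// in the group, would be represented as:
+//
+//   let mut filter = StatementFilter::blank(3);
+//   filter.seconded_in_group.set(0, true);
+//   // `filter` now advertises one `Seconded` and no `Valid` statements.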
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn provide_candidate_to_grid( + ctx: &mut Context, + candidate_hash: CandidateHash, + relay_parent_state: &mut PerRelayParentState, + confirmed_candidate: &candidates::ConfirmedCandidate, + per_session: &PerSessionState, + authorities: &HashMap, + peers: &HashMap, +) { + let local_validator = match relay_parent_state.local_validator { + Some(ref mut v) => v, + None => return, + }; + + let relay_parent = confirmed_candidate.relay_parent(); + let group_index = confirmed_candidate.group_index(); + + let grid_view = match per_session.grid_view { + Some(ref t) => t, + None => { + gum::trace!( + target: LOG_TARGET, + session = relay_parent_state.session, + "Cannot handle backable candidate due to lack of topology", + ); + + return + }, + }; + + let group_size = match per_session.groups.get(group_index) { + None => { + gum::warn!( + target: LOG_TARGET, + ?candidate_hash, + ?relay_parent, + ?group_index, + session = relay_parent_state.session, + "Handled backed candidate with unknown group?", + ); + + return + }, + Some(g) => g.len(), + }; + + let filter = local_knowledge_filter( + group_size, + group_index, + candidate_hash, + &relay_parent_state.statement_store, + ); + + let actions = local_validator.grid_tracker.add_backed_candidate( + grid_view, + candidate_hash, + group_index, + filter.clone(), + ); + + let manifest = protocol_vstaging::BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index, + para_id: confirmed_candidate.para_id(), + parent_head_data_hash: confirmed_candidate.parent_head_data_hash(), + statement_knowledge: filter.clone(), + }; + let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { + candidate_hash, + statement_knowledge: filter.clone(), + }; + + let manifest_message = Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + ); + let ack_message = Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), + ); + + let mut manifest_peers = Vec::new(); + let mut ack_peers = Vec::new(); + + let mut post_statements = Vec::new(); + for (v, action) in actions { + let p = match connected_validator_peer(authorities, per_session, v) { + None => continue, + Some(p) => + if peers.get(&p).map_or(false, |d| d.knows_relay_parent(&relay_parent)) { + p + } else { + continue + }, + }; + + match action { + grid::ManifestKind::Full => manifest_peers.push(p), + grid::ManifestKind::Acknowledgement => ack_peers.push(p), + } + + local_validator.grid_tracker.manifest_sent_to( + &per_session.groups, + v, + candidate_hash, + filter.clone(), + ); + post_statements.extend( + post_acknowledgement_statement_messages( + v, + relay_parent, + &mut local_validator.grid_tracker, + &relay_parent_state.statement_store, + &per_session.groups, + group_index, + candidate_hash, + ) + .into_iter() + .map(|m| (vec![p], m)), + ); + } + + if !manifest_peers.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + manifest_peers, + manifest_message.into(), + )) + .await; + } + + if !ack_peers.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( + ack_peers, + ack_message.into(), + )) + .await; + } + + if !post_statements.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(post_statements)) + .await; + } +} + +fn group_for_para( + availability_cores: &[CoreState], + group_rotation_info: &GroupRotationInfo, + para_id: ParaId, 
+) -> Option { + // Note: this won't work well for parathreads as it assumes that core assignments are fixed + // across blocks. + let core_index = availability_cores.iter().position(|c| c.para_id() == Some(para_id)); + + core_index + .map(|c| group_rotation_info.group_for_core(CoreIndex(c as _), availability_cores.len())) +} + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn fragment_tree_update_inner( + ctx: &mut Context, + state: &mut State, + active_leaf_hash: Option, + required_parent_info: Option<(Hash, ParaId)>, + known_hypotheticals: Option>, +) { + // 1. get hypothetical candidates + let hypotheticals = match known_hypotheticals { + None => state.candidates.frontier_hypotheticals(required_parent_info), + Some(h) => h, + }; + + // 2. find out which are in the frontier + let frontier = { + let (tx, rx) = oneshot::channel(); + ctx.send_message(ProspectiveParachainsMessage::GetHypotheticalFrontier( + HypotheticalFrontierRequest { + candidates: hypotheticals, + fragment_tree_relay_parent: active_leaf_hash, + backed_in_path_only: false, + }, + tx, + )) + .await; + + match rx.await { + Ok(frontier) => frontier, + Err(oneshot::Canceled) => return, + } + }; + // 3. note that they are importable under a given leaf hash. + for (hypo, membership) in frontier { + // skip parablocks outside of the frontier + if membership.is_empty() { + continue + } + + for (leaf_hash, _) in membership { + state.candidates.note_importable_under(&hypo, leaf_hash); + } + + // 4. for confirmed candidates, send all statements which are new to backing. + if let HypotheticalCandidate::Complete { + candidate_hash, + receipt, + persisted_validation_data: _, + } = hypo + { + let confirmed_candidate = state.candidates.get_confirmed(&candidate_hash); + let prs = state.per_relay_parent.get_mut(&receipt.descriptor().relay_parent); + if let (Some(confirmed), Some(prs)) = (confirmed_candidate, prs) { + let group_index = group_for_para( + &prs.availability_cores, + &prs.group_rotation_info, + receipt.descriptor().para_id, + ); + + let per_session = state.per_session.get(&prs.session); + if let (Some(per_session), Some(group_index)) = (per_session, group_index) { + send_backing_fresh_statements( + ctx, + candidate_hash, + group_index, + &receipt.descriptor().relay_parent, + prs, + confirmed, + per_session, + ) + .await; + } + } + } + } +} + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn new_leaf_fragment_tree_updates( + ctx: &mut Context, + state: &mut State, + leaf_hash: Hash, +) { + fragment_tree_update_inner(ctx, state, Some(leaf_hash), None, None).await +} + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn prospective_backed_notification_fragment_tree_updates( + ctx: &mut Context, + state: &mut State, + para_id: ParaId, + para_head: Hash, +) { + fragment_tree_update_inner(ctx, state, None, Some((para_head, para_id)), None).await +} + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn new_confirmed_candidate_fragment_tree_updates( + ctx: &mut Context, + state: &mut State, + candidate: HypotheticalCandidate, +) { + fragment_tree_update_inner(ctx, state, None, None, Some(vec![candidate])).await +} + +struct ManifestImportSuccess<'a> { + relay_parent_state: &'a mut PerRelayParentState, + per_session: &'a PerSessionState, + acknowledge: bool, + sender_index: ValidatorIndex, +} + +/// Handles the common part of incoming manifests of both types (full & acknowledgement) +/// +/// Basic sanity 
checks around data, importing the manifest into the grid tracker, finding the +/// sending peer's validator index, reporting the peer for any misbehavior, etc. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn handle_incoming_manifest_common<'a, Context>( + ctx: &mut Context, + peer: PeerId, + peers: &HashMap, + per_relay_parent: &'a mut HashMap, + per_session: &'a HashMap, + candidates: &mut Candidates, + candidate_hash: CandidateHash, + relay_parent: Hash, + para_id: ParaId, + manifest_summary: grid::ManifestSummary, + manifest_kind: grid::ManifestKind, +) -> Option> { + // 1. sanity checks: peer is connected, relay-parent in state, para ID matches group index. + let peer_state = match peers.get(&peer) { + None => return None, + Some(p) => p, + }; + + let relay_parent_state = match per_relay_parent.get_mut(&relay_parent) { + None => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE).await; + return None + }, + Some(s) => s, + }; + + let per_session = match per_session.get(&relay_parent_state.session) { + None => return None, + Some(s) => s, + }; + + let local_validator = match relay_parent_state.local_validator.as_mut() { + None => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE).await; + return None + }, + Some(x) => x, + }; + + let expected_group = group_for_para( + &relay_parent_state.availability_cores, + &relay_parent_state.group_rotation_info, + para_id, + ); + + if expected_group != Some(manifest_summary.claimed_group_index) { + report_peer(ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; + return None + } + + let grid_topology = match per_session.grid_view.as_ref() { + None => return None, + Some(x) => x, + }; + + let sender_index = grid_topology + .iter_sending_for_group(manifest_summary.claimed_group_index, manifest_kind) + .filter_map(|i| per_session.session_info.discovery_keys.get(i.0 as usize).map(|ad| (i, ad))) + .filter(|(_, ad)| peer_state.is_authority(ad)) + .map(|(i, _)| i) + .next(); + + let sender_index = match sender_index { + None => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_DISALLOWED).await; + return None + }, + Some(s) => s, + }; + + // 2. sanity checks: peer is validator, bitvec size, import into grid tracker + let group_index = manifest_summary.claimed_group_index; + let claimed_parent_hash = manifest_summary.claimed_parent_hash; + let acknowledge = match local_validator.grid_tracker.import_manifest( + grid_topology, + &per_session.groups, + candidate_hash, + relay_parent_state.seconding_limit, + manifest_summary, + manifest_kind, + sender_index, + ) { + Ok(x) => x, + Err(grid::ManifestImportError::Conflicting) => { + report_peer(ctx.sender(), peer, COST_CONFLICTING_MANIFEST).await; + return None + }, + Err(grid::ManifestImportError::Overflow) => { + report_peer(ctx.sender(), peer, COST_EXCESSIVE_SECONDED).await; + return None + }, + Err(grid::ManifestImportError::Insufficient) => { + report_peer(ctx.sender(), peer, COST_INSUFFICIENT_MANIFEST).await; + return None + }, + Err(grid::ManifestImportError::Malformed) => { + report_peer(ctx.sender(), peer, COST_MALFORMED_MANIFEST).await; + return None + }, + Err(grid::ManifestImportError::Disallowed) => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_MANIFEST_DISALLOWED).await; + return None + }, + }; + + // 3. if accepted by grid, insert as unconfirmed. 
+ if let Err(BadAdvertisement) = candidates.insert_unconfirmed( + peer, + candidate_hash, + relay_parent, + group_index, + Some((claimed_parent_hash, para_id)), + ) { + report_peer(ctx.sender(), peer, COST_INACCURATE_ADVERTISEMENT).await; + return None + } + + Some(ManifestImportSuccess { relay_parent_state, per_session, acknowledge, sender_index }) +} + +/// Produce a list of network messages to send to a peer, following acknowledgement of a manifest. +/// This notes the messages as sent within the grid state. +fn post_acknowledgement_statement_messages( + recipient: ValidatorIndex, + relay_parent: Hash, + grid_tracker: &mut GridTracker, + statement_store: &StatementStore, + groups: &Groups, + group_index: GroupIndex, + candidate_hash: CandidateHash, +) -> Vec { + let sending_filter = match grid_tracker.pending_statements_for(recipient, candidate_hash) { + None => return Vec::new(), + Some(f) => f, + }; + + let mut messages = Vec::new(); + for statement in + statement_store.group_statements(groups, group_index, candidate_hash, &sending_filter) + { + grid_tracker.sent_or_received_direct_statement( + groups, + statement.validator_index(), + recipient, + statement.payload(), + ); + + messages.push(Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + statement.as_unchecked().clone(), + ) + .into(), + )); + } + + messages +} + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn handle_incoming_manifest( + ctx: &mut Context, + state: &mut State, + peer: PeerId, + manifest: net_protocol::vstaging::BackedCandidateManifest, +) { + let x = match handle_incoming_manifest_common( + ctx, + peer, + &state.peers, + &mut state.per_relay_parent, + &state.per_session, + &mut state.candidates, + manifest.candidate_hash, + manifest.relay_parent, + manifest.para_id, + grid::ManifestSummary { + claimed_parent_hash: manifest.parent_head_data_hash, + claimed_group_index: manifest.group_index, + statement_knowledge: manifest.statement_knowledge, + }, + grid::ManifestKind::Full, + ) + .await + { + Some(x) => x, + None => return, + }; + + let ManifestImportSuccess { relay_parent_state, per_session, acknowledge, sender_index } = x; + + if acknowledge { + // 4. if already known within grid (confirmed & backed), acknowledge candidate + + let local_knowledge = { + let group_size = match per_session.groups.get(manifest.group_index) { + None => return, // sanity + Some(x) => x.len(), + }; + + local_knowledge_filter( + group_size, + manifest.group_index, + manifest.candidate_hash, + &relay_parent_state.statement_store, + ) + }; + + let messages = acknowledgement_and_statement_messages( + peer, + sender_index, + &per_session.groups, + relay_parent_state, + manifest.relay_parent, + manifest.group_index, + manifest.candidate_hash, + local_knowledge, + ); + + if !messages.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages(messages)).await; + } + } else if !state.candidates.is_confirmed(&manifest.candidate_hash) { + // 5. if unconfirmed, add request entry + state + .request_manager + .get_or_insert(manifest.relay_parent, manifest.candidate_hash, manifest.group_index) + .add_peer(peer); + } +} + +/// Produces acknowledgement and statement messages to be sent over the network, +/// noting that they have been sent within the grid topology tracker as well. 
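+///
+/// The acknowledgement itself is minimal, carrying only the candidate hash
+/// and our local statement knowledge (shape mirrored from the function body):
+///
+/// ```ignore
+/// let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement {
+/// 	candidate_hash,
+/// 	statement_knowledge: local_knowledge.clone(),
+/// };
+/// ```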
+fn acknowledgement_and_statement_messages( + peer: PeerId, + validator_index: ValidatorIndex, + groups: &Groups, + relay_parent_state: &mut PerRelayParentState, + relay_parent: Hash, + group_index: GroupIndex, + candidate_hash: CandidateHash, + local_knowledge: StatementFilter, +) -> Vec<(Vec, net_protocol::VersionedValidationProtocol)> { + let local_validator = match relay_parent_state.local_validator.as_mut() { + None => return Vec::new(), + Some(l) => l, + }; + + let acknowledgement = protocol_vstaging::BackedCandidateAcknowledgement { + candidate_hash, + statement_knowledge: local_knowledge.clone(), + }; + + let msg = Versioned::VStaging( + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(acknowledgement), + ); + + let mut messages = vec![(vec![peer], msg.into())]; + + local_validator.grid_tracker.manifest_sent_to( + groups, + validator_index, + candidate_hash, + local_knowledge.clone(), + ); + + let statement_messages = post_acknowledgement_statement_messages( + validator_index, + relay_parent, + &mut local_validator.grid_tracker, + &relay_parent_state.statement_store, + &groups, + group_index, + candidate_hash, + ); + + messages.extend(statement_messages.into_iter().map(|m| (vec![peer], m))); + + messages +} + +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn handle_incoming_acknowledgement( + ctx: &mut Context, + state: &mut State, + peer: PeerId, + acknowledgement: net_protocol::vstaging::BackedCandidateAcknowledgement, +) { + // The key difference between acknowledgments and full manifests is that only + // the candidate hash is included alongside the bitfields, so the candidate + // must be confirmed for us to even process it. + + let candidate_hash = acknowledgement.candidate_hash; + let (relay_parent, parent_head_data_hash, group_index, para_id) = { + match state.candidates.get_confirmed(&candidate_hash) { + Some(c) => (c.relay_parent(), c.parent_head_data_hash(), c.group_index(), c.para_id()), + None => { + report_peer(ctx.sender(), peer, COST_UNEXPECTED_ACKNOWLEDGEMENT_UNKNOWN_CANDIDATE) + .await; + return + }, + } + }; + + let x = match handle_incoming_manifest_common( + ctx, + peer, + &state.peers, + &mut state.per_relay_parent, + &state.per_session, + &mut state.candidates, + candidate_hash, + relay_parent, + para_id, + grid::ManifestSummary { + claimed_parent_hash: parent_head_data_hash, + claimed_group_index: group_index, + statement_knowledge: acknowledgement.statement_knowledge, + }, + grid::ManifestKind::Acknowledgement, + ) + .await + { + Some(x) => x, + None => return, + }; + + let ManifestImportSuccess { relay_parent_state, per_session, sender_index, .. } = x; + + let local_validator = match relay_parent_state.local_validator.as_mut() { + None => return, + Some(l) => l, + }; + + let messages = post_acknowledgement_statement_messages( + sender_index, + relay_parent, + &mut local_validator.grid_tracker, + &relay_parent_state.statement_store, + &per_session.groups, + group_index, + candidate_hash, + ); + + if !messages.is_empty() { + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessages( + messages.into_iter().map(|m| (vec![peer], m)).collect(), + )) + .await; + } +} + +/// Handle a notification of a candidate being backed. 
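+///
+/// In rough order, the handling mirrors the function body below:
+///
+/// ```ignore
+/// // 1. Announce / acknowledge the backed candidate via the grid topology.
+/// provide_candidate_to_grid(
+/// 	ctx, candidate_hash, relay_parent_state, confirmed, per_session,
+/// 	&state.authorities, &state.peers,
+/// ).await;
+/// // 2. Search for children of the backed candidate to request.
+/// prospective_backed_notification_fragment_tree_updates(
+/// 	ctx, state, confirmed.para_id(),
+/// 	confirmed.candidate_receipt().descriptor().para_head,
+/// ).await;
+/// ```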
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +pub(crate) async fn handle_backed_candidate_message( + ctx: &mut Context, + state: &mut State, + candidate_hash: CandidateHash, +) { + // If the candidate is unknown or unconfirmed, it's a race (pruned before receiving message) + // or a bug. Ignore if so + let confirmed = match state.candidates.get_confirmed(&candidate_hash) { + None => { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + "Received backed candidate notification for unknown or unconfirmed", + ); + + return + }, + Some(c) => c, + }; + + let relay_parent_state = match state.per_relay_parent.get_mut(&confirmed.relay_parent()) { + None => return, + Some(s) => s, + }; + + let per_session = match state.per_session.get(&relay_parent_state.session) { + None => return, + Some(s) => s, + }; + + provide_candidate_to_grid( + ctx, + candidate_hash, + relay_parent_state, + confirmed, + per_session, + &state.authorities, + &state.peers, + ) + .await; + + // Search for children of the backed candidate to request. + prospective_backed_notification_fragment_tree_updates( + ctx, + state, + confirmed.para_id(), + confirmed.candidate_receipt().descriptor().para_head, + ) + .await; +} + +/// Sends all messages about a candidate to all peers in the cluster, +/// with `Seconded` statements first. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn send_cluster_candidate_statements( + ctx: &mut Context, + state: &mut State, + candidate_hash: CandidateHash, + relay_parent: Hash, +) { + let relay_parent_state = match state.per_relay_parent.get_mut(&relay_parent) { + None => return, + Some(s) => s, + }; + + let per_session = match state.per_session.get(&relay_parent_state.session) { + None => return, + Some(s) => s, + }; + + let local_group = match relay_parent_state.local_validator.as_mut() { + None => return, + Some(v) => v.group, + }; + + let group_size = match per_session.groups.get(local_group) { + None => return, + Some(g) => g.len(), + }; + + let statements: Vec<_> = relay_parent_state + .statement_store + .group_statements( + &per_session.groups, + local_group, + candidate_hash, + &StatementFilter::full(group_size), + ) + .map(|x| x.clone()) + .collect(); + + for statement in statements { + circulate_statement( + ctx, + relay_parent, + relay_parent_state, + per_session, + &state.candidates, + &state.authorities, + &state.peers, + statement, + ) + .await; + } +} + +/// Applies state & p2p updates as a result of a newly confirmed candidate. +/// +/// This punishes peers which advertised the candidate incorrectly, as well as +/// doing an importability analysis of the confirmed candidate and providing +/// statements to the backing subsystem if importable. It also cleans up +/// any pending requests for the candidate. 
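+///
+/// For example, the punishment step reduces to the loop below (a sketch of
+/// the function body):
+///
+/// ```ignore
+/// for peer in post_confirmation.reckoning.incorrect {
+/// 	report_peer(ctx.sender(), peer, COST_INACCURATE_ADVERTISEMENT).await;
+/// }
+/// ```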
+#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +async fn apply_post_confirmation( + ctx: &mut Context, + state: &mut State, + post_confirmation: PostConfirmation, +) { + for peer in post_confirmation.reckoning.incorrect { + report_peer(ctx.sender(), peer, COST_INACCURATE_ADVERTISEMENT).await; + } + + let candidate_hash = post_confirmation.hypothetical.candidate_hash(); + state.request_manager.remove_for(candidate_hash); + + send_cluster_candidate_statements( + ctx, + state, + candidate_hash, + post_confirmation.hypothetical.relay_parent(), + ) + .await; + new_confirmed_candidate_fragment_tree_updates(ctx, state, post_confirmation.hypothetical).await; +} + +/// Dispatch pending requests for candidate data & statements. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut State) { + let peers = &state.peers; + let peer_advertised = |identifier: &CandidateIdentifier, peer: &_| { + let peer_data = peers.get(peer)?; + + let relay_parent_state = state.per_relay_parent.get(&identifier.relay_parent)?; + let per_session = state.per_session.get(&relay_parent_state.session)?; + + let local_validator = relay_parent_state.local_validator.as_ref()?; + + for validator_id in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| { + per_session.authority_lookup.get(a) + }) { + // For cluster members, they haven't advertised any statements in particular, + // but have surely sent us some. + if local_validator + .cluster_tracker + .knows_candidate(validator_id, identifier.candidate_hash) + { + return Some(StatementFilter::blank(local_validator.cluster_tracker.targets().len())) + } + + let filter = local_validator + .grid_tracker + .advertised_statements(validator_id, &identifier.candidate_hash); + + if let Some(f) = filter { + return Some(f) + } + } + + None + }; + let request_props = |identifier: &CandidateIdentifier| { + let &CandidateIdentifier { relay_parent, group_index, .. } = identifier; + + let relay_parent_state = state.per_relay_parent.get(&relay_parent)?; + let per_session = state.per_session.get(&relay_parent_state.session)?; + let group = per_session.groups.get(group_index)?; + let seconding_limit = relay_parent_state.seconding_limit; + + // Request nothing which would be an 'over-seconded' statement. + let mut unwanted_mask = StatementFilter::blank(group.len()); + for (i, v) in group.iter().enumerate() { + if relay_parent_state.statement_store.seconded_count(v) >= seconding_limit { + unwanted_mask.seconded_in_group.set(i, true); + } + } + + // don't require a backing threshold for cluster candidates. + let require_backing = relay_parent_state.local_validator.as_ref()?.group != group_index; + + Some(RequestProperties { + unwanted_mask, + backing_threshold: if require_backing { + Some(polkadot_node_primitives::minimum_votes(group.len())) + } else { + None + }, + }) + }; + + while let Some(request) = state.request_manager.next_request(request_props, peer_advertised) { + // Peer is supposedly connected. + ctx.send_message(NetworkBridgeTxMessage::SendRequests( + vec![Requests::AttestedCandidateVStaging(request)], + IfDisconnected::ImmediateError, + )) + .await; + } +} + +/// Wait on the next incoming response. If there are no requests pending, this +/// future never resolves. It is the responsibility of the user of this API +/// to interrupt the future. 
+pub(crate) async fn receive_response(state: &mut State) -> UnhandledResponse { + match state.request_manager.await_incoming().await { + Some(r) => r, + None => futures::future::pending().await, + } +} + +/// Handles an incoming response. This does the actual work of validating the response, +/// importing statements, sending acknowledgements, etc. +#[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] +pub(crate) async fn handle_response( + ctx: &mut Context, + state: &mut State, + response: UnhandledResponse, +) { + let &requests::CandidateIdentifier { relay_parent, candidate_hash, group_index } = + response.candidate_identifier(); + + let post_confirmation = { + let relay_parent_state = match state.per_relay_parent.get_mut(&relay_parent) { + None => return, + Some(s) => s, + }; + + let per_session = match state.per_session.get(&relay_parent_state.session) { + None => return, + Some(s) => s, + }; + + let group = match per_session.groups.get(group_index) { + None => return, + Some(g) => g, + }; + + let res = response.validate_response( + &mut state.request_manager, + group, + relay_parent_state.session, + |v| per_session.session_info.validators.get(v).map(|x| x.clone()), + |para, g_index| { + let expected_group = group_for_para( + &relay_parent_state.availability_cores, + &relay_parent_state.group_rotation_info, + para, + ); + + Some(g_index) == expected_group + }, + ); + + for (peer, rep) in res.reputation_changes { + report_peer(ctx.sender(), peer, rep).await; + } + + let (candidate, pvd, statements) = match res.request_status { + requests::CandidateRequestStatus::Outdated => return, + requests::CandidateRequestStatus::Incomplete => return, + requests::CandidateRequestStatus::Complete { + candidate, + persisted_validation_data, + statements, + } => (candidate, persisted_validation_data, statements), + }; + + for statement in statements { + let _ = relay_parent_state.statement_store.insert( + &per_session.groups, + statement, + StatementOrigin::Remote, + ); + } + + if let Some(post_confirmation) = + state.candidates.confirm_candidate(candidate_hash, candidate, pvd, group_index) + { + post_confirmation + } else { + gum::warn!( + target: LOG_TARGET, + ?candidate_hash, + "Candidate re-confirmed by request/response: logic error", + ); + + return + } + }; + + // Note that this implicitly circulates all statements via the cluster. + apply_post_confirmation(ctx, state, post_confirmation).await; + + let confirmed = state.candidates.get_confirmed(&candidate_hash).expect("just confirmed; qed"); + + // Although the candidate is confirmed, it isn't yet on the + // hypothetical frontier of the fragment tree. Later, when it is, + // we will import statements. + if !confirmed.is_importable(None) { + return + } + + let relay_parent_state = match state.per_relay_parent.get_mut(&relay_parent) { + None => return, + Some(s) => s, + }; + + let per_session = match state.per_session.get(&relay_parent_state.session) { + None => return, + Some(s) => s, + }; + + send_backing_fresh_statements( + ctx, + candidate_hash, + group_index, + &relay_parent, + relay_parent_state, + confirmed, + per_session, + ) + .await; + + // we don't need to send acknowledgement yet because + // 1. the candidate is not known yet, so cannot be backed. + // any previous confirmation is a bug, because `apply_post_confirmation` is meant to + // clear requests. + // 2. providing the statements to backing will lead to 'Backed' message. + // 3. 
on 'Backed' we will send acknowledgements/follow-up statements when this becomes
+	//    includable.
+}
+
+/// Answer an incoming request for a candidate.
+pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) {
+	let ResponderMessage { request, sent_feedback } = message;
+	let AttestedCandidateRequest { candidate_hash, ref mask } = &request.payload;
+
+	// Signal to the responder that we started processing this request.
+	let _ = sent_feedback.send(());
+
+	let confirmed = match state.candidates.get_confirmed(&candidate_hash) {
+		None => return, // drop request, candidate not known.
+		Some(c) => c,
+	};
+
+	let relay_parent_state = match state.per_relay_parent.get(&confirmed.relay_parent()) {
+		None => return,
+		Some(s) => s,
+	};
+
+	let local_validator = match relay_parent_state.local_validator.as_ref() {
+		None => return,
+		Some(s) => s,
+	};
+
+	let per_session = match state.per_session.get(&relay_parent_state.session) {
+		None => return,
+		Some(s) => s,
+	};
+
+	let peer_data = match state.peers.get(&request.peer) {
+		None => return,
+		Some(d) => d,
+	};
+
+	let group_size = per_session
+		.groups
+		.get(confirmed.group_index())
+		.expect("group from session's candidate always known; qed")
+		.len();
+
+	// check that the request bitfields are the right size.
+	if mask.seconded_in_group.len() != group_size || mask.validated_in_group.len() != group_size {
+		let _ = request.send_outgoing_response(OutgoingResponse {
+			result: Err(()),
+			reputation_changes: vec![COST_INVALID_REQUEST_BITFIELD_SIZE],
+			sent_feedback: None,
+		});
+
+		return
+	}
+
+	// check the peer is allowed to request the candidate (i.e. we've sent them a manifest)
+	{
+		let mut can_request = false;
+		for validator_id in find_validator_ids(peer_data.iter_known_discovery_ids(), |a| {
+			per_session.authority_lookup.get(a)
+		}) {
+			if local_validator.grid_tracker.can_request(validator_id, *candidate_hash) {
+				can_request = true;
+				break
+			}
+		}
+
+		if !can_request {
+			let _ = request.send_outgoing_response(OutgoingResponse {
+				result: Err(()),
+				reputation_changes: vec![COST_UNEXPECTED_REQUEST],
+				sent_feedback: None,
+			});
+
+			return
+		}
+	}
+
+	// Transform the mask with 'OR' semantics into one with 'AND' semantics for the API used
+	// below.
+	let and_mask = StatementFilter {
+		seconded_in_group: !mask.seconded_in_group.clone(),
+		validated_in_group: !mask.validated_in_group.clone(),
+	};
+
+	let response = AttestedCandidateResponse {
+		candidate_receipt: (&**confirmed.candidate_receipt()).clone(),
+		persisted_validation_data: confirmed.persisted_validation_data().clone(),
+		statements: relay_parent_state
+			.statement_store
+			.group_statements(
+				&per_session.groups,
+				confirmed.group_index(),
+				*candidate_hash,
+				&and_mask,
+			)
+			.map(|s| s.as_unchecked().clone())
+			.collect(),
+	};
+
+	let _ = request.send_response(response);
+}
+
+/// Messages coming from the background respond task.
+pub struct ResponderMessage {
+	request: IncomingRequest<AttestedCandidateRequest>,
+	sent_feedback: oneshot::Sender<()>,
+}
+
+/// A responder task, answering incoming requests for attested candidates via
+/// request/response.
+///
+/// Runs as a background task and feeds requests to [`answer_request`] through [`MuxedMessage`].
+pub async fn respond_task(
+	mut receiver: IncomingRequestReceiver<AttestedCandidateRequest>,
+	mut sender: mpsc::Sender<ResponderMessage>,
+) {
+	let mut pending_out = FuturesUnordered::new();
+	loop {
+		// Ensure we are not handling too many requests in parallel.
+ if pending_out.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize { + // Wait for one to finish: + pending_out.next().await; + } + + let req = match receiver.recv(|| vec![COST_INVALID_REQUEST]).await.into_nested() { + Ok(Ok(v)) => v, + Err(fatal) => { + gum::debug!(target: LOG_TARGET, error = ?fatal, "Shutting down request responder"); + return + }, + Ok(Err(jfyi)) => { + gum::debug!(target: LOG_TARGET, error = ?jfyi, "Decoding request failed"); + continue + }, + }; + + let (pending_sent_tx, pending_sent_rx) = oneshot::channel(); + if let Err(err) = sender + .feed(ResponderMessage { request: req, sent_feedback: pending_sent_tx }) + .await + { + gum::debug!(target: LOG_TARGET, ?err, "Shutting down responder"); + return + } + pending_out.push(pending_sent_rx); + } +} diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs new file mode 100644 index 000000000000..507bbbb0ef18 --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -0,0 +1,1165 @@ +// Copyright 2022 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +//! A requester for full information on candidates. +//! +//! 1. We use `RequestManager::get_or_insert().get_mut()` to add and mutate [`RequestedCandidate`]s, either setting the +//! priority or adding a peer we know has the candidate. We currently prioritize "cluster" candidates (those from our +//! own group, although the cluster mechanism could be made to include multiple groups in the future) over "grid" +//! candidates (those from other groups). +//! +//! 2. The main loop of the module will invoke [`RequestManager::next_request`] in a loop until it returns `None`, +//! dispatching all requests with the `NetworkBridgeTxMessage`. The receiving half of the channel is owned by the +//! [`RequestManager`]. +//! +//! 3. The main loop of the module will also select over [`RequestManager::await_incoming`] to receive +//! [`UnhandledResponse`]s, which it then validates using [`UnhandledResponse::validate_response`] (which requires state +//! not owned by the request manager). 
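+//!
+//! As a rough sketch of how these pieces fit together (illustrative only: the
+//! surrounding subsystem plumbing is elided, and `state`, `request_props`,
+//! `peer_advertised`, and `send_request` are hypothetical):
+//!
+//! ```ignore
+//! // Note a candidate as requestable and prioritize it if it is from our own group.
+//! let mut entry = state.request_manager.get_or_insert(relay_parent, candidate_hash, group_index);
+//! entry.add_peer(peer);
+//! entry.set_cluster_priority();
+//!
+//! // Dispatch as many requests as the parallelism limit allows.
+//! while let Some(request) = state.request_manager.next_request(request_props, peer_advertised) {
+//!     send_request(request);
+//! }
+//!
+//! // Later, when a response arrives, validate it against state the manager doesn't own.
+//! if let Some(unhandled) = state.request_manager.await_incoming().await {
+//!     let output = unhandled.validate_response(/* manager, group, session, lookups */);
+//! }
+//! ```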
+
+use super::{
+	BENEFIT_VALID_RESPONSE, BENEFIT_VALID_STATEMENT, COST_IMPROPERLY_DECODED_RESPONSE,
+	COST_INVALID_RESPONSE, COST_INVALID_SIGNATURE, COST_UNREQUESTED_RESPONSE_STATEMENT,
+};
+use crate::LOG_TARGET;
+
+use polkadot_node_network_protocol::{
+	request_response::{
+		outgoing::{Recipient as RequestRecipient, RequestError},
+		vstaging::{AttestedCandidateRequest, AttestedCandidateResponse},
+		OutgoingRequest, OutgoingResult, MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS,
+	},
+	vstaging::StatementFilter,
+	PeerId, UnifiedReputationChange as Rep,
+};
+use polkadot_primitives::vstaging::{
+	CandidateHash, CommittedCandidateReceipt, CompactStatement, GroupIndex, Hash, ParaId,
+	PersistedValidationData, SessionIndex, SignedStatement, SigningContext, ValidatorId,
+	ValidatorIndex,
+};
+
+use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered};
+
+use std::collections::{
+	hash_map::{Entry as HEntry, HashMap},
+	HashSet, VecDeque,
+};
+
+/// An identifier for a candidate.
+///
+/// In this module, we are requesting candidates
+/// for which we have no information other than the candidate hash and statements signed
+/// by validators. It is possible for validators in multiple groups to abuse this lack of
+/// information: until we actually get the preimage of this candidate we cannot confirm
+/// anything other than the candidate hash.
+#[derive(Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
+pub struct CandidateIdentifier {
+	/// The relay-parent this candidate is ostensibly under.
+	pub relay_parent: Hash,
+	/// The hash of the candidate.
+	pub candidate_hash: CandidateHash,
+	/// The index of the group claiming to be assigned to the candidate's
+	/// para.
+	pub group_index: GroupIndex,
+}
+
+struct TaggedResponse {
+	identifier: CandidateIdentifier,
+	requested_peer: PeerId,
+	props: RequestProperties,
+	response: OutgoingResult<AttestedCandidateResponse>,
+}
+
+/// A pending request.
+#[derive(Debug)]
+pub struct RequestedCandidate {
+	priority: Priority,
+	known_by: VecDeque<PeerId>,
+	in_flight: bool,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
+enum Origin {
+	Cluster = 0,
+	Unspecified = 1,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
+struct Priority {
+	origin: Origin,
+	attempts: usize,
+}
+
+/// An entry for manipulating a requested candidate.
+pub struct Entry<'a> {
+	prev_index: usize,
+	identifier: CandidateIdentifier,
+	by_priority: &'a mut Vec<(Priority, CandidateIdentifier)>,
+	requested: &'a mut RequestedCandidate,
+}
+
+impl<'a> Entry<'a> {
+	/// Add a peer to the set of known peers.
+	pub fn add_peer(&mut self, peer: PeerId) {
+		if !self.requested.known_by.contains(&peer) {
+			self.requested.known_by.push_back(peer);
+		}
+	}
+
+	/// Note that the candidate is required for the cluster.
+	pub fn set_cluster_priority(&mut self) {
+		self.requested.priority.origin = Origin::Cluster;
+
+		insert_or_update_priority(
+			&mut *self.by_priority,
+			Some(self.prev_index),
+			self.identifier.clone(),
+			self.requested.priority.clone(),
+		);
+	}
+}
+
+/// A manager for outgoing requests.
+pub struct RequestManager {
+	pending_responses: FuturesUnordered<BoxFuture<'static, TaggedResponse>>,
+	requests: HashMap<CandidateIdentifier, RequestedCandidate>,
+	// sorted by priority.
+	by_priority: Vec<(Priority, CandidateIdentifier)>,
+	// all unique identifiers for the candidate.
+	unique_identifiers: HashMap<CandidateHash, HashSet<CandidateIdentifier>>,
+}
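+
+// Note on ordering: `by_priority` relies on the derived lexicographic `Ord` of
+// `(Priority, CandidateIdentifier)`. `Origin::Cluster` (discriminant 0) sorts
+// before `Origin::Unspecified` (1), and within the same origin, entries with
+// fewer attempts sort first. For example, with illustrative identifiers
+// `id_a`, `id_b`, `id_c`:
+//
+//   (Priority { origin: Cluster, attempts: 0 }, id_a)
+// < (Priority { origin: Cluster, attempts: 1 }, id_b)
+// < (Priority { origin: Unspecified, attempts: 0 }, id_c)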
+
+impl RequestManager {
+	/// Create a new [`RequestManager`].
+	pub fn new() -> Self {
+		RequestManager {
+			pending_responses: FuturesUnordered::new(),
+			requests: HashMap::new(),
+			by_priority: Vec::new(),
+			unique_identifiers: HashMap::new(),
+		}
+	}
+
+	/// Gets an [`Entry`] for mutating a request and inserts it if the
+	/// manager doesn't store this request already.
+	pub fn get_or_insert(
+		&mut self,
+		relay_parent: Hash,
+		candidate_hash: CandidateHash,
+		group_index: GroupIndex,
+	) -> Entry {
+		let identifier = CandidateIdentifier { relay_parent, candidate_hash, group_index };
+
+		let (candidate, fresh) = match self.requests.entry(identifier.clone()) {
+			HEntry::Occupied(e) => (e.into_mut(), false),
+			HEntry::Vacant(e) => (
+				e.insert(RequestedCandidate {
+					priority: Priority { attempts: 0, origin: Origin::Unspecified },
+					known_by: VecDeque::new(),
+					in_flight: false,
+				}),
+				true,
+			),
+		};
+
+		let priority_index = if fresh {
+			self.unique_identifiers
+				.entry(candidate_hash)
+				.or_default()
+				.insert(identifier.clone());
+
+			insert_or_update_priority(
+				&mut self.by_priority,
+				None,
+				identifier.clone(),
+				candidate.priority.clone(),
+			)
+		} else {
+			match self
+				.by_priority
+				.binary_search(&(candidate.priority.clone(), identifier.clone()))
+			{
+				Ok(i) => i,
+				Err(_) => unreachable!("requested candidates always have a priority entry; qed"),
+			}
+		};
+
+		Entry {
+			prev_index: priority_index,
+			identifier,
+			by_priority: &mut self.by_priority,
+			requested: candidate,
+		}
+	}
+
+	/// Remove all pending requests for the given candidate.
+	pub fn remove_for(&mut self, candidate: CandidateHash) {
+		if let Some(identifiers) = self.unique_identifiers.remove(&candidate) {
+			self.by_priority.retain(|(_priority, id)| !identifiers.contains(&id));
+			for id in identifiers {
+				self.requests.remove(&id);
+			}
+		}
+	}
+
+	/// Remove based on relay-parent.
+	pub fn remove_by_relay_parent(&mut self, relay_parent: Hash) {
+		let mut candidate_hashes = HashSet::new();
+
+		// Remove from `by_priority` and `requests`.
+		self.by_priority.retain(|(_priority, id)| {
+			let retain = relay_parent != id.relay_parent;
+			if !retain {
+				self.requests.remove(id);
+				candidate_hashes.insert(id.candidate_hash);
+			}
+			retain
+		});
+
+		// Remove from `unique_identifiers`.
+		for candidate_hash in candidate_hashes {
+			match self.unique_identifiers.entry(candidate_hash) {
+				HEntry::Occupied(mut entry) => {
+					entry.get_mut().retain(|id| relay_parent != id.relay_parent);
+					if entry.get().is_empty() {
+						entry.remove();
+					}
+				},
+				// We can expect to encounter vacant entries, but only if nodes are misbehaving and
+				// we don't use a deduplicating collection; there are no issues from ignoring it.
+				HEntry::Vacant(_) => (),
+			}
+		}
+	}
+
+	/// Yields the next request to dispatch, if there is any.
+	///
+	/// This function accepts two closures as arguments.
+	///
+	/// The first closure is used to gather information about the desired
+	/// properties of a response, which is used to select targets and validate
+	/// the response later on.
+	///
+	/// The second closure is used to determine the specific statements
+	/// advertised by a peer, to be compared against the mask and backing
+	/// threshold; it returns `None` if the peer is no longer connected.
+	pub fn next_request(
+		&mut self,
+		request_props: impl Fn(&CandidateIdentifier) -> Option<RequestProperties>,
+		peer_advertised: impl Fn(&CandidateIdentifier, &PeerId) -> Option<StatementFilter>,
+	) -> Option<OutgoingRequest<AttestedCandidateRequest>> {
+		if self.pending_responses.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize {
+			return None
+		}
+
+		let mut res = None;
+
+		// loop over all requests, in order of priority.
+		// do some active maintenance of the connected peers.
+		// dispatch the first request which is not in-flight already.
+
+		let mut cleanup_outdated = Vec::new();
+		for (i, (_priority, id)) in self.by_priority.iter().enumerate() {
+			let entry = match self.requests.get_mut(&id) {
+				None => {
+					gum::error!(
+						target: LOG_TARGET,
+						identifier = ?id,
+						"Missing entry for priority queue member",
+					);
+
+					continue
+				},
+				Some(e) => e,
+			};
+
+			if entry.in_flight {
+				continue
+			}
+
+			let props = match request_props(&id) {
+				None => {
+					cleanup_outdated.push((i, id.clone()));
+					continue
+				},
+				Some(s) => s,
+			};
+
+			let target = match find_request_target_with_update(
+				&mut entry.known_by,
+				id,
+				&props,
+				&peer_advertised,
+			) {
+				None => continue,
+				Some(t) => t,
+			};
+
+			let (request, response_fut) = OutgoingRequest::new(
+				RequestRecipient::Peer(target),
+				AttestedCandidateRequest {
+					candidate_hash: id.candidate_hash,
+					mask: props.unwanted_mask.clone(),
+				},
+			);
+
+			let stored_id = id.clone();
+			self.pending_responses.push(Box::pin(async move {
+				TaggedResponse {
+					identifier: stored_id,
+					requested_peer: target,
+					props,
+					response: response_fut.await,
+				}
+			}));
+
+			entry.in_flight = true;
+
+			res = Some(request);
+			break
+		}
+
+		for (priority_index, identifier) in cleanup_outdated.into_iter().rev() {
+			self.by_priority.remove(priority_index);
+			self.requests.remove(&identifier);
+			if let HEntry::Occupied(mut e) =
+				self.unique_identifiers.entry(identifier.candidate_hash)
+			{
+				e.get_mut().remove(&identifier);
+				if e.get().is_empty() {
+					e.remove();
+				}
+			}
+		}
+
+		res
+	}
+
+	/// Await the next incoming response to a sent request, or immediately
+	/// return `None` if there are no pending responses.
+	pub async fn await_incoming(&mut self) -> Option<UnhandledResponse> {
+		self.pending_responses
+			.next()
+			.await
+			.map(|response| UnhandledResponse { response })
+	}
+}
+
+/// Properties used in target selection and validation of a request.
+#[derive(Clone)]
+pub struct RequestProperties {
+	/// A mask for limiting the statements the response is allowed to contain.
+	/// The mask has `OR` semantics: statements by validators corresponding to bits
+	/// set in the mask are not desired.
+	pub unwanted_mask: StatementFilter,
+	/// The required backing threshold, if any. If this is `Some`, then requests will only
+	/// be made to peers which can provide enough statements to back the candidate, when
+	/// taking into account the `unwanted_mask`, and a response will only be validated
+	/// in the case of those statements.
+	///
+	/// If this is `None`, it is assumed that only the candidate itself is needed.
+	pub backing_threshold: Option<usize>,
+}
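+
+// As a rough illustration of the mask semantics: with a group of size 3 and
+// `unwanted_mask.seconded_in_group = [1, 0, 0]`, a peer advertising
+// `seconded_in_group = [1, 0, 1]` still has the statement at index 2 to offer,
+// so masking its advertisement leaves `[0, 0, 1]`; `seconded_and_sufficient`
+// below then checks whatever remains against the backing threshold.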
+/// Finds a valid request target, returning `None` if none exists.
+/// Cleans up disconnected peers and places the returned peer at the back of the queue.
+fn find_request_target_with_update(
+	known_by: &mut VecDeque<PeerId>,
+	candidate_identifier: &CandidateIdentifier,
+	props: &RequestProperties,
+	peer_advertised: impl Fn(&CandidateIdentifier, &PeerId) -> Option<StatementFilter>,
+) -> Option<PeerId> {
+	let mut prune = Vec::new();
+	let mut target = None;
+	for (i, p) in known_by.iter().enumerate() {
+		let mut filter = match peer_advertised(candidate_identifier, p) {
+			None => {
+				prune.push(i);
+				continue
+			},
+			Some(f) => f,
+		};
+
+		filter.mask_seconded(&props.unwanted_mask.seconded_in_group);
+		filter.mask_valid(&props.unwanted_mask.validated_in_group);
+		if seconded_and_sufficient(&filter, props.backing_threshold) {
+			target = Some((i, *p));
+			break
+		}
+	}
+
+	let prune_count = prune.len();
+	// remove in reverse order so that earlier removals don't shift the
+	// indices of later ones.
+	for i in prune.into_iter().rev() {
+		known_by.remove(i);
+	}
+
+	if let Some((i, p)) = target {
+		known_by.remove(i - prune_count);
+		known_by.push_back(p);
+		Some(p)
+	} else {
+		None
+	}
+}
+
+fn seconded_and_sufficient(filter: &StatementFilter, backing_threshold: Option<usize>) -> bool {
+	backing_threshold.map_or(true, |t| filter.has_seconded() && filter.backing_validators() >= t)
+}
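+
+// For example (roughly, on the assumption that `backing_validators` counts
+// validators with any statement in the filter): with `backing_threshold = Some(2)`,
+// one `Seconded` plus one `Valid` statement from distinct validators is sufficient,
+// while two `Valid` statements alone are not, as no validator has seconded.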
+
+/// A response to a request, which has not yet been handled.
+pub struct UnhandledResponse {
+	response: TaggedResponse,
+}
+
+impl UnhandledResponse {
+	/// Get the candidate identifier which the corresponding request
+	/// was classified under.
+	pub fn candidate_identifier(&self) -> &CandidateIdentifier {
+		&self.response.identifier
+	}
+
+	/// Validate the response. If the response is valid, this will yield the
+	/// candidate, the [`PersistedValidationData`] of the candidate, and the
+	/// requested checked statements.
+	///
+	/// Valid responses are defined as those which provide a valid candidate,
+	/// signatures which match the identifier, and enough statements to back
+	/// the candidate.
+	///
+	/// This will also produce a record of misbehaviors by peers:
+	/// * If the response is partially valid, misbehavior by the responding peer.
+	/// * If there are other peers which have advertised the same candidate for different
+	/// relay-parents or para-ids, misbehavior reports for those peers will also
+	/// be generated.
+	///
+	/// Finally, in the case that the response is either valid or partially valid,
+	/// this will clean up all remaining requests for the candidate in the manager.
+	///
+	/// As parameters, the user should supply the canonical group array as well
+	/// as a mapping from validator index to validator ID. The validator pubkey mapping
+	/// will not be queried except for validator indices in the group.
+	pub fn validate_response(
+		self,
+		manager: &mut RequestManager,
+		group: &[ValidatorIndex],
+		session: SessionIndex,
+		validator_key_lookup: impl Fn(ValidatorIndex) -> Option<ValidatorId>,
+		allowed_para_lookup: impl Fn(ParaId, GroupIndex) -> bool,
+	) -> ResponseValidationOutput {
+		let UnhandledResponse {
+			response: TaggedResponse { identifier, requested_peer, props, response },
+		} = self;
+
+		// handle races if the candidate is no longer known.
+		// this could happen if we requested the candidate under two
+		// different identifiers at the same time, and received a valid
+		// response on the other.
+		//
+		// it could also happen in the case that we had a request in-flight
+		// and the request entry was garbage-collected on outdated relay parent.
+		let entry = match manager.requests.get_mut(&identifier) {
+			None =>
+				return ResponseValidationOutput {
+					requested_peer,
+					reputation_changes: Vec::new(),
+					request_status: CandidateRequestStatus::Outdated,
+				},
+			Some(e) => e,
+		};
+
+		let priority_index = match manager
+			.by_priority
+			.binary_search(&(entry.priority.clone(), identifier.clone()))
+		{
+			Ok(i) => i,
+			Err(_) => unreachable!("requested candidates always have a priority entry; qed"),
+		};
+
+		entry.in_flight = false;
+		entry.priority.attempts += 1;
+
+		// update the location in the priority queue.
+		insert_or_update_priority(
+			&mut manager.by_priority,
+			Some(priority_index),
+			identifier.clone(),
+			entry.priority.clone(),
+		);
+
+		let complete_response = match response {
+			Err(RequestError::InvalidResponse(e)) => {
+				gum::trace!(
+					target: LOG_TARGET,
+					err = ?e,
+					peer = ?requested_peer,
+					"Improperly encoded response"
+				);
+
+				return ResponseValidationOutput {
+					requested_peer,
+					reputation_changes: vec![(requested_peer, COST_IMPROPERLY_DECODED_RESPONSE)],
+					request_status: CandidateRequestStatus::Incomplete,
+				}
+			},
+			Err(RequestError::NetworkError(_) | RequestError::Canceled(_)) =>
+				return ResponseValidationOutput {
+					requested_peer,
+					reputation_changes: vec![],
+					request_status: CandidateRequestStatus::Incomplete,
+				},
+			Ok(response) => response,
+		};
+
+		let output = validate_complete_response(
+			&identifier,
+			props,
+			complete_response,
+			requested_peer,
+			group,
+			session,
+			validator_key_lookup,
+			allowed_para_lookup,
+		);
+
+		if let CandidateRequestStatus::Complete { .. } = output.request_status {
+			manager.remove_for(identifier.candidate_hash);
+		}
+
+		output
+	}
+}
+
+fn validate_complete_response(
+	identifier: &CandidateIdentifier,
+	props: RequestProperties,
+	response: AttestedCandidateResponse,
+	requested_peer: PeerId,
+	group: &[ValidatorIndex],
+	session: SessionIndex,
+	validator_key_lookup: impl Fn(ValidatorIndex) -> Option<ValidatorId>,
+	allowed_para_lookup: impl Fn(ParaId, GroupIndex) -> bool,
+) -> ResponseValidationOutput {
+	let RequestProperties { backing_threshold, mut unwanted_mask } = props;
+
+	// sanity check the bitmask size. this is based entirely on
+	// local logic here.
+	if !unwanted_mask.has_len(group.len()) {
+		gum::error!(
+			target: LOG_TARGET,
+			group_len = group.len(),
+			"Logic bug: group size != sent bitmask len"
+		);
+
+		// resize and attempt to continue.
+		unwanted_mask.seconded_in_group.resize(group.len(), true);
+		unwanted_mask.validated_in_group.resize(group.len(), true);
+	}
+
+	let invalid_candidate_output = || ResponseValidationOutput {
+		request_status: CandidateRequestStatus::Incomplete,
+		reputation_changes: vec![(requested_peer, COST_INVALID_RESPONSE)],
+		requested_peer,
+	};
+
+	// sanity-check the candidate response.
+	// note: roughly ascending cost of operations
+	{
+		if response.candidate_receipt.descriptor.relay_parent != identifier.relay_parent {
+			return invalid_candidate_output()
+		}
+
+		if response.candidate_receipt.descriptor.persisted_validation_data_hash !=
+			response.persisted_validation_data.hash()
+		{
+			return invalid_candidate_output()
+		}
+
+		if !allowed_para_lookup(
+			response.candidate_receipt.descriptor.para_id,
+			identifier.group_index,
+		) {
+			return invalid_candidate_output()
+		}
+
+		if response.candidate_receipt.hash() != identifier.candidate_hash {
+			return invalid_candidate_output()
+		}
+	}
+
+	// statement checks.
+ let mut rep_changes = Vec::new(); + let statements = { + let mut statements = + Vec::with_capacity(std::cmp::min(response.statements.len(), group.len() * 2)); + + let mut received_filter = StatementFilter::blank(group.len()); + + let index_in_group = |v: ValidatorIndex| group.iter().position(|x| &v == x); + + let signing_context = + SigningContext { parent_hash: identifier.relay_parent, session_index: session }; + + for unchecked_statement in response.statements.into_iter().take(group.len() * 2) { + // ensure statement is from a validator in the group. + let i = match index_in_group(unchecked_statement.unchecked_validator_index()) { + Some(i) => i, + None => { + rep_changes.push((requested_peer, COST_UNREQUESTED_RESPONSE_STATEMENT)); + continue + }, + }; + + // ensure statement is on the correct candidate hash. + if unchecked_statement.unchecked_payload().candidate_hash() != + &identifier.candidate_hash + { + rep_changes.push((requested_peer, COST_UNREQUESTED_RESPONSE_STATEMENT)); + continue + } + + // filter out duplicates or statements outside the mask. + // note on indexing: we have ensured that the bitmask and the + // duplicate trackers have the correct size for the group. + match unchecked_statement.unchecked_payload() { + CompactStatement::Seconded(_) => { + if unwanted_mask.seconded_in_group[i] { + rep_changes.push((requested_peer, COST_UNREQUESTED_RESPONSE_STATEMENT)); + continue + } + + if received_filter.seconded_in_group[i] { + rep_changes.push((requested_peer, COST_UNREQUESTED_RESPONSE_STATEMENT)); + continue + } + }, + CompactStatement::Valid(_) => { + if unwanted_mask.validated_in_group[i] { + rep_changes.push((requested_peer, COST_UNREQUESTED_RESPONSE_STATEMENT)); + continue + } + + if received_filter.validated_in_group[i] { + rep_changes.push((requested_peer, COST_UNREQUESTED_RESPONSE_STATEMENT)); + continue + } + }, + } + + let validator_public = + match validator_key_lookup(unchecked_statement.unchecked_validator_index()) { + None => { + rep_changes.push((requested_peer, COST_INVALID_SIGNATURE)); + continue + }, + Some(p) => p, + }; + + let checked_statement = + match unchecked_statement.try_into_checked(&signing_context, &validator_public) { + Err(_) => { + rep_changes.push((requested_peer, COST_INVALID_SIGNATURE)); + continue + }, + Ok(checked) => checked, + }; + + match checked_statement.payload() { + CompactStatement::Seconded(_) => { + received_filter.seconded_in_group.set(i, true); + }, + CompactStatement::Valid(_) => { + received_filter.validated_in_group.set(i, true); + }, + } + + statements.push(checked_statement); + rep_changes.push((requested_peer, BENEFIT_VALID_STATEMENT)); + } + + // Only accept responses which are sufficient, according to our + // required backing threshold. + if !seconded_and_sufficient(&received_filter, backing_threshold) { + return invalid_candidate_output() + } + + statements + }; + + rep_changes.push((requested_peer, BENEFIT_VALID_RESPONSE)); + + ResponseValidationOutput { + requested_peer, + request_status: CandidateRequestStatus::Complete { + candidate: response.candidate_receipt, + persisted_validation_data: response.persisted_validation_data, + statements, + }, + reputation_changes: rep_changes, + } +} + +/// The status of the candidate request after the handling of a response. +#[derive(Debug, PartialEq)] +pub enum CandidateRequestStatus { + /// The request was outdated at the point of receiving the response. + Outdated, + /// The response either did not arrive or was invalid. 
+	Incomplete,
+	/// The response completed the request. Statements sent beyond the
+	/// mask have been ignored.
+	Complete {
+		candidate: CommittedCandidateReceipt,
+		persisted_validation_data: PersistedValidationData,
+		statements: Vec<SignedStatement>,
+	},
+}
+
+/// Output of the response validation.
+#[derive(Debug, PartialEq)]
+pub struct ResponseValidationOutput {
+	/// The peer we requested from.
+	pub requested_peer: PeerId,
+	/// The status of the request.
+	pub request_status: CandidateRequestStatus,
+	/// Any reputation changes as a result of validating the response.
+	pub reputation_changes: Vec<(PeerId, Rep)>,
+}
+
+fn insert_or_update_priority(
+	priority_sorted: &mut Vec<(Priority, CandidateIdentifier)>,
+	prev_index: Option<usize>,
+	candidate_identifier: CandidateIdentifier,
+	new_priority: Priority,
+) -> usize {
+	if let Some(prev_index) = prev_index {
+		// GIGO: this behaves strangely if prev-index is not for the
+		// expected identifier.
+		if priority_sorted[prev_index].0 == new_priority {
+			// unchanged.
+			return prev_index
+		} else {
+			priority_sorted.remove(prev_index);
+		}
+	}
+
+	let item = (new_priority, candidate_identifier);
+	match priority_sorted.binary_search(&item) {
+		Ok(i) => i, // ignore if already present.
+		Err(i) => {
+			priority_sorted.insert(i, item);
+			i
+		},
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+	use polkadot_primitives::HeadData;
+	use polkadot_primitives_test_helpers as test_helpers;
+
+	fn dummy_pvd() -> PersistedValidationData {
+		PersistedValidationData {
+			parent_head: HeadData(vec![7, 8, 9]),
+			relay_parent_number: 5,
+			max_pov_size: 1024,
+			relay_parent_storage_root: Default::default(),
+		}
+	}
+
+	#[test]
+	fn test_remove_by_relay_parent() {
+		let parent_a = Hash::from_low_u64_le(1);
+		let parent_b = Hash::from_low_u64_le(2);
+		let parent_c = Hash::from_low_u64_le(3);
+
+		let candidate_a1 = CandidateHash(Hash::from_low_u64_le(11));
+		let candidate_a2 = CandidateHash(Hash::from_low_u64_le(12));
+		let candidate_b1 = CandidateHash(Hash::from_low_u64_le(21));
+		let candidate_b2 = CandidateHash(Hash::from_low_u64_le(22));
+		let candidate_c1 = CandidateHash(Hash::from_low_u64_le(31));
+		let duplicate_hash = CandidateHash(Hash::from_low_u64_le(31));
+
+		let mut request_manager = RequestManager::new();
+		request_manager.get_or_insert(parent_a, candidate_a1, 1.into());
+		request_manager.get_or_insert(parent_a, candidate_a2, 1.into());
+		request_manager.get_or_insert(parent_b, candidate_b1, 1.into());
+		request_manager.get_or_insert(parent_b, candidate_b2, 2.into());
+		request_manager.get_or_insert(parent_c, candidate_c1, 2.into());
+		request_manager.get_or_insert(parent_a, duplicate_hash, 1.into());
+
+		assert_eq!(request_manager.requests.len(), 6);
+		assert_eq!(request_manager.by_priority.len(), 6);
+		assert_eq!(request_manager.unique_identifiers.len(), 5);
+
+		request_manager.remove_by_relay_parent(parent_a);
+
+		assert_eq!(request_manager.requests.len(), 3);
+		assert_eq!(request_manager.by_priority.len(), 3);
+		assert_eq!(request_manager.unique_identifiers.len(), 3);
+
+		assert!(!request_manager.unique_identifiers.contains_key(&candidate_a1));
+		assert!(!request_manager.unique_identifiers.contains_key(&candidate_a2));
+		// Duplicate hash should still be there (under a different parent).
+ assert!(request_manager.unique_identifiers.contains_key(&duplicate_hash)); + + request_manager.remove_by_relay_parent(parent_b); + + assert_eq!(request_manager.requests.len(), 1); + assert_eq!(request_manager.by_priority.len(), 1); + assert_eq!(request_manager.unique_identifiers.len(), 1); + + assert!(!request_manager.unique_identifiers.contains_key(&candidate_b1)); + assert!(!request_manager.unique_identifiers.contains_key(&candidate_b2)); + + request_manager.remove_by_relay_parent(parent_c); + + assert!(request_manager.requests.is_empty()); + assert!(request_manager.by_priority.is_empty()); + assert!(request_manager.unique_identifiers.is_empty()); + } + + #[test] + fn test_priority_ordering() { + let parent_a = Hash::from_low_u64_le(1); + let parent_b = Hash::from_low_u64_le(2); + let parent_c = Hash::from_low_u64_le(3); + + let candidate_a1 = CandidateHash(Hash::from_low_u64_le(11)); + let candidate_a2 = CandidateHash(Hash::from_low_u64_le(12)); + let candidate_b1 = CandidateHash(Hash::from_low_u64_le(21)); + let candidate_b2 = CandidateHash(Hash::from_low_u64_le(22)); + let candidate_c1 = CandidateHash(Hash::from_low_u64_le(31)); + + let mut request_manager = RequestManager::new(); + + // Add some entries, set a couple of them to cluster (high) priority. + let identifier_a1 = request_manager + .get_or_insert(parent_a, candidate_a1, 1.into()) + .identifier + .clone(); + let identifier_a2 = { + let mut entry = request_manager.get_or_insert(parent_a, candidate_a2, 1.into()); + entry.set_cluster_priority(); + entry.identifier.clone() + }; + let identifier_b1 = request_manager + .get_or_insert(parent_b, candidate_b1, 1.into()) + .identifier + .clone(); + let identifier_b2 = request_manager + .get_or_insert(parent_b, candidate_b2, 2.into()) + .identifier + .clone(); + let identifier_c1 = { + let mut entry = request_manager.get_or_insert(parent_c, candidate_c1, 2.into()); + entry.set_cluster_priority(); + entry.identifier.clone() + }; + + let attempts = 0; + assert_eq!( + request_manager.by_priority, + vec![ + (Priority { origin: Origin::Cluster, attempts }, identifier_a2), + (Priority { origin: Origin::Cluster, attempts }, identifier_c1), + (Priority { origin: Origin::Unspecified, attempts }, identifier_a1), + (Priority { origin: Origin::Unspecified, attempts }, identifier_b1), + (Priority { origin: Origin::Unspecified, attempts }, identifier_b2), + ] + ); + } + + // Test case where candidate is requested under two different identifiers at the same time. + // Should result in `Outdated` error. 
+ #[test] + fn handle_outdated_response_due_to_requests_for_different_identifiers() { + let mut request_manager = RequestManager::new(); + + let relay_parent = Hash::from_low_u64_le(1); + let mut candidate_receipt = test_helpers::dummy_committed_candidate_receipt(relay_parent); + let persisted_validation_data = dummy_pvd(); + candidate_receipt.descriptor.persisted_validation_data_hash = + persisted_validation_data.hash(); + let candidate = candidate_receipt.hash(); + let requested_peer = PeerId::random(); + + let identifier1 = request_manager + .get_or_insert(relay_parent, candidate, 1.into()) + .identifier + .clone(); + request_manager + .get_or_insert(relay_parent, candidate, 1.into()) + .add_peer(requested_peer); + let identifier2 = request_manager + .get_or_insert(relay_parent, candidate, 2.into()) + .identifier + .clone(); + request_manager + .get_or_insert(relay_parent, candidate, 2.into()) + .add_peer(requested_peer); + + assert_ne!(identifier1, identifier2); + assert_eq!(request_manager.requests.len(), 2); + + let group_size = 3; + let group = &[ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]; + + let unwanted_mask = StatementFilter::blank(group_size); + let request_properties = RequestProperties { unwanted_mask, backing_threshold: None }; + + // Get requests. + { + let request_props = + |_identifier: &CandidateIdentifier| Some((&request_properties).clone()); + let peer_advertised = |_identifier: &CandidateIdentifier, _peer: &_| { + Some(StatementFilter::full(group_size)) + }; + let outgoing = request_manager.next_request(request_props, peer_advertised).unwrap(); + assert_eq!(outgoing.payload.candidate_hash, candidate); + let outgoing = request_manager.next_request(request_props, peer_advertised).unwrap(); + assert_eq!(outgoing.payload.candidate_hash, candidate); + } + + // Validate first response. + { + let statements = vec![]; + let response = UnhandledResponse { + response: TaggedResponse { + identifier: identifier1, + requested_peer, + props: request_properties.clone(), + response: Ok(AttestedCandidateResponse { + candidate_receipt: candidate_receipt.clone(), + persisted_validation_data: persisted_validation_data.clone(), + statements, + }), + }, + }; + let validator_key_lookup = |_v| None; + let allowed_para_lookup = |_para, _g_index| true; + let statements = vec![]; + let output = response.validate_response( + &mut request_manager, + group, + 0, + validator_key_lookup, + allowed_para_lookup, + ); + assert_eq!( + output, + ResponseValidationOutput { + requested_peer, + request_status: CandidateRequestStatus::Complete { + candidate: candidate_receipt.clone(), + persisted_validation_data: persisted_validation_data.clone(), + statements, + }, + reputation_changes: vec![(requested_peer, BENEFIT_VALID_RESPONSE)], + } + ); + } + + // Try to validate second response. 
+ { + let statements = vec![]; + let response = UnhandledResponse { + response: TaggedResponse { + identifier: identifier2, + requested_peer, + props: request_properties, + response: Ok(AttestedCandidateResponse { + candidate_receipt: candidate_receipt.clone(), + persisted_validation_data: persisted_validation_data.clone(), + statements, + }), + }, + }; + let validator_key_lookup = |_v| None; + let allowed_para_lookup = |_para, _g_index| true; + let output = response.validate_response( + &mut request_manager, + group, + 0, + validator_key_lookup, + allowed_para_lookup, + ); + assert_eq!( + output, + ResponseValidationOutput { + requested_peer, + request_status: CandidateRequestStatus::Outdated, + reputation_changes: vec![], + } + ); + } + } + + // Test case where we had a request in-flight and the request entry was garbage-collected on + // outdated relay parent. + #[test] + fn handle_outdated_response_due_to_garbage_collection() { + let mut request_manager = RequestManager::new(); + + let relay_parent = Hash::from_low_u64_le(1); + let mut candidate_receipt = test_helpers::dummy_committed_candidate_receipt(relay_parent); + let persisted_validation_data = dummy_pvd(); + candidate_receipt.descriptor.persisted_validation_data_hash = + persisted_validation_data.hash(); + let candidate = candidate_receipt.hash(); + let requested_peer = PeerId::random(); + + let identifier = request_manager + .get_or_insert(relay_parent, candidate, 1.into()) + .identifier + .clone(); + request_manager + .get_or_insert(relay_parent, candidate, 1.into()) + .add_peer(requested_peer); + + let group_size = 3; + let group = &[ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]; + + let unwanted_mask = StatementFilter::blank(group_size); + let request_properties = RequestProperties { unwanted_mask, backing_threshold: None }; + let peer_advertised = + |_identifier: &CandidateIdentifier, _peer: &_| Some(StatementFilter::full(group_size)); + + // Get request once successfully. + { + let request_props = + |_identifier: &CandidateIdentifier| Some((&request_properties).clone()); + let outgoing = request_manager.next_request(request_props, peer_advertised).unwrap(); + assert_eq!(outgoing.payload.candidate_hash, candidate); + } + + // Garbage collect based on relay parent. + request_manager.remove_by_relay_parent(relay_parent); + + // Try to validate response. 
+ { + let statements = vec![]; + let response = UnhandledResponse { + response: TaggedResponse { + identifier, + requested_peer, + props: request_properties, + response: Ok(AttestedCandidateResponse { + candidate_receipt: candidate_receipt.clone(), + persisted_validation_data: persisted_validation_data.clone(), + statements, + }), + }, + }; + let validator_key_lookup = |_v| None; + let allowed_para_lookup = |_para, _g_index| true; + let output = response.validate_response( + &mut request_manager, + group, + 0, + validator_key_lookup, + allowed_para_lookup, + ); + assert_eq!( + output, + ResponseValidationOutput { + requested_peer, + request_status: CandidateRequestStatus::Outdated, + reputation_changes: vec![], + } + ); + } + } + + #[test] + fn should_clean_up_after_successful_requests() { + let mut request_manager = RequestManager::new(); + + let relay_parent = Hash::from_low_u64_le(1); + let mut candidate_receipt = test_helpers::dummy_committed_candidate_receipt(relay_parent); + let persisted_validation_data = dummy_pvd(); + candidate_receipt.descriptor.persisted_validation_data_hash = + persisted_validation_data.hash(); + let candidate = candidate_receipt.hash(); + let requested_peer = PeerId::random(); + + let identifier = request_manager + .get_or_insert(relay_parent, candidate, 1.into()) + .identifier + .clone(); + request_manager + .get_or_insert(relay_parent, candidate, 1.into()) + .add_peer(requested_peer); + + assert_eq!(request_manager.requests.len(), 1); + assert_eq!(request_manager.by_priority.len(), 1); + + let group_size = 3; + let group = &[ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]; + + let unwanted_mask = StatementFilter::blank(group_size); + let request_properties = RequestProperties { unwanted_mask, backing_threshold: None }; + let peer_advertised = + |_identifier: &CandidateIdentifier, _peer: &_| Some(StatementFilter::full(group_size)); + + // Get request once successfully. + { + let request_props = + |_identifier: &CandidateIdentifier| Some((&request_properties).clone()); + let outgoing = request_manager.next_request(request_props, peer_advertised).unwrap(); + assert_eq!(outgoing.payload.candidate_hash, candidate); + } + + // Validate response. + { + let statements = vec![]; + let response = UnhandledResponse { + response: TaggedResponse { + identifier, + requested_peer, + props: request_properties.clone(), + response: Ok(AttestedCandidateResponse { + candidate_receipt: candidate_receipt.clone(), + persisted_validation_data: persisted_validation_data.clone(), + statements, + }), + }, + }; + let validator_key_lookup = |_v| None; + let allowed_para_lookup = |_para, _g_index| true; + let statements = vec![]; + let output = response.validate_response( + &mut request_manager, + group, + 0, + validator_key_lookup, + allowed_para_lookup, + ); + assert_eq!( + output, + ResponseValidationOutput { + requested_peer, + request_status: CandidateRequestStatus::Complete { + candidate: candidate_receipt.clone(), + persisted_validation_data: persisted_validation_data.clone(), + statements, + }, + reputation_changes: vec![(requested_peer, BENEFIT_VALID_RESPONSE)], + } + ); + } + + // Ensure that cleanup occurred. 
+		assert_eq!(request_manager.requests.len(), 0);
+		assert_eq!(request_manager.by_priority.len(), 0);
+	}
+}
diff --git a/node/network/statement-distribution/src/vstaging/statement_store.rs b/node/network/statement-distribution/src/vstaging/statement_store.rs
new file mode 100644
index 000000000000..50ac99d0a813
--- /dev/null
+++ b/node/network/statement-distribution/src/vstaging/statement_store.rs
@@ -0,0 +1,283 @@
+// Copyright 2022 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+//! A store of all statements under a given relay-parent.
+//!
+//! This structure doesn't attempt to do any spam protection, which must
+//! be provided at a higher level.
+//!
+//! This keeps track of statements submitted with a number of different views
+//! into this data: views based on the candidate, views based on the validator
+//! groups, and views based on the validators themselves.
+
+use bitvec::{order::Lsb0 as BitOrderLsb0, vec::BitVec};
+use polkadot_node_network_protocol::vstaging::StatementFilter;
+use polkadot_primitives::vstaging::{
+	CandidateHash, CompactStatement, GroupIndex, SignedStatement, ValidatorIndex,
+};
+use std::collections::hash_map::{Entry as HEntry, HashMap};
+
+use super::groups::Groups;
+
+/// Possible origins of a statement.
+pub enum StatementOrigin {
+	/// The statement originated locally.
+	Local,
+	/// The statement originated from a remote peer.
+	Remote,
+}
+
+impl StatementOrigin {
+	fn is_local(&self) -> bool {
+		match *self {
+			StatementOrigin::Local => true,
+			StatementOrigin::Remote => false,
+		}
+	}
+}
+
+struct StoredStatement {
+	statement: SignedStatement,
+	known_by_backing: bool,
+}
+
+/// Storage for statements. Intended to be used for statements signed under
+/// the same relay-parent. See module docs for more details.
+pub struct StatementStore {
+	validator_meta: HashMap<ValidatorIndex, ValidatorMeta>,
+
+	// we keep statements per-group because even though only one group _should_ be
+	// producing statements about a candidate, until we have the candidate receipt
+	// itself, we can't tell which group that is.
+	group_statements: HashMap<(GroupIndex, CandidateHash), GroupStatements>,
+	known_statements: HashMap<Fingerprint, StoredStatement>,
+}
+
+impl StatementStore {
+	/// Create a new [`StatementStore`].
+	pub fn new(groups: &Groups) -> Self {
+		let mut validator_meta = HashMap::new();
+		for (g, group) in groups.all().iter().enumerate() {
+			for (i, v) in group.iter().enumerate() {
+				validator_meta.insert(
+					*v,
+					ValidatorMeta {
+						seconded_count: 0,
+						within_group_index: i,
+						group: GroupIndex(g as _),
+					},
+				);
+			}
+		}
+
+		StatementStore {
+			validator_meta,
+			group_statements: HashMap::new(),
+			known_statements: HashMap::new(),
+		}
+	}
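+
+	// For example (illustrative): given groups `[[5, 7], [9]]`, `new` maps validator
+	// 5 to (group 0, within-group index 0), 7 to (group 0, index 1) and 9 to
+	// (group 1, index 0), with every `seconded_count` starting at zero.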
+
+	/// Insert a statement. Returns `true` if it was not known already, `false` if it was.
+	/// Statements by unknown validators are not inserted; an error is returned instead.
+	pub fn insert(
+		&mut self,
+		groups: &Groups,
+		statement: SignedStatement,
+		origin: StatementOrigin,
+	) -> Result<bool, ValidatorUnknown> {
+		let validator_index = statement.validator_index();
+		let validator_meta = match self.validator_meta.get_mut(&validator_index) {
+			None => return Err(ValidatorUnknown),
+			Some(m) => m,
+		};
+
+		let compact = statement.payload().clone();
+		let fingerprint = (validator_index, compact.clone());
+		match self.known_statements.entry(fingerprint) {
+			HEntry::Occupied(mut e) => {
+				if let StatementOrigin::Local = origin {
+					e.get_mut().known_by_backing = true;
+				}
+
+				return Ok(false)
+			},
+			HEntry::Vacant(e) => {
+				e.insert(StoredStatement { statement, known_by_backing: origin.is_local() });
+			},
+		}
+
+		let candidate_hash = *compact.candidate_hash();
+		let seconded = if let CompactStatement::Seconded(_) = compact { true } else { false };
+
+		// cross-reference updates.
+		{
+			let group_index = validator_meta.group;
+			let group = match groups.get(group_index) {
+				Some(g) => g,
+				None => {
+					gum::error!(
+						target: crate::LOG_TARGET,
+						?group_index,
+						"groups passed into `insert` differ from those used at store creation"
+					);
+
+					return Err(ValidatorUnknown)
+				},
+			};
+
+			let group_statements = self
+				.group_statements
+				.entry((group_index, candidate_hash))
+				.or_insert_with(|| GroupStatements::with_group_size(group.len()));
+
+			if seconded {
+				validator_meta.seconded_count += 1;
+				group_statements.note_seconded(validator_meta.within_group_index);
+			} else {
+				group_statements.note_validated(validator_meta.within_group_index);
+			}
+		}
+
+		Ok(true)
+	}
+
+	/// Fill a `StatementFilter` to be used in the grid topology with all statements
+	/// we are already aware of.
+	pub fn fill_statement_filter(
+		&self,
+		group_index: GroupIndex,
+		candidate_hash: CandidateHash,
+		statement_filter: &mut StatementFilter,
+	) {
+		if let Some(statements) = self.group_statements.get(&(group_index, candidate_hash)) {
+			statement_filter.seconded_in_group |= statements.seconded.as_bitslice();
+			statement_filter.validated_in_group |= statements.valid.as_bitslice();
+		}
+	}
+
+	/// Get an iterator over stored signed statements by the group conforming to the
+	/// given filter.
+	///
+	/// Seconded statements are provided first.
+	pub fn group_statements<'a>(
+		&'a self,
+		groups: &'a Groups,
+		group_index: GroupIndex,
+		candidate_hash: CandidateHash,
+		filter: &'a StatementFilter,
+	) -> impl Iterator<Item = &'a SignedStatement> + 'a {
+		let group_validators = groups.get(group_index);
+
+		let seconded_statements = filter
+			.seconded_in_group
+			.iter_ones()
+			.filter_map(move |i| group_validators.as_ref().and_then(|g| g.get(i)))
+			.filter_map(move |v| {
+				self.known_statements.get(&(*v, CompactStatement::Seconded(candidate_hash)))
+			})
+			.map(|s| &s.statement);
+
+		let valid_statements = filter
+			.validated_in_group
+			.iter_ones()
+			.filter_map(move |i| group_validators.as_ref().and_then(|g| g.get(i)))
+			.filter_map(move |v| {
+				self.known_statements.get(&(*v, CompactStatement::Valid(candidate_hash)))
+			})
+			.map(|s| &s.statement);
+
+		seconded_statements.chain(valid_statements)
+	}
+
+	/// Get the full statement of this kind issued by this validator, if it is known.
+	pub fn validator_statement(
+		&self,
+		validator_index: ValidatorIndex,
+		statement: CompactStatement,
+	) -> Option<&SignedStatement> {
+		self.known_statements.get(&(validator_index, statement)).map(|s| &s.statement)
+	}
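+
+	// For example (illustrative, assuming all of these statements are known): with
+	// a group `[a, b, c]` and a filter with `seconded_in_group = [1, 0, 1]` and
+	// `validated_in_group = [0, 1, 0]`, `group_statements` yields the `Seconded`
+	// statements of `a` and `c` first, followed by the `Valid` statement of `b`.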
+
+	/// Get an iterator over all statements marked as being unknown by the backing subsystem.
+	pub fn fresh_statements_for_backing<'a>(
+		&'a self,
+		validators: &'a [ValidatorIndex],
+		candidate_hash: CandidateHash,
+	) -> impl Iterator<Item = &'a SignedStatement> + 'a {
+		let s_st = CompactStatement::Seconded(candidate_hash);
+		let v_st = CompactStatement::Valid(candidate_hash);
+
+		validators
+			.iter()
+			.flat_map(move |v| {
+				let a = self.known_statements.get(&(*v, s_st.clone()));
+				let b = self.known_statements.get(&(*v, v_st.clone()));
+
+				a.into_iter().chain(b)
+			})
+			.filter(|stored| !stored.known_by_backing)
+			.map(|stored| &stored.statement)
+	}
+
+	/// Get the number of known `Seconded` statements by the given validator index.
+	pub fn seconded_count(&self, validator_index: &ValidatorIndex) -> usize {
+		self.validator_meta.get(validator_index).map_or(0, |m| m.seconded_count)
+	}
+
+	/// Note that a statement is known by the backing subsystem.
+	pub fn note_known_by_backing(
+		&mut self,
+		validator_index: ValidatorIndex,
+		statement: CompactStatement,
+	) {
+		if let Some(stored) = self.known_statements.get_mut(&(validator_index, statement)) {
+			stored.known_by_backing = true;
+		}
+	}
+}
+
+/// Error indicating that the validator was unknown.
+pub struct ValidatorUnknown;
+
+type Fingerprint = (ValidatorIndex, CompactStatement);
+
+struct ValidatorMeta {
+	group: GroupIndex,
+	within_group_index: usize,
+	seconded_count: usize,
+}
+
+struct GroupStatements {
+	seconded: BitVec<u8, BitOrderLsb0>,
+	valid: BitVec<u8, BitOrderLsb0>,
+}
+
+impl GroupStatements {
+	fn with_group_size(group_size: usize) -> Self {
+		GroupStatements {
+			seconded: BitVec::repeat(false, group_size),
+			valid: BitVec::repeat(false, group_size),
+		}
+	}
+
+	fn note_seconded(&mut self, within_group_index: usize) {
+		self.seconded.set(within_group_index, true);
+	}
+
+	fn note_validated(&mut self, within_group_index: usize) {
+		self.valid.set(within_group_index, true);
+	}
+}
diff --git a/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/node/network/statement-distribution/src/vstaging/tests/cluster.rs
new file mode 100644
index 000000000000..ca849dbd39a6
--- /dev/null
+++ b/node/network/statement-distribution/src/vstaging/tests/cluster.rs
@@ -0,0 +1,1216 @@
+// Copyright 2023 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+ +use super::*; + +use polkadot_primitives_test_helpers::make_candidate; + +#[test] +fn share_seconded_circulated_to_cluster() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + vec![1, 2, 3].into(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + // peer C is not in group, has relay parent in view. + { + let other_group_validators = state.group_validators(local_validator.group_index, true); + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()), + ) + .await; + + connect_peer(&mut overseer, peer_c.clone(), None).await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let full_signed = state + .sign_statement( + local_validator.validator_index, + CompactStatement::Seconded(candidate_hash), + &SigningContext { session_index: 1, parent_hash: relay_parent }, + ) + .convert_to_superpayload(StatementWithPVD::Seconded(candidate.clone(), pvd.clone())) + .unwrap(); + + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, full_signed), + }) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::Statement( + r, + s, + ) + )) + )) => { + assert_eq!(peers, vec![peer_a.clone()]); + assert_eq!(r, relay_parent); + assert_eq!(s.unchecked_payload(), &CompactStatement::Seconded(candidate_hash)); + assert_eq!(s.unchecked_validator_index(), local_validator.validator_index); + } + ); + + // sharing a `Seconded` message confirms a candidate, which leads to new + // fragment tree updates. 
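+ // (Editorial note: in the helper call below, the empty `vec![]` means the
+ // hypothetical-frontier request is answered with no candidate memberships,
+ // and `None` means it is not anchored to a particular active leaf.)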
+ answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + + overseer + }); +} + +#[test] +fn cluster_valid_statement_before_seconded_ignored() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is in group, has relay parent in view. + let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let signed_valid = state.sign_statement( + v_a, + CompactStatement::Valid(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + signed_valid.as_unchecked().clone(), + ), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) => { + assert_eq!(p, peer_a); + assert_eq!(r, COST_UNEXPECTED_STATEMENT); + } + ); + + overseer + }); +} + +#[test] +fn cluster_statement_bad_signature() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is in group, has relay parent in view. 
+ let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // sign statements with wrong signing context, leading to bad signature. + let statements = vec![ + (v_a, CompactStatement::Seconded(candidate_hash)), + (v_b, CompactStatement::Seconded(candidate_hash)), + ] + .into_iter() + .map(|(v, s)| { + state.sign_statement( + v, + s, + &SigningContext { parent_hash: Hash::repeat_byte(69), session_index: 1 }, + ) + }) + .map(|s| s.as_unchecked().clone()); + + for statement in statements { + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + statement.clone(), + ), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == COST_INVALID_SIGNATURE => { }, + "{:?}", + statement + ); + } + + overseer + }); +} + +#[test] +fn useful_cluster_statement_from_non_cluster_peer_rejected() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is not in group, has relay parent in view. 
+ let not_our_group = + if local_validator.group_index.0 == 0 { GroupIndex(1) } else { GroupIndex(0) }; + + let that_group_validators = state.group_validators(not_our_group, false); + let v_non = that_group_validators[0]; + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_non)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let statement = state + .sign_statement( + v_non, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == COST_UNEXPECTED_STATEMENT => { } + ); + + overseer + }); +} + +#[test] +fn statement_from_non_cluster_originator_unexpected() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is not in group, has relay parent in view. 
+ let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + + connect_peer(&mut overseer, peer_a.clone(), None).await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let statement = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == COST_UNEXPECTED_STATEMENT => { } + ); + + overseer + }); +} + +#[test] +fn seconded_statement_leads_to_request() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + let candidate_hash = CandidateHash(Hash::repeat_byte(42)); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is in group, has relay parent in view. 
+ let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let statement = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(requests, IfDisconnected::ImmediateError)) => { + assert_eq!(requests.len(), 1); + assert_matches!( + &requests[0], + Requests::AttestedCandidateVStaging(outgoing) => { + assert_eq!(outgoing.peer, Recipient::Peer(peer_a.clone())); + assert_eq!(outgoing.payload.candidate_hash, candidate_hash); + } + ); + } + ); + + overseer + }); +} + +#[test] +fn cluster_statements_shared_seconded_first() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + vec![1, 2, 3].into(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is in group, no relay parent in view. 
+ { + let other_group_validators = state.group_validators(local_validator.group_index, true); + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()), + ) + .await; + } + + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let full_signed = state + .sign_statement( + local_validator.validator_index, + CompactStatement::Seconded(candidate_hash), + &SigningContext { session_index: 1, parent_hash: relay_parent }, + ) + .convert_to_superpayload(StatementWithPVD::Seconded(candidate.clone(), pvd.clone())) + .unwrap(); + + let valid_signed = state + .sign_statement( + local_validator.validator_index, + CompactStatement::Valid(candidate_hash), + &SigningContext { session_index: 1, parent_hash: relay_parent }, + ) + .convert_to_superpayload(StatementWithPVD::Valid(candidate_hash)) + .unwrap(); + + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, full_signed), + }) + .await; + + // result of new confirmed candidate. + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, valid_signed), + }) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessages(messages)) => { + assert_eq!(messages.len(), 2); + + assert_eq!(messages[0].0, vec![peer_a]); + assert_eq!(messages[1].0, vec![peer_a]); + + assert_matches!( + &messages[0].1, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::Statement( + r, + s, + ) + )) if r == &relay_parent + && s.unchecked_payload() == &CompactStatement::Seconded(candidate_hash) => {} + ); + + assert_matches!( + &messages[1].1, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::Statement( + r, + s, + ) + )) if r == &relay_parent + && s.unchecked_payload() == &CompactStatement::Valid(candidate_hash) => {} + ); + } + ); + + overseer + }); +} + +#[test] +fn cluster_accounts_for_implicit_view() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + vec![1, 2, 3].into(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer 
A is in group, has relay parent in view.
+ // peer B is in group, has no relay parent in view.
+ {
+ let other_group_validators = state.group_validators(local_validator.group_index, true);
+
+ connect_peer(
+ &mut overseer,
+ peer_a.clone(),
+ Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()),
+ )
+ .await;
+
+ connect_peer(
+ &mut overseer,
+ peer_b.clone(),
+ Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()),
+ )
+ .await;
+
+ send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await;
+ }
+
+ activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await;
+
+ answer_expected_hypothetical_depth_request(
+ &mut overseer,
+ vec![],
+ Some(relay_parent),
+ false,
+ )
+ .await;
+
+ let full_signed = state
+ .sign_statement(
+ local_validator.validator_index,
+ CompactStatement::Seconded(candidate_hash),
+ &SigningContext { session_index: 1, parent_hash: relay_parent },
+ )
+ .convert_to_superpayload(StatementWithPVD::Seconded(candidate.clone(), pvd.clone()))
+ .unwrap();
+
+ overseer
+ .send(FromOrchestra::Communication {
+ msg: StatementDistributionMessage::Share(relay_parent, full_signed),
+ })
+ .await;
+
+ assert_matches!(
+ overseer.recv().await,
+ AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(
+ peers,
+ Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution(
+ protocol_vstaging::StatementDistributionMessage::Statement(
+ r,
+ s,
+ )
+ ))
+ )) => {
+ assert_eq!(peers, vec![peer_a.clone()]);
+ assert_eq!(r, relay_parent);
+ assert_eq!(s.unchecked_payload(), &CompactStatement::Seconded(candidate_hash));
+ assert_eq!(s.unchecked_validator_index(), local_validator.validator_index);
+ }
+ );
+
+ // sharing a `Seconded` message confirms a candidate, which leads to new
+ // fragment tree updates.
+ answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await;
+
+ // activate new leaf, which has relay-parent in implicit view.
+ let next_relay_parent = Hash::repeat_byte(2);
+ let next_test_leaf = TestLeaf {
+ number: 2,
+ hash: next_relay_parent,
+ parent_hash: relay_parent,
+ session: 1,
+ availability_cores: state.make_availability_cores(|i| {
+ CoreState::Scheduled(ScheduledCore {
+ para_id: ParaId::from(i as u32),
+ collator: None,
+ })
+ }),
+ para_data: (0..state.session_info.validator_groups.len())
+ .map(|i| {
+ (
+ ParaId::from(i as u32),
+ PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() },
+ )
+ })
+ .collect(),
+ };
+
+ activate_leaf(&mut overseer, local_para, &next_test_leaf, &state, false).await;
+
+ answer_expected_hypothetical_depth_request(
+ &mut overseer,
+ vec![],
+ Some(next_relay_parent),
+ false,
+ )
+ .await;
+
+ send_peer_view_change(&mut overseer, peer_a.clone(), view![next_relay_parent]).await;
+ send_peer_view_change(&mut overseer, peer_b.clone(), view![next_relay_parent]).await;
+
+ // peer B never had the relay parent in its view, so this tests that
+ // the implicit view is working correctly for B.
+ //
+ // the fact that the statement isn't sent again to A also indicates that
+ // it's working.
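+ //
+ // (Editorial note: `next_test_leaf` sets `parent_hash: relay_parent`, so
+ // `relay_parent` stays within the allowed ancestry of the new leaf; that is
+ // what entitles B to the statement via the implicit view.)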
+ assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessages(messages)) => { + assert_eq!(messages.len(), 1); + assert_matches!( + &messages[0], + ( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::Statement( + r, + s, + ) + )) + ) => { + assert_eq!(peers, &vec![peer_b.clone()]); + assert_eq!(r, &relay_parent); + assert_eq!(s.unchecked_payload(), &CompactStatement::Seconded(candidate_hash)); + assert_eq!(s.unchecked_validator_index(), local_validator.validator_index); + } + ) + } + ); + + overseer + }); +} + +#[test] +fn cluster_messages_imported_after_confirmed_candidate_importable_check() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + vec![1, 2, 3].into(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is in group, has relay parent in view. 
+ let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let a_seconded = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, a_seconded), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + + let req = assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(mut requests, IfDisconnected::ImmediateError)) => { + assert_eq!(requests.len(), 1); + assert_matches!( + requests.pop().unwrap(), + Requests::AttestedCandidateVStaging(mut outgoing) => { + assert_eq!(outgoing.peer, Recipient::Peer(peer_a.clone())); + assert_eq!(outgoing.payload.candidate_hash, candidate_hash); + + let res = AttestedCandidateResponse { + candidate_receipt: candidate.clone(), + persisted_validation_data: pvd.clone(), + statements: vec![], + }; + outgoing.pending_response.send(Ok(res.encode())); + } + ); + } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_RESPONSE => { } + ); + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![( + HypotheticalCandidate::Complete { + candidate_hash, + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }, + vec![(relay_parent, vec![0])], + )], + None, + false, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::CandidateBacking(CandidateBackingMessage::Statement( + r, + s, + )) if r == relay_parent => { + assert_matches!( + s.payload(), + FullStatementWithPVD::Seconded(c, p) + if c == &candidate && p == &pvd => {} + ); + assert_eq!(s.validator_index(), v_a); + } + ); + + overseer + }); +} + +#[test] +fn cluster_messages_imported_after_new_leaf_importable_check() { + let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + vec![1, 2, 3].into(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let test_leaf = TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) 
+ .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + // peer A is in group, has relay parent in view. + let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let a_seconded = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, a_seconded), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + + let req = assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(mut requests, IfDisconnected::ImmediateError)) => { + assert_eq!(requests.len(), 1); + assert_matches!( + requests.pop().unwrap(), + Requests::AttestedCandidateVStaging(mut outgoing) => { + assert_eq!(outgoing.peer, Recipient::Peer(peer_a.clone())); + assert_eq!(outgoing.payload.candidate_hash, candidate_hash); + + let res = AttestedCandidateResponse { + candidate_receipt: candidate.clone(), + persisted_validation_data: pvd.clone(), + statements: vec![], + }; + outgoing.pending_response.send(Ok(res.encode())); + } + ); + } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_RESPONSE => { } + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + + let next_relay_parent = Hash::repeat_byte(2); + let next_test_leaf = TestLeaf { + number: 2, + hash: next_relay_parent, + parent_hash: relay_parent, + session: 1, + availability_cores: state.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..state.session_info.validator_groups.len()) + .map(|i| { + ( + ParaId::from(i as u32), + PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, + ) + }) + .collect(), + }; + + activate_leaf(&mut overseer, local_para, &next_test_leaf, &state, false).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![( + HypotheticalCandidate::Complete { + candidate_hash, + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }, + vec![(relay_parent, vec![0])], + )], + Some(next_relay_parent), + false, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::CandidateBacking(CandidateBackingMessage::Statement( + r, + s, + )) if r == relay_parent => { + assert_matches!( + s.payload(), + FullStatementWithPVD::Seconded(c, p) + if c == &candidate && p == &pvd => {} + ); + assert_eq!(s.validator_index(), v_a); + } + ); + + overseer + }); +} + +// TODO [now]: ensure seconding limit is respected diff --git 
a/node/network/statement-distribution/src/vstaging/tests/grid.rs b/node/network/statement-distribution/src/vstaging/tests/grid.rs
new file mode 100644
index 000000000000..c5eb3826846e
--- /dev/null
+++ b/node/network/statement-distribution/src/vstaging/tests/grid.rs
@@ -0,0 +1,41 @@
+// Copyright 2023 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
+
+// TODO [now]: backed candidate leads to advertisement to relevant validators with relay-parent
+
+// TODO [now]: received advertisement before confirmation leads to request
+
+// TODO [now]: received advertisement after backing leads to acknowledgement
+
+// TODO [now]: received advertisement after confirmation but before backing leads to nothing
+
+// TODO [now]: additional statements are shared after manifest exchange
+
+// TODO [now]: grid-sending validator view entering relay-parent leads to advertisement
+
+// TODO [now]: advertisement not re-sent after re-entering relay parent (view oscillation)
+
+// TODO [now]: acknowledgements sent only when candidate backed
+
+// TODO [now]: grid statements imported to backing once candidate enters hypothetical frontier
+
+// TODO [now]: advertisements rejected from incorrect peers
+
+// TODO [now]: manifests rejected with unknown relay parent or when not a validator
+
+// TODO [now]: advertisements rejected when candidate group does not match para
+
+// TODO [now]: peer reported when advertisement conflicting with confirmed candidate.
diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs
new file mode 100644
index 000000000000..583d17616629
--- /dev/null
+++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs
@@ -0,0 +1,503 @@
+// Copyright 2023 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
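+
+// Editorial note, added for orientation and not part of the original patch:
+// this test module is split into `cluster` (statements within the local
+// backing group), `grid` (inter-group manifests and acknowledgements, per the
+// TODOs above), and `requests` (candidate fetching). Shared scaffolding such
+// as `TestState` and the leaf-activation helpers lives below.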
+
+// TODO [now]: Remove once some tests are written.
+#![allow(unused)]
+
+use super::*;
+use crate::*;
+use polkadot_node_network_protocol::{
+ request_response::{outgoing::Recipient, ReqProtocolNames},
+ view, ObservedRole,
+};
+use polkadot_node_subsystem::messages::{
+ network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, FragmentTreeMembership,
+ HypotheticalCandidate, NetworkBridgeEvent, ProspectiveParachainsMessage, RuntimeApiMessage,
+ RuntimeApiRequest,
+};
+use polkadot_node_subsystem_test_helpers as test_helpers;
+use polkadot_node_subsystem_types::{jaeger, ActivatedLeaf, LeafStatus};
+use polkadot_primitives::vstaging::{
+ AssignmentId, AssignmentPair, AsyncBackingParameters, BlockNumber, CandidateCommitments,
+ CandidateDescriptor, CommittedCandidateReceipt, CoreState, GroupRotationInfo, HeadData, Header,
+ IndexedVec, PersistedValidationData, ScheduledCore, SessionIndex, SessionInfo,
+ ValidationCodeHash, ValidatorPair,
+};
+use sc_keystore::LocalKeystore;
+use sp_application_crypto::Pair as PairT;
+use sp_authority_discovery::AuthorityPair as AuthorityDiscoveryPair;
+use sp_keyring::Sr25519Keyring;
+
+use assert_matches::assert_matches;
+use futures::Future;
+use parity_scale_codec::Encode;
+use rand::{Rng, SeedableRng};
+
+use std::sync::Arc;
+
+mod cluster;
+mod grid;
+mod requests;
+
+type VirtualOverseer = test_helpers::TestSubsystemContextHandle<StatementDistributionMessage>;
+
+const ASYNC_BACKING_PARAMETERS: AsyncBackingParameters =
+ AsyncBackingParameters { max_candidate_depth: 4, allowed_ancestry_len: 3 };
+
+// Some deterministic genesis hash for req/res protocol names
+const GENESIS_HASH: Hash = Hash::repeat_byte(0xff);
+
+struct TestConfig {
+ validator_count: usize,
+ // how many validators to place in each group.
+ group_size: usize,
+ // whether the local node should be a validator
+ local_validator: bool,
+}
+
+#[derive(Clone)]
+struct TestLocalValidator {
+ validator_id: ValidatorId,
+ validator_index: ValidatorIndex,
+ group_index: GroupIndex,
+}
+
+struct TestState {
+ config: TestConfig,
+ local: Option<TestLocalValidator>,
+ validators: Vec<ValidatorPair>,
+ session_info: SessionInfo,
+ req_sender: futures::channel::mpsc::Sender<sc_network::config::IncomingRequest>,
+}
+
+impl TestState {
+ fn from_config(
+ config: TestConfig,
+ req_sender: futures::channel::mpsc::Sender<sc_network::config::IncomingRequest>,
+ rng: &mut impl Rng,
+ ) -> Self {
+ if config.group_size == 0 {
+ panic!("group size cannot be 0");
+ }
+
+ let mut validators = Vec::new();
+ let mut discovery_keys = Vec::new();
+ let mut assignment_keys = Vec::new();
+ let mut validator_groups = Vec::new();
+
+ let local_validator_pos = if config.local_validator {
+ // ensure local validator is always in a full group.
+ Some(rng.gen_range(0..config.validator_count).saturating_sub(config.group_size - 1))
+ } else {
+ None
+ };
+
+ for i in 0..config.validator_count {
+ let validator_pair = if Some(i) == local_validator_pos {
+ // Note: the specific key is used to ensure the keystore holds
+ // this key and the subsystem can detect that it is a validator.
+ Sr25519Keyring::Ferdie.pair().into()
+ } else {
+ ValidatorPair::generate().0
+ };
+ let assignment_id = AssignmentPair::generate().0.public();
+ let discovery_id = AuthorityDiscoveryPair::generate().0.public();
+
+ let group_index = i / config.group_size;
+ validators.push(validator_pair);
+ discovery_keys.push(discovery_id);
+ assignment_keys.push(assignment_id);
+ if validator_groups.len() == group_index {
+ validator_groups.push(vec![ValidatorIndex(i as _)]);
+ } else {
+ validator_groups.last_mut().unwrap().push(ValidatorIndex(i as _));
+ }
+ }
+
+ let local = if let Some(local_pos) = local_validator_pos {
+ Some(TestLocalValidator {
+ validator_id: validators[local_pos].public().clone(),
+ validator_index: ValidatorIndex(local_pos as _),
+ group_index: GroupIndex((local_pos / config.group_size) as _),
+ })
+ } else {
+ None
+ };
+
+ let validator_public = validator_pubkeys(&validators);
+ let session_info = SessionInfo {
+ validators: validator_public,
+ discovery_keys,
+ validator_groups: IndexedVec::from(validator_groups),
+ assignment_keys,
+ n_cores: 0,
+ zeroth_delay_tranche_width: 0,
+ relay_vrf_modulo_samples: 0,
+ n_delay_tranches: 0,
+ no_show_slots: 0,
+ needed_approvals: 0,
+ active_validator_indices: vec![],
+ dispute_period: 6,
+ random_seed: [0u8; 32],
+ };
+
+ TestState { config, local, validators, session_info, req_sender }
+ }
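+
+ // Editorial worked example, not part of the original patch: with the
+ // `TestConfig { validator_count: 20, group_size: 3, .. }` used by these
+ // tests, the loop above produces groups {0,1,2}, {3,4,5}, ..., {15,16,17}
+ // plus a final, smaller group {18,19}. `local_validator_pos` is shifted
+ // down by `group_size - 1`, so the local validator always lands in a full
+ // group.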
+
+ fn make_availability_cores(&self, f: impl Fn(usize) -> CoreState) -> Vec<CoreState> {
+ (0..self.session_info.validator_groups.len()).map(f).collect()
+ }
+
+ fn group_validators(
+ &self,
+ group_index: GroupIndex,
+ exclude_local: bool,
+ ) -> Vec<ValidatorIndex> {
+ self.session_info
+ .validator_groups
+ .get(group_index)
+ .unwrap()
+ .iter()
+ .cloned()
+ .filter(|&i| {
+ self.local.as_ref().map_or(true, |l| !exclude_local || l.validator_index != i)
+ })
+ .collect()
+ }
+
+ fn validator_id(&self, validator_index: ValidatorIndex) -> ValidatorId {
+ self.session_info.validators.get(validator_index).unwrap().clone()
+ }
+
+ fn discovery_id(&self, validator_index: ValidatorIndex) -> AuthorityDiscoveryId {
+ self.session_info.discovery_keys[validator_index.0 as usize].clone()
+ }
+
+ fn sign_statement(
+ &self,
+ validator_index: ValidatorIndex,
+ statement: CompactStatement,
+ context: &SigningContext,
+ ) -> SignedStatement {
+ let payload = statement.signing_payload(context);
+ let pair = &self.validators[validator_index.0 as usize];
+ let signature = pair.sign(&payload[..]);
+
+ SignedStatement::new(statement, validator_index, signature, context, &pair.public())
+ .unwrap()
+ }
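+
+ // Editorial note, not part of the original patch: the payload signed above
+ // is the SCALE encoding of the statement paired with its signing context,
+ // i.e. effectively `(statement, context).encode()`, matching
+ // `CompactStatement::signing_payload` in the primitives diff further below.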
+
+ // send a request out, returning a future which expects a response.
+ async fn send_request(
+ &mut self,
+ peer: PeerId,
+ request: AttestedCandidateRequest,
+ ) -> impl Future<Output = sc_network::config::OutgoingResponse> {
+ let (tx, rx) = futures::channel::oneshot::channel();
+ let req = sc_network::config::IncomingRequest {
+ peer,
+ payload: request.encode(),
+ pending_response: tx,
+ };
+ self.req_sender.send(req).await.unwrap();
+
+ rx.map(|r| r.unwrap())
+ }
+}
+
+fn test_harness<T: Future<Output = VirtualOverseer>>(
+ config: TestConfig,
+ test: impl FnOnce(TestState, VirtualOverseer) -> T,
+) {
+ let pool = sp_core::testing::TaskExecutor::new();
+ let keystore = if config.local_validator {
+ test_helpers::mock::make_ferdie_keystore()
+ } else {
+ Arc::new(LocalKeystore::in_memory()) as SyncCryptoStorePtr
+ };
+ let req_protocol_names = ReqProtocolNames::new(&GENESIS_HASH, None);
+ let (statement_req_receiver, _) = IncomingRequest::get_config_receiver(&req_protocol_names);
+ let (candidate_req_receiver, req_cfg) =
+ IncomingRequest::get_config_receiver(&req_protocol_names);
+ let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(0);
+
+ let test_state = TestState::from_config(config, req_cfg.inbound_queue.unwrap(), &mut rng);
+
+ let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool.clone());
+ let subsystem = async move {
+ let subsystem = crate::StatementDistributionSubsystem::new(
+ keystore,
+ statement_req_receiver,
+ candidate_req_receiver,
+ Metrics::default(),
+ rng,
+ );
+
+ if let Err(e) = subsystem.run(context).await {
+ panic!("Fatal error: {:?}", e);
+ }
+ };
+
+ let test_fut = test(test_state, virtual_overseer);
+
+ futures::pin_mut!(test_fut);
+ futures::pin_mut!(subsystem);
+ futures::executor::block_on(future::join(
+ async move {
+ let mut virtual_overseer = test_fut.await;
+ virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await;
+ },
+ subsystem,
+ ));
+}
+
+struct PerParaData {
+ min_relay_parent: BlockNumber,
+ head_data: HeadData,
+}
+
+impl PerParaData {
+ pub fn new(min_relay_parent: BlockNumber, head_data: HeadData) -> Self {
+ Self { min_relay_parent, head_data }
+ }
+}
+
+struct TestLeaf {
+ number: BlockNumber,
+ hash: Hash,
+ parent_hash: Hash,
+ session: SessionIndex,
+ availability_cores: Vec<CoreState>,
+ para_data: Vec<(ParaId, PerParaData)>,
+}
+
+impl TestLeaf {
+ pub fn para_data(&self, para_id: ParaId) -> &PerParaData {
+ self.para_data
+ .iter()
+ .find_map(|(p_id, data)| if *p_id == para_id { Some(data) } else { None })
+ .unwrap()
+ }
+}
+
+async fn activate_leaf(
+ virtual_overseer: &mut VirtualOverseer,
+ para_id: ParaId,
+ leaf: &TestLeaf,
+ test_state: &TestState,
+ expect_session_info_request: bool,
+) {
+ let activated = ActivatedLeaf {
+ hash: leaf.hash,
+ number: leaf.number,
+ status: LeafStatus::Fresh,
+ span: Arc::new(jaeger::Span::Disabled),
+ };
+
+ virtual_overseer
+ .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work(
+ activated,
+ ))))
+ .await;
+
+ handle_leaf_activation(
+ virtual_overseer,
+ para_id,
+ leaf,
+ test_state,
+ expect_session_info_request,
+ )
+ .await;
+}
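+
+// Editorial summary of `handle_leaf_activation` below, added for clarity: on
+// each new leaf the subsystem is expected to ask, in order, for the staging
+// async-backing parameters, the minimum relay-parents (prospective
+// parachains), the block header (chain API), the session index for the child,
+// the availability cores, and the validator groups, and, when the session has
+// not been seen before, the full `SessionInfo`.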
+
+async fn handle_leaf_activation(
+ virtual_overseer: &mut VirtualOverseer,
+ para_id: ParaId,
+ leaf: &TestLeaf,
+ test_state: &TestState,
+ expect_session_info_request: bool,
+) {
+ let TestLeaf { number, hash, parent_hash, para_data, session, availability_cores } = leaf;
+ let PerParaData { min_relay_parent, head_data } = leaf.para_data(para_id);
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParameters(tx))
+ ) if parent == *hash => {
+ tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap();
+ }
+ );
+
+ let mrp_response: Vec<(ParaId, BlockNumber)> = para_data
+ .iter()
+ .map(|(para_id, data)| (*para_id, data.min_relay_parent))
+ .collect();
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::GetMinimumRelayParents(parent, tx)
+ ) if parent == *hash => {
+ tx.send(mrp_response).unwrap();
+ }
+ );
+
+ let header = Header {
+ parent_hash: *parent_hash,
+ number: *number,
+ state_root: Hash::zero(),
+ extrinsics_root: Hash::zero(),
+ digest: Default::default(),
+ };
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::ChainApi(
+ ChainApiMessage::BlockHeader(parent, tx)
+ ) if parent == *hash => {
+ tx.send(Ok(Some(header))).unwrap();
+ }
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx))) if parent == *hash => {
+ tx.send(Ok(*session)).unwrap();
+ }
+ );
+
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx))) if parent == *hash => {
+ tx.send(Ok(availability_cores.clone())).unwrap();
+ }
+ );
+
+ let validator_groups = test_state.session_info.validator_groups.to_vec();
+ let group_rotation_info =
+ GroupRotationInfo { session_start_block: 1, group_rotation_frequency: 12, now: 1 };
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(parent, RuntimeApiRequest::ValidatorGroups(tx))) if parent == *hash => {
+ tx.send(Ok((validator_groups, group_rotation_info))).unwrap();
+ }
+ );
+
+ if expect_session_info_request {
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::RuntimeApi(
+ RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionInfo(s, tx))) if s == *session => {
+ tx.send(Ok(Some(test_state.session_info.clone()))).unwrap();
+ }
+ );
+ }
+}
+
+async fn answer_expected_hypothetical_depth_request(
+ virtual_overseer: &mut VirtualOverseer,
+ responses: Vec<(HypotheticalCandidate, FragmentTreeMembership)>,
+ expected_leaf_hash: Option<Hash>,
+ expected_backed_in_path_only: bool,
+) {
+ assert_matches!(
+ virtual_overseer.recv().await,
+ AllMessages::ProspectiveParachains(
+ ProspectiveParachainsMessage::GetHypotheticalFrontier(req, tx)
+ ) => {
+ assert_eq!(req.fragment_tree_relay_parent, expected_leaf_hash);
+ assert_eq!(req.backed_in_path_only, expected_backed_in_path_only);
+ for (i, (candidate, _)) in responses.iter().enumerate() {
+ assert!(
+ req.candidates.iter().find(|c| c == &candidate).is_some(),
+ "did not receive request for hypothetical candidate {}",
+ i,
+ );
+ }
+
+ tx.send(responses);
+ }
+ )
+}
+
+fn validator_pubkeys(val_ids: &[ValidatorPair]) -> IndexedVec<ValidatorIndex, ValidatorId> {
+ val_ids.iter().map(|v| v.public().into()).collect()
+}
+
+async fn connect_peer(
+ virtual_overseer: &mut VirtualOverseer,
+ peer: PeerId,
+ authority_ids: Option<HashSet<AuthorityDiscoveryId>>,
+) {
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: StatementDistributionMessage::NetworkBridgeUpdate(
+ NetworkBridgeEvent::PeerConnected(
+ peer,
+ ObservedRole::Authority,
+ ValidationVersion::VStaging.into(),
+ authority_ids,
+ ),
+ ),
+ })
+ .await;
+}
+
+async fn disconnect_peer(virtual_overseer: &mut VirtualOverseer, peer: PeerId) {
+ virtual_overseer
+ .send(FromOrchestra::Communication {
+ msg: StatementDistributionMessage::NetworkBridgeUpdate(
NetworkBridgeEvent::PeerDisconnected(peer), + ), + }) + .await; +} + +async fn send_peer_view_change(virtual_overseer: &mut VirtualOverseer, peer: PeerId, view: View) { + virtual_overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerViewChange(peer, view), + ), + }) + .await; +} + +async fn send_peer_message( + virtual_overseer: &mut VirtualOverseer, + peer: PeerId, + message: protocol_vstaging::StatementDistributionMessage, +) { + virtual_overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::PeerMessage(peer, Versioned::VStaging(message)), + ), + }) + .await; +} + +async fn send_new_topology(virtual_overseer: &mut VirtualOverseer, topology: NewGossipTopology) { + virtual_overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::NetworkBridgeUpdate( + NetworkBridgeEvent::NewGossipTopology(topology), + ), + }) + .await; +} diff --git a/node/network/statement-distribution/src/vstaging/tests/requests.rs b/node/network/statement-distribution/src/vstaging/tests/requests.rs new file mode 100644 index 000000000000..5e624bd622eb --- /dev/null +++ b/node/network/statement-distribution/src/vstaging/tests/requests.rs @@ -0,0 +1,27 @@ +// Copyright 2023 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +// TODO [now]: peer reported for providing statements meant to be masked out + +// TODO [now]: peer reported for not providing enough statements, request retried + +// TODO [now]: peer reported for providing duplicate statements + +// TODO [now]: peer reported for providing statements with invalid signatures or wrong validator IDs + +// TODO [now]: local node sanity checks incoming requests + +// TODO [now]: local node respects statement mask diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 56b656ae9e3f..25252e6bf201 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -482,6 +482,8 @@ pub struct Overseer { NetworkBridgeTxMessage, CandidateBackingMessage, RuntimeApiMessage, + ProspectiveParachainsMessage, + ChainApiMessage, ])] statement_distribution: StatementDistribution, diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index 5080c34b7fca..33fd7c733f4a 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -275,7 +275,15 @@ impl StatementWithPVD { pub fn drop_pvd_from_signed(signed: SignedFullStatementWithPVD) -> SignedFullStatement { signed .convert_to_superpayload_with(|s| s.drop_pvd()) - .expect("persisted_validation_data doesn't affect encoded_as; qed") + .expect("persisted_validation_data doesn't affect encode_as; qed") + } + + /// Converts the statement to a compact signed statement by dropping the [`CommittedCandidateReceipt`] + /// and the [`PersistedValidationData`]. 
+ pub fn signed_to_compact(signed: SignedFullStatementWithPVD) -> Signed { + signed + .convert_to_superpayload_with(|s| s.to_compact()) + .expect("doesn't affect encode_as; qed") } } @@ -613,3 +621,10 @@ pub fn maybe_compress_pov(pov: PoV) -> PoV { let pov = PoV { block_data: BlockData(raw) }; pov } + +/// How many votes we need to consider a candidate backed. +/// +/// WARNING: This has to be kept in sync with the runtime check in the inclusion module. +pub fn minimum_votes(n_validators: usize) -> usize { + std::cmp::min(2, n_validators) +} diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index d6b3bcefbbd4..ad3615d6a662 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -901,6 +901,9 @@ where config.network.request_response_protocols.push(cfg); let (statement_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); config.network.request_response_protocols.push(cfg); + let (candidate_req_vstaging_receiver, cfg) = + IncomingRequest::get_config_receiver(&req_protocol_names); + config.network.request_response_protocols.push(cfg); let (dispute_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); config.network.request_response_protocols.push(cfg); @@ -1086,6 +1089,7 @@ where collation_req_vstaging_receiver, available_data_req_receiver, statement_req_receiver, + candidate_req_vstaging_receiver, dispute_req_receiver, registry: prometheus_registry.as_ref(), spawner, diff --git a/node/service/src/overseer.rs b/node/service/src/overseer.rs index 3dd1142482dd..404067af0da3 100644 --- a/node/service/src/overseer.rs +++ b/node/service/src/overseer.rs @@ -110,6 +110,9 @@ where IncomingRequestReceiver, /// Receiver for incoming large statement requests. pub statement_req_receiver: IncomingRequestReceiver, + /// Receiver for incoming candidate requests. + pub candidate_req_vstaging_receiver: + IncomingRequestReceiver, /// Receiver for incoming disputes. pub dispute_req_receiver: IncomingRequestReceiver, /// Prometheus registry, commonly used for production systems, less so for test. @@ -154,6 +157,7 @@ pub fn prepared_overseer_builder( collation_req_vstaging_receiver, available_data_req_receiver, statement_req_receiver, + candidate_req_vstaging_receiver, dispute_req_receiver, registry, spawner, @@ -294,6 +298,7 @@ where .statement_distribution(StatementDistributionSubsystem::new( keystore.clone(), statement_req_receiver, + candidate_req_vstaging_receiver, Metrics::register(registry)?, rand::rngs::StdRng::from_entropy(), )) diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 10330757104e..56a19fb5cee0 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -887,6 +887,17 @@ pub enum GossipSupportMessage { NetworkBridgeUpdate(NetworkBridgeEvent), } +/// Request introduction of a candidate into the prospective parachains subsystem. +#[derive(Debug, PartialEq, Eq, Clone)] +pub struct IntroduceCandidateRequest { + /// The para-id of the candidate. + pub candidate_para: ParaId, + /// The candidate receipt itself. + pub candidate_receipt: CommittedCandidateReceipt, + /// The persisted validation data of the candidate. + pub persisted_validation_data: PersistedValidationData, +} + /// A hypothetical candidate to be evaluated for frontier membership /// in the prospective parachains subsystem. 
///
@@ -994,17 +1005,16 @@ pub type FragmentTreeMembership = Vec<(Hash, Vec<usize>)>;
pub enum ProspectiveParachainsMessage {
/// Inform the Prospective Parachains Subsystem of a new candidate.
///
- /// The response sender accepts the candidate membership, which is empty
- /// if the candidate was already known.
- CandidateSeconded(
- ParaId,
- CommittedCandidateReceipt,
- PersistedValidationData,
- oneshot::Sender<FragmentTreeMembership>,
- ),
- /// Inform the Prospective Parachains Subsystem that a previously seconded candidate
- /// has been backed. This requires that `CandidateSeconded` was sent for the candidate
- /// some time in the past.
+ /// The response sender accepts the candidate membership, which is the existing
+ /// membership of the candidate if it was already known.
+ IntroduceCandidate(IntroduceCandidateRequest, oneshot::Sender<FragmentTreeMembership>),
+ /// Inform the Prospective Parachains Subsystem that a previously introduced candidate
+ /// has been seconded. This requires that the candidate was successfully introduced in
+ /// the past.
+ CandidateSeconded(ParaId, CandidateHash),
+ /// Inform the Prospective Parachains Subsystem that a previously introduced candidate
+ /// has been backed. This requires that the candidate was successfully introduced in
+ /// the past.
CandidateBacked(ParaId, CandidateHash),
/// Get a backable candidate hash for the given parachain, under the given relay-parent hash,
/// which is a descendant of the given candidate hashes. Returns `None` on the channel
diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs
index 723aaa4b8f2b..56e1593b0229 100644
--- a/node/subsystem-util/src/lib.rs
+++ b/node/subsystem-util/src/lib.rs
@@ -275,7 +275,7 @@ pub async fn executor_params_at_relay_parent(
/// From the given set of validators, find the first key we can sign with, if any.
pub async fn signing_key(
- validators: &[ValidatorId],
+ validators: impl IntoIterator<Item = &ValidatorId>,
keystore: &SyncCryptoStorePtr,
) -> Option<ValidatorId> {
signing_key_and_index(validators, keystore).await.map(|(k, _)| k)
@@ -284,10 +284,10 @@ pub async fn signing_key(
/// From the given set of validators, find the first key we can sign with, if any, and return it
/// along with the validator index.
pub async fn signing_key_and_index(
- validators: &[ValidatorId],
+ validators: impl IntoIterator<Item = &ValidatorId>,
keystore: &SyncCryptoStorePtr,
) -> Option<(ValidatorId, ValidatorIndex)> {
- for (i, v) in validators.iter().enumerate() {
+ for (i, v) in validators.into_iter().enumerate() {
if CryptoStore::has_keys(&**keystore, &[(v.to_raw_vec(), ValidatorId::ID)]).await {
return Some((v.clone(), ValidatorIndex(i as _)))
}
diff --git a/primitives/src/v2/mod.rs b/primitives/src/v2/mod.rs
index ea4ae8470cdf..f0e128a18c18 100644
--- a/primitives/src/v2/mod.rs
+++ b/primitives/src/v2/mod.rs
@@ -750,7 +750,7 @@ impl TypeIndex for CoreIndex {
}

/// The unique (during session) index of a validator group.
-#[derive(Encode, Decode, Default, Clone, Copy, Debug, PartialEq, Eq, TypeInfo)]
+#[derive(Encode, Decode, Default, Clone, Copy, Debug, PartialEq, Eq, TypeInfo, PartialOrd, Ord)]
#[cfg_attr(feature = "std", derive(Hash))]
pub struct GroupIndex(pub u32);
@@ -1451,7 +1451,7 @@ const BACKING_STATEMENT_MAGIC: [u8; 4] = *b"BKNG";

/// Statements that can be made about parachain candidates. These are the
/// actual values that are signed.
-#[derive(Clone, PartialEq, Eq, RuntimeDebug)]
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, RuntimeDebug)]
#[cfg_attr(feature = "std", derive(Hash))]
pub enum CompactStatement {
/// Proposal of a parachain candidate.
@@ -1466,6 +1466,13 @@ impl CompactStatement { pub fn signing_payload(&self, context: &SigningContext) -> Vec { (self, context).encode() } + + /// Get the underlying candidate hash this references. + pub fn candidate_hash(&self) -> &CandidateHash { + match *self { + CompactStatement::Seconded(ref h) | CompactStatement::Valid(ref h) => h, + } + } } // Inner helper for codec on `CompactStatement`. @@ -1514,15 +1521,6 @@ impl parity_scale_codec::Decode for CompactStatement { } } -impl CompactStatement { - /// Get the underlying candidate hash this references. - pub fn candidate_hash(&self) -> &CandidateHash { - match *self { - CompactStatement::Seconded(ref h) | CompactStatement::Valid(ref h) => h, - } - } -} - /// `IndexedVec` struct indexed by type specific indices. #[derive(Clone, Encode, Decode, RuntimeDebug, TypeInfo)] #[cfg_attr(feature = "std", derive(PartialEq))] diff --git a/primitives/test-helpers/src/lib.rs b/primitives/test-helpers/src/lib.rs index f1603a53bf2a..ecec7d455ad9 100644 --- a/primitives/test-helpers/src/lib.rs +++ b/primitives/test-helpers/src/lib.rs @@ -23,14 +23,16 @@ //! contain randomness based data. use polkadot_primitives::{ CandidateCommitments, CandidateDescriptor, CandidateReceipt, CollatorId, CollatorSignature, - CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, ValidationCode, ValidationCodeHash, - ValidatorId, + CommittedCandidateReceipt, Hash, HeadData, Id as ParaId, PersistedValidationData, + ValidationCode, ValidationCodeHash, ValidatorId, }; pub use rand; use sp_application_crypto::sr25519; use sp_keyring::Sr25519Keyring; use sp_runtime::generic::Digest; +const MAX_POV_SIZE: u32 = 1_000_000; + /// Creates a candidate receipt with filler data. pub fn dummy_candidate_receipt>(relay_parent: H) -> CandidateReceipt { CandidateReceipt:: { @@ -146,6 +148,46 @@ pub fn dummy_collator_signature() -> CollatorSignature { CollatorSignature::from(sr25519::Signature([0u8; 64])) } +/// Create a meaningless persisted validation data. +pub fn dummy_pvd(parent_head: HeadData, relay_parent_number: u32) -> PersistedValidationData { + PersistedValidationData { + parent_head, + relay_parent_number, + max_pov_size: MAX_POV_SIZE, + relay_parent_storage_root: dummy_hash(), + } +} + +/// Create a meaningless candidate, returning its receipt and PVD. +pub fn make_candidate( + relay_parent_hash: Hash, + relay_parent_number: u32, + para_id: ParaId, + parent_head: HeadData, + head_data: HeadData, + validation_code_hash: ValidationCodeHash, +) -> (CommittedCandidateReceipt, PersistedValidationData) { + let pvd = dummy_pvd(parent_head, relay_parent_number); + let commitments = CandidateCommitments { + head_data, + horizontal_messages: Default::default(), + upward_messages: Default::default(), + new_validation_code: None, + processed_downward_messages: 0, + hrmp_watermark: relay_parent_number, + }; + + let mut candidate = + dummy_candidate_receipt_bad_sig(relay_parent_hash, Some(Default::default())); + candidate.commitments_hash = commitments.hash(); + candidate.descriptor.para_id = para_id; + candidate.descriptor.persisted_validation_data_hash = pvd.hash(); + candidate.descriptor.validation_code_hash = validation_code_hash; + let candidate = CommittedCandidateReceipt { descriptor: candidate.descriptor, commitments }; + + (candidate, pvd) +} + /// Create a new candidate descriptor, and apply a valid signature /// using the provided `collator` key. 
pub fn make_valid_candidate_descriptor>( diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs index 4f5a7d24635b..14f654a9ce70 100644 --- a/runtime/parachains/src/inclusion/mod.rs +++ b/runtime/parachains/src/inclusion/mod.rs @@ -172,8 +172,7 @@ impl Default for ProcessedCandidates { /// Number of backing votes we need for a valid backing. /// -/// WARNING: This check has to be kept in sync with the node side check in the backing -/// subsystem. +/// WARNING: This check has to be kept in sync with the node side checks. pub fn minimum_backing_votes(n_validators: usize) -> usize { // For considerations on this value see: // https://github.com/paritytech/polkadot/pull/1656#issuecomment-999734650 From 2b1f1c714bedd9e8da237e1eb14b3c5010c1e1a0 Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Tue, 7 Mar 2023 20:09:51 -0600 Subject: [PATCH 40/76] miscellaneous fixes to make asynchronous backing work (#6791) * propagate network-protocol-staging feature * add feature to adder-collator as well * allow collation-generation of occupied cores * prospective parachains: special treatment for pending availability candidates * runtime: fetch candidates pending availability * lazily construct PVD for pending candidates * fix fallout in prospective parachains hypothetical/select_child * runtime: enact candidates when creating paras-inherent * make tests compile * test pending availability in the scope * add prospective parachains test * fix validity constraints leftovers * drop prints * Fix typos --------- Co-authored-by: Chris Sosnin Co-authored-by: Marcin S --- Cargo.toml | 1 + cli/Cargo.toml | 1 + node/collation-generation/src/lib.rs | 18 +- .../src/fragment_tree.rs | 296 ++++++++++++++++-- node/core/prospective-parachains/src/lib.rs | 215 ++++++++++--- node/core/prospective-parachains/src/tests.rs | 134 +++++++- node/core/runtime-api/src/cache.rs | 19 +- node/core/runtime-api/src/lib.rs | 20 +- node/core/runtime-api/src/tests.rs | 6 +- node/service/Cargo.toml | 2 + node/subsystem-types/src/messages.rs | 8 +- node/subsystem-types/src/runtime_client.rs | 12 +- .../test-parachains/adder/collator/Cargo.toml | 3 + primitives/src/runtime_api.rs | 4 +- primitives/src/vstaging/mod.rs | 27 ++ runtime/parachains/src/inclusion/mod.rs | 8 + runtime/parachains/src/paras_inherent/mod.rs | 2 +- .../src/runtime_api_impl/vstaging.rs | 40 ++- runtime/rococo/src/lib.rs | 4 +- runtime/westend/src/lib.rs | 4 +- 20 files changed, 682 insertions(+), 142 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e2257db47c30..9db5405e9c50 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -208,6 +208,7 @@ fast-runtime = [ "polkadot-cli/fast-runtime" ] runtime-metrics = [ "polkadot-cli/runtime-metrics" ] pyroscope = ["polkadot-cli/pyroscope"] jemalloc-allocator = ["polkadot-node-core-pvf/jemalloc-allocator", "polkadot-overseer/jemalloc-allocator"] +network-protocol-staging = ["polkadot-cli/network-protocol-staging"] # Configuration for building a .deb package - for use with `cargo-deb` [package.metadata.deb] diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 01247bbc996f..0edf56d90cef 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -75,3 +75,4 @@ rococo-native = ["service/rococo-native"] malus = ["full-node", "service/malus"] runtime-metrics = ["service/runtime-metrics", "polkadot-node-metrics/runtime-metrics"] +network-protocol-staging = ["service/network-protocol-staging"] diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 
0347ee683de9..445286f6a781 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -198,15 +198,15 @@ async fn handle_new_activations( let (scheduled_core, assumption) = match core { CoreState::Scheduled(scheduled_core) => (scheduled_core, OccupiedCoreAssumption::Free), - CoreState::Occupied(_occupied_core) => { - // TODO: https://github.com/paritytech/polkadot/issues/1573 - gum::trace!( - target: LOG_TARGET, - core_idx = %core_idx, - relay_parent = ?relay_parent, - "core is occupied. Keep going.", - ); - continue + CoreState::Occupied(occupied_core) => { + // TODO [now]: this assumes that next up == current. + // in practice we should only set `OccupiedCoreAssumption::Included` + // when the candidate occupying the core is also of the same para. + if let Some(scheduled) = occupied_core.next_up_on_available { + (scheduled, OccupiedCoreAssumption::Included) + } else { + continue + } }, CoreState::Free => { gum::trace!( diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 1ded5fa44fdb..c6f388f851b5 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -262,6 +262,16 @@ struct CandidateEntry { state: CandidateState, } +/// A candidate existing on-chain but pending availability, for special treatment +/// in the [`Scope`]. +#[derive(Debug, Clone)] +pub(crate) struct PendingAvailability { + /// The candidate hash. + pub candidate_hash: CandidateHash, + /// The block info of the relay parent. + pub relay_parent: RelayChainBlockInfo, +} + /// The scope of a [`FragmentTree`]. #[derive(Debug)] pub(crate) struct Scope { @@ -269,6 +279,7 @@ pub(crate) struct Scope { relay_parent: RelayChainBlockInfo, ancestors: BTreeMap, ancestors_by_hash: HashMap, + pending_availability: Vec, base_constraints: Constraints, max_depth: usize, } @@ -304,6 +315,7 @@ impl Scope { para: ParaId, relay_parent: RelayChainBlockInfo, base_constraints: Constraints, + pending_availability: Vec, max_depth: usize, ancestors: impl IntoIterator, ) -> Result { @@ -330,6 +342,7 @@ impl Scope { para, relay_parent, base_constraints, + pending_availability, max_depth, ancestors: ancestors_map, ancestors_by_hash, @@ -354,6 +367,14 @@ impl Scope { self.ancestors_by_hash.get(hash).map(|info| info.clone()) } + /// Whether the candidate in question is one pending availability in this scope. 
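+	/// Returns the stored [`PendingAvailability`] record if so; note that its
+	/// `relay_parent` may lie outside the scope's ancestry, which is exactly why
+	/// such candidates need special treatment.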
+ pub fn get_pending_availability( + &self, + candidate_hash: &CandidateHash, + ) -> Option<&PendingAvailability> { + self.pending_availability.iter().find(|c| &c.candidate_hash == candidate_hash) + } + /// Get the base constraints of the scope pub fn base_constraints(&self) -> &Constraints { &self.base_constraints @@ -609,7 +630,12 @@ impl FragmentTree { let parent_rp = self .scope .ancestor_by_hash(&node.relay_parent()) - .expect("nodes in tree can only contain ancestors within scope; qed"); + .or_else(|| { + self.scope + .get_pending_availability(&node.candidate_hash) + .map(|_| self.scope.earliest_relay_parent()) + }) + .expect("All nodes in tree are either pending availability or within scope; qed"); (node.cumulative_modifications.clone(), node.depth + 1, parent_rp) }, @@ -713,11 +739,17 @@ impl FragmentTree { .nodes .iter() .take_while(|n| n.parent == NodePointer::Root) + .filter(|n| self.scope.get_pending_availability(&n.candidate_hash).is_none()) .filter(|n| pred(&n.candidate_hash)) .map(|n| n.candidate_hash) .next(), - NodePointer::Storage(ptr) => - self.nodes[ptr].children.iter().filter(|n| pred(&n.1)).map(|n| n.1).next(), + NodePointer::Storage(ptr) => self.nodes[ptr] + .children + .iter() + .filter(|n| self.scope.get_pending_availability(&n.1).is_none()) + .filter(|n| pred(&n.1)) + .map(|n| n.1) + .next(), } } @@ -750,7 +782,14 @@ impl FragmentTree { let parent_rp = self .scope .ancestor_by_hash(&node.relay_parent()) - .expect("nodes in tree can only contain ancestors within scope; qed"); + .or_else(|| { + // if the relay-parent is out of scope _and_ it is in the tree, + // it must be a candidate pending availability. + self.scope + .get_pending_availability(&node.candidate_hash) + .map(|c| c.relay_parent.clone()) + }) + .expect("All nodes in tree are either pending availability or within scope; qed"); (node.cumulative_modifications.clone(), node.depth + 1, parent_rp) }, @@ -777,31 +816,58 @@ impl FragmentTree { // Add nodes to tree wherever // 1. parent hash is correct - // 2. relay-parent does not move backwards - // 3. candidate outputs fulfill constraints + // 2. relay-parent does not move backwards. + // 3. all non-pending-availability candidates have relay-parent in scope. + // 4. candidate outputs fulfill constraints let required_head_hash = child_constraints.required_parent.hash(); for candidate in storage.iter_para_children(&required_head_hash) { - let relay_parent = match self.scope.ancestor_by_hash(&candidate.relay_parent) { - None => continue, // not in chain - Some(info) => { - if info.number < earliest_rp.number { - // moved backwards - continue - } - - info - }, + let pending = self.scope.get_pending_availability(&candidate.candidate_hash); + let relay_parent = pending + .map(|p| p.relay_parent.clone()) + .or_else(|| self.scope.ancestor_by_hash(&candidate.relay_parent)); + + let relay_parent = match relay_parent { + Some(r) => r, + None => continue, }; + // require: pending availability candidates don't move backwards + // and only those can be out-of-scope. + // + // earliest_rp can be before the earliest relay parent in the scope + // when the parent is a pending availability candidate as well, but + // only other pending candidates can have a relay parent out of scope. 
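+			// Illustrative example (numbers not from this diff): with the scope's
+			// earliest relay parent at #10 and a pending-availability candidate `A`
+			// anchored at #8, `A` may sit at the tree root with relay parent #8,
+			// and a further pending-availability candidate built on `A` may also
+			// reach back to #8, but any non-pending candidate must anchor at #10
+			// or later.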
+ let min_relay_parent_number = pending + .map(|p| match parent_pointer { + NodePointer::Root => p.relay_parent.number, + NodePointer::Storage(_) => earliest_rp.number, + }) + .unwrap_or_else(|| { + std::cmp::max( + earliest_rp.number, + self.scope.earliest_relay_parent().number, + ) + }); + + if relay_parent.number < min_relay_parent_number { + continue // relay parent moved backwards. + } + // don't add candidates where the parent already has it as a child. if self.node_has_candidate_child(parent_pointer, &candidate.candidate_hash) { continue } let fragment = { + let mut constraints = child_constraints.clone(); + if let Some(ref p) = pending { + // overwrite for candidates pending availability as a special-case. + constraints.min_relay_parent_number = p.relay_parent.number; + } + let f = Fragment::new( relay_parent.clone(), - child_constraints.clone(), + constraints, candidate.candidate.partial_clone(), ); @@ -952,9 +1018,17 @@ mod tests { let max_depth = 2; let base_constraints = make_constraints(8, vec![8, 9], vec![1, 2, 3].into()); + let pending_availability = Vec::new(); assert_matches!( - Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors), + Scope::with_ancestors( + para_id, + relay_parent, + base_constraints, + pending_availability, + max_depth, + ancestors + ), Err(UnexpectedAncestor { number: 8, prev: 10 }) ); } @@ -976,9 +1050,17 @@ mod tests { let max_depth = 2; let base_constraints = make_constraints(0, vec![], vec![1, 2, 3].into()); + let pending_availability = Vec::new(); assert_matches!( - Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors,), + Scope::with_ancestors( + para_id, + relay_parent, + base_constraints, + pending_availability, + max_depth, + ancestors, + ), Err(UnexpectedAncestor { number: 99999, prev: 0 }) ); } @@ -1012,10 +1094,17 @@ mod tests { let max_depth = 2; let base_constraints = make_constraints(3, vec![2], vec![1, 2, 3].into()); + let pending_availability = Vec::new(); - let scope = - Scope::with_ancestors(para_id, relay_parent, base_constraints, max_depth, ancestors) - .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent, + base_constraints, + pending_availability, + max_depth, + ancestors, + ) + .unwrap(); assert_eq!(scope.ancestors.len(), 2); assert_eq!(scope.ancestors_by_hash.len(), 2); @@ -1100,6 +1189,7 @@ mod tests { let candidate_b_hash = candidate_b.hash(); let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); let ancestors = vec![RelayChainBlockInfo { number: pvd_a.relay_parent_number, @@ -1115,9 +1205,15 @@ mod tests { storage.add_candidate(candidate_a, pvd_a).unwrap(); storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = - Scope::with_ancestors(para_id, relay_parent_b_info, base_constraints, 4, ancestors) - .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_b_info, + base_constraints, + pending_availability, + 4, + ancestors, + ) + .unwrap(); let tree = FragmentTree::populate(scope, &storage); let candidates: Vec<_> = tree.candidates().collect(); @@ -1172,6 +1268,7 @@ mod tests { let candidate_a2_hash = candidate_a2.hash(); let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); let ancestors = vec![RelayChainBlockInfo { number: pvd_a.relay_parent_number, @@ -1187,9 +1284,15 @@ mod tests { storage.add_candidate(candidate_a, pvd_a).unwrap(); storage.add_candidate(candidate_b, pvd_b).unwrap(); - let scope = 
- Scope::with_ancestors(para_id, relay_parent_b_info, base_constraints, 4, ancestors) - .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_b_info, + base_constraints, + pending_availability, + 4, + ancestors, + ) + .unwrap(); let mut tree = FragmentTree::populate(scope, &storage); storage.add_candidate(candidate_a2, pvd_a2).unwrap(); @@ -1229,6 +1332,7 @@ mod tests { let candidate_b_hash = candidate_b.hash(); let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); let relay_parent_a_info = RelayChainBlockInfo { number: pvd_a.relay_parent_number, @@ -1237,9 +1341,15 @@ mod tests { }; storage.add_candidate(candidate_a, pvd_a).unwrap(); - let scope = - Scope::with_ancestors(para_id, relay_parent_a_info, base_constraints, 4, vec![]) - .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + 4, + vec![], + ) + .unwrap(); let mut tree = FragmentTree::populate(scope, &storage); storage.add_candidate(candidate_b, pvd_b).unwrap(); @@ -1278,6 +1388,7 @@ mod tests { let candidate_b_hash = candidate_b.hash(); let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); let relay_parent_a_info = RelayChainBlockInfo { number: pvd_a.relay_parent_number, @@ -1286,9 +1397,15 @@ mod tests { }; storage.add_candidate(candidate_a, pvd_a).unwrap(); - let scope = - Scope::with_ancestors(para_id, relay_parent_a_info, base_constraints, 4, vec![]) - .unwrap(); + let scope = Scope::with_ancestors( + para_id, + relay_parent_a_info, + base_constraints, + pending_availability, + 4, + vec![], + ) + .unwrap(); let mut tree = FragmentTree::populate(scope, &storage); storage.add_candidate(candidate_b, pvd_b).unwrap(); @@ -1317,6 +1434,7 @@ mod tests { ); let candidate_a_hash = candidate_a.hash(); let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); let relay_parent_a_info = RelayChainBlockInfo { number: pvd_a.relay_parent_number, @@ -1330,6 +1448,7 @@ mod tests { para_id, relay_parent_a_info, base_constraints, + pending_availability, max_depth, vec![], ) @@ -1381,6 +1500,7 @@ mod tests { let candidate_b_hash = candidate_b.hash(); let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); let relay_parent_a_info = RelayChainBlockInfo { number: pvd_a.relay_parent_number, @@ -1395,6 +1515,7 @@ mod tests { para_id, relay_parent_a_info, base_constraints, + pending_availability, max_depth, vec![], ) @@ -1446,6 +1567,7 @@ mod tests { let candidate_b_hash = candidate_b.hash(); let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); let relay_parent_a_info = RelayChainBlockInfo { number: pvd_a.relay_parent_number, @@ -1460,6 +1582,7 @@ mod tests { para_id, relay_parent_a_info, base_constraints, + pending_availability, max_depth, vec![], ) @@ -1542,6 +1665,7 @@ mod tests { let candidate_a_hash = candidate_a.hash(); let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability = Vec::new(); let relay_parent_a_info = RelayChainBlockInfo { number: pvd_a.relay_parent_number, @@ -1554,6 +1678,7 @@ mod tests { para_id, relay_parent_a_info, base_constraints, + pending_availability, max_depth, vec![], ) @@ -1623,6 +1748,7 @@ mod tests { ); let base_constraints = make_constraints(0, vec![0], vec![0x0a].into()); + let pending_availability 
= Vec::new(); let relay_parent_a_info = RelayChainBlockInfo { number: pvd_a.relay_parent_number, @@ -1643,6 +1769,7 @@ mod tests { para_id, relay_parent_a_info, base_constraints, + pending_availability, max_depth, vec![], ) @@ -1707,4 +1834,105 @@ mod tests { vec![2], // non-empty if `false`. ); } + + #[test] + fn pending_availability_in_scope() { + let mut storage = CandidateStorage::new(); + + let para_id = ParaId::from(5u32); + let relay_parent_a = Hash::repeat_byte(1); + let relay_parent_b = Hash::repeat_byte(2); + let relay_parent_c = Hash::repeat_byte(3); + + let (pvd_a, candidate_a) = make_committed_candidate( + para_id, + relay_parent_a, + 0, + vec![0x0a].into(), + vec![0x0b].into(), + 0, + ); + let candidate_a_hash = candidate_a.hash(); + + let (pvd_b, candidate_b) = make_committed_candidate( + para_id, + relay_parent_b, + 1, + vec![0x0b].into(), + vec![0x0c].into(), + 1, + ); + + // Note that relay parent `a` is not allowed. + let base_constraints = make_constraints(1, vec![], vec![0x0a].into()); + + let relay_parent_a_info = RelayChainBlockInfo { + number: pvd_a.relay_parent_number, + hash: relay_parent_a, + storage_root: pvd_a.relay_parent_storage_root, + }; + let pending_availability = vec![PendingAvailability { + candidate_hash: candidate_a_hash, + relay_parent: relay_parent_a_info, + }]; + + let relay_parent_b_info = RelayChainBlockInfo { + number: pvd_b.relay_parent_number, + hash: relay_parent_b, + storage_root: pvd_b.relay_parent_storage_root, + }; + let relay_parent_c_info = RelayChainBlockInfo { + number: pvd_b.relay_parent_number + 1, + hash: relay_parent_c, + storage_root: Hash::zero(), + }; + + let max_depth = 4; + storage.add_candidate(candidate_a, pvd_a).unwrap(); + storage.add_candidate(candidate_b, pvd_b).unwrap(); + storage.mark_backed(&candidate_a_hash); + + let scope = Scope::with_ancestors( + para_id, + relay_parent_c_info, + base_constraints, + pending_availability, + max_depth, + vec![relay_parent_b_info], + ) + .unwrap(); + let tree = FragmentTree::populate(scope, &storage); + + let candidates: Vec<_> = tree.candidates().collect(); + assert_eq!(candidates.len(), 2); + assert_eq!(tree.nodes.len(), 2); + + let candidate_d_hash = CandidateHash(Hash::repeat_byte(0xAA)); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0b]).hash(), + relay_parent: relay_parent_c, + }, + &storage, + false, + ), + vec![1], + ); + + assert_eq!( + tree.hypothetical_depths( + candidate_d_hash, + HypotheticalCandidate::Incomplete { + parent_head_data_hash: HeadData::from(vec![0x0c]).hash(), + relay_parent: relay_parent_b, + }, + &storage, + false, + ), + vec![2], + ); + } } diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index f0e6eaaa14c8..5ef35bcaa628 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -47,12 +47,15 @@ use polkadot_node_subsystem_util::{ runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, }; use polkadot_primitives::vstaging::{ - BlockNumber, CandidateHash, CoreState, Hash, Id as ParaId, PersistedValidationData, + BlockNumber, CandidateHash, CandidatePendingAvailability, CommittedCandidateReceipt, CoreState, + Hash, HeadData, Header, Id as ParaId, PersistedValidationData, }; use crate::{ error::{FatalError, FatalResult, JfyiError, JfyiErrorResult, Result}, - fragment_tree::{CandidateStorage, FragmentTree, Scope as TreeScope}, + 
fragment_tree::{ + CandidateStorage, CandidateStorageInsertionError, FragmentTree, Scope as TreeScope, + }, }; mod error; @@ -68,6 +71,7 @@ const LOG_TARGET: &str = "parachain::prospective-parachains"; struct RelayBlockViewData { // Scheduling info for paras and upcoming paras. fragment_trees: HashMap, + pending_availability: HashSet, } struct View { @@ -176,6 +180,7 @@ async fn handle_active_leaves_update( view.active_leaves.remove(deactivated); } + let mut temp_header_cache = HashMap::new(); for activated in update.activated.into_iter() { let hash = activated.hash; @@ -194,25 +199,29 @@ async fn handle_active_leaves_update( return Ok(()) }; - let scheduled_paras = fetch_upcoming_paras(&mut *ctx, hash).await?; + let mut pending_availability = HashSet::new(); + let scheduled_paras = + fetch_upcoming_paras(&mut *ctx, hash, &mut pending_availability).await?; - let block_info: RelayChainBlockInfo = match fetch_block_info(&mut *ctx, hash).await? { - None => { - gum::warn!( - target: LOG_TARGET, - block_hash = ?hash, - "Failed to get block info for newly activated leaf block." - ); + let block_info: RelayChainBlockInfo = + match fetch_block_info(&mut *ctx, &mut temp_header_cache, hash).await? { + None => { + gum::warn!( + target: LOG_TARGET, + block_hash = ?hash, + "Failed to get block info for newly activated leaf block." + ); - // `update.activated` is an option, but we can use this - // to exit the 'loop' and skip this block without skipping - // pruning logic. - continue - }, - Some(info) => info, - }; + // `update.activated` is an option, but we can use this + // to exit the 'loop' and skip this block without skipping + // pruning logic. + continue + }, + Some(info) => info, + }; - let ancestry = fetch_ancestry(&mut *ctx, hash, allowed_ancestry_len).await?; + let ancestry = + fetch_ancestry(&mut *ctx, &mut temp_header_cache, hash, allowed_ancestry_len).await?; // Find constraints. let mut fragment_trees = HashMap::new(); @@ -220,9 +229,9 @@ async fn handle_active_leaves_update( let candidate_storage = view.candidate_storage.entry(para).or_insert_with(CandidateStorage::new); - let constraints = fetch_base_constraints(&mut *ctx, hash, para).await?; + let backing_state = fetch_backing_state(&mut *ctx, hash, para).await?; - let constraints = match constraints { + let (constraints, pending_availability) = match backing_state { Some(c) => c, None => { // This indicates a runtime conflict of some kind. @@ -231,27 +240,61 @@ async fn handle_active_leaves_update( target: LOG_TARGET, para_id = ?para, relay_parent = ?hash, - "Failed to get inclusion constraints." + "Failed to get inclusion backing state." ); continue }, }; + let pending_availability = preprocess_candidates_pending_availability( + ctx, + &mut temp_header_cache, + constraints.required_parent.clone(), + pending_availability, + ) + .await?; + let mut compact_pending = Vec::with_capacity(pending_availability.len()); + + for c in pending_availability { + let res = candidate_storage.add_candidate(c.candidate, c.persisted_validation_data); + let candidate_hash = c.compact.candidate_hash; + compact_pending.push(c.compact); + + match res { + Ok(_) | Err(CandidateStorageInsertionError::CandidateAlreadyKnown(_)) => { + // Anything on-chain is guaranteed to be backed. 
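+					// (Candidates only enter the on-chain pending-availability set
+					// after the inclusion module has checked their backing votes,
+					// so this cannot mark an unbacked candidate as backed.)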
+ candidate_storage.mark_backed(&candidate_hash); + }, + Err(err) => { + gum::warn!( + target: LOG_TARGET, + ?candidate_hash, + para_id = ?para, + ?err, + "Scraped invalid candidate pending availability", + ); + }, + } + } + let scope = TreeScope::with_ancestors( para, block_info.clone(), constraints, + compact_pending, max_candidate_depth, ancestry.iter().cloned(), ) .expect("ancestors are provided in reverse order and correctly; qed"); let tree = FragmentTree::populate(scope, &*candidate_storage); + fragment_trees.insert(para, tree); } - view.active_leaves.insert(hash, RelayBlockViewData { fragment_trees }); + view.active_leaves + .insert(hash, RelayBlockViewData { fragment_trees, pending_availability }); } if !update.deactivated.is_empty() { @@ -266,21 +309,23 @@ fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) { metrics.time_prune_view_candidate_storage(); let active_leaves = &view.active_leaves; - view.candidate_storage.retain(|para_id, storage| { - let mut coverage = HashSet::new(); - let mut contained = false; - for head in active_leaves.values() { - if let Some(tree) = head.fragment_trees.get(¶_id) { - coverage.extend(tree.candidates()); - contained = true; - } + let mut live_candidates = HashSet::new(); + let mut live_paras = HashSet::new(); + for sub_view in active_leaves.values() { + for (para_id, fragment_tree) in &sub_view.fragment_trees { + live_candidates.extend(fragment_tree.candidates()); + live_paras.insert(*para_id); } - if !contained { + live_candidates.extend(sub_view.pending_availability.iter().cloned()); + } + + view.candidate_storage.retain(|para_id, storage| { + if !live_paras.contains(¶_id) { return false } - storage.retain(|h| coverage.contains(&h)); + storage.retain(|h| live_candidates.contains(&h)); // Even if `storage` is now empty, we retain. // This maintains a convenient invariant that para-id storage exists @@ -289,6 +334,66 @@ fn prune_view_candidate_storage(view: &mut View, metrics: &Metrics) { }) } +struct ImportablePendingAvailability { + candidate: CommittedCandidateReceipt, + persisted_validation_data: PersistedValidationData, + compact: crate::fragment_tree::PendingAvailability, +} + +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] +async fn preprocess_candidates_pending_availability( + ctx: &mut Context, + cache: &mut HashMap, + required_parent: HeadData, + pending_availability: Vec, +) -> JfyiErrorResult> { + let mut required_parent = required_parent; + + let mut importable = Vec::new(); + let expected_count = pending_availability.len(); + + for (i, pending) in pending_availability.into_iter().enumerate() { + let relay_parent = + match fetch_block_info(ctx, cache, pending.descriptor.relay_parent).await? 
{ + None => { + gum::debug!( + target: LOG_TARGET, + ?pending.candidate_hash, + ?pending.descriptor.para_id, + index = ?i, + ?expected_count, + "Had to stop processing pending candidates early due to missing info.", + ); + + break + }, + Some(b) => b, + }; + + let next_required_parent = pending.commitments.head_data.clone(); + importable.push(ImportablePendingAvailability { + candidate: CommittedCandidateReceipt { + descriptor: pending.descriptor, + commitments: pending.commitments, + }, + persisted_validation_data: PersistedValidationData { + parent_head: required_parent, + max_pov_size: pending.max_pov_size, + relay_parent_number: relay_parent.number, + relay_parent_storage_root: relay_parent.storage_root, + }, + compact: crate::fragment_tree::PendingAvailability { + candidate_hash: pending.candidate_hash, + relay_parent, + }, + }); + + required_parent = next_required_parent; + } + + Ok(importable) +} + #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] async fn handle_candidate_introduced( _ctx: &mut Context, @@ -321,14 +426,12 @@ async fn handle_candidate_introduced( let candidate_hash = match storage.add_candidate(candidate, pvd) { Ok(c) => c, - Err(crate::fragment_tree::CandidateStorageInsertionError::CandidateAlreadyKnown(c)) => { + Err(CandidateStorageInsertionError::CandidateAlreadyKnown(c)) => { // Candidate known - return existing fragment tree membership. let _ = tx.send(fragment_tree_membership(&view.active_leaves, para, c)); return Ok(()) }, - Err( - crate::fragment_tree::CandidateStorageInsertionError::PersistedValidationDataMismatch, - ) => { + Err(CandidateStorageInsertionError::PersistedValidationDataMismatch) => { // We can't log the candidate hash without either doing more ~expensive // hashing but this branch indicates something is seriously wrong elsewhere // so it's doubtful that it would affect debugging. @@ -664,25 +767,29 @@ fn answer_prospective_validation_data_request( } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] -async fn fetch_base_constraints( +async fn fetch_backing_state( ctx: &mut Context, relay_parent: Hash, para_id: ParaId, -) -> JfyiErrorResult> { +) -> JfyiErrorResult)>> { let (tx, rx) = oneshot::channel(); ctx.send_message(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingValidityConstraints(para_id, tx), + RuntimeApiRequest::StagingParaBackingState(para_id, tx), )) .await; - Ok(rx.await.map_err(JfyiError::RuntimeApiRequestCanceled)??.map(From::from)) + Ok(rx + .await + .map_err(JfyiError::RuntimeApiRequestCanceled)?? 
+ .map(|s| (From::from(s.constraints), s.pending_availability))) } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] async fn fetch_upcoming_paras( ctx: &mut Context, relay_parent: Hash, + pending_availability: &mut HashSet, ) -> JfyiErrorResult> { let (tx, rx) = oneshot::channel(); @@ -699,6 +806,8 @@ async fn fetch_upcoming_paras( for core in cores { match core { CoreState::Occupied(occupied) => { + pending_availability.insert(occupied.candidate_hash); + if let Some(next_up_on_available) = occupied.next_up_on_available { upcoming.insert(next_up_on_available.para_id); } @@ -720,6 +829,7 @@ async fn fetch_upcoming_paras( #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] async fn fetch_ancestry( ctx: &mut Context, + cache: &mut HashMap, relay_hash: Hash, ancestors: usize, ) -> JfyiErrorResult> { @@ -738,7 +848,7 @@ async fn fetch_ancestry( let hashes = rx.map_err(JfyiError::ChainApiRequestCanceled).await??; let mut block_info = Vec::with_capacity(hashes.len()); for hash in hashes { - match fetch_block_info(ctx, hash).await? { + match fetch_block_info(ctx, cache, hash).await? { None => { gum::warn!( target: LOG_TARGET, @@ -759,14 +869,33 @@ async fn fetch_ancestry( } #[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] -async fn fetch_block_info( +async fn fetch_block_header_with_cache( ctx: &mut Context, + cache: &mut HashMap, relay_hash: Hash, -) -> JfyiErrorResult> { +) -> JfyiErrorResult> { + if let Some(h) = cache.get(&relay_hash) { + return Ok(Some(h.clone())) + } + let (tx, rx) = oneshot::channel(); ctx.send_message(ChainApiMessage::BlockHeader(relay_hash, tx)).await; let header = rx.map_err(JfyiError::ChainApiRequestCanceled).await??; + if let Some(ref h) = header { + cache.insert(relay_hash, h.clone()); + } + Ok(header) +} + +#[overseer::contextbounds(ProspectiveParachains, prefix = self::overseer)] +async fn fetch_block_info( + ctx: &mut Context, + cache: &mut HashMap, + relay_hash: Hash, +) -> JfyiErrorResult> { + let header = fetch_block_header_with_cache(ctx, cache, relay_hash).await?; + Ok(header.map(|header| RelayChainBlockInfo { hash: relay_hash, number: header.number, diff --git a/node/core/prospective-parachains/src/tests.rs b/node/core/prospective-parachains/src/tests.rs index 1936a482e685..cd1f2d494cc4 100644 --- a/node/core/prospective-parachains/src/tests.rs +++ b/node/core/prospective-parachains/src/tests.rs @@ -26,7 +26,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_types::{jaeger, ActivatedLeaf, LeafStatus}; use polkadot_primitives::{ - vstaging::{AsyncBackingParameters, Constraints, InboundHrmpLimitations}, + vstaging::{AsyncBackingParameters, BackingState, Constraints, InboundHrmpLimitations}, CommittedCandidateReceipt, HeadData, Header, PersistedValidationData, ScheduledCore, ValidationCodeHash, }; @@ -126,14 +126,24 @@ fn test_harness>( view } +#[derive(Debug, Clone)] struct PerParaData { min_relay_parent: BlockNumber, head_data: HeadData, + pending_availability: Vec, } impl PerParaData { pub fn new(min_relay_parent: BlockNumber, head_data: HeadData) -> Self { - Self { min_relay_parent, head_data } + Self { min_relay_parent, head_data, pending_availability: Vec::new() } + } + + pub fn new_with_pending( + min_relay_parent: BlockNumber, + head_data: HeadData, + pending: Vec, + ) -> Self { + Self { min_relay_parent, head_data, pending_availability: pending } } } @@ -176,7 +186,7 @@ async fn activate_leaf( leaf: &TestLeaf, 
test_state: &TestState, ) { - let TestLeaf { number, hash, para_data: _ } = leaf; + let TestLeaf { number, hash, .. } = leaf; let activated = ActivatedLeaf { hash: *hash, @@ -250,26 +260,39 @@ async fn handle_leaf_activation( let para_id = match message { AllMessages::RuntimeApi(RuntimeApiMessage::Request( _, - RuntimeApiRequest::StagingValidityConstraints(p_id, _), + RuntimeApiRequest::StagingParaBackingState(p_id, _), )) => p_id, _ => panic!("received unexpected message {:?}", message), }; - let PerParaData { min_relay_parent, head_data } = leaf.para_data(para_id); + let PerParaData { min_relay_parent, head_data, pending_availability } = + leaf.para_data(para_id); let constraints = dummy_constraints( *min_relay_parent, vec![*number], head_data.clone(), test_state.validation_code_hash, ); + let backing_state = + BackingState { constraints, pending_availability: pending_availability.clone() }; + assert_matches!( message, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingValidityConstraints(p_id, tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingParaBackingState(p_id, tx)) ) if parent == *hash && p_id == para_id => { - tx.send(Ok(Some(constraints))).unwrap(); + tx.send(Ok(Some(backing_state))).unwrap(); } ); + + for pending in pending_availability { + send_block_header( + virtual_overseer, + pending.descriptor.relay_parent, + pending.relay_parent_number, + ) + .await; + } } // Get minimum relay parents. @@ -1320,3 +1343,100 @@ fn correctly_updates_leaves() { assert_eq!(view.active_leaves.len(), 0); assert_eq!(view.candidate_storage.len(), 0); } + +#[test] +fn persists_pending_availability_candidate() { + let mut test_state = TestState::default(); + let para_id = ParaId::from(1); + test_state.availability_cores = test_state + .availability_cores + .into_iter() + .filter(|core| core.para_id().map_or(false, |id| id == para_id)) + .collect(); + assert_eq!(test_state.availability_cores.len(), 1); + + test_harness(|mut virtual_overseer| async move { + let para_head = HeadData(vec![1, 2, 3]); + + // Min allowed relay parent for leaf `a` which goes out of scope in the test. + let candidate_relay_parent = Hash::from_low_u64_be(5); + let candidate_relay_parent_number = 97; + + let leaf_a = TestLeaf { + number: candidate_relay_parent_number + ALLOWED_ANCESTRY_LEN, + hash: Hash::from_low_u64_be(2), + para_data: vec![( + para_id, + PerParaData::new(candidate_relay_parent_number, para_head.clone()), + )], + }; + + let leaf_b_hash = Hash::from_low_u64_be(1); + let leaf_b_number = leaf_a.number + 1; + + // Activate leaf. + activate_leaf(&mut virtual_overseer, &leaf_a, &test_state).await; + + // Candidate A + let (candidate_a, pvd_a) = make_candidate( + candidate_relay_parent, + candidate_relay_parent_number, + para_id, + para_head.clone(), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a = candidate_a.hash(); + + // Candidate B, built on top of the candidate which is out of scope but pending availability. 
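+		// (Leaf B's minimum relay parent is `candidate_relay_parent_number + 1`,
+		// putting candidate A's relay parent out of scope; only A's pending-
+		// availability status keeps it, and therefore B, in the fragment tree.)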
+ let (candidate_b, pvd_b) = make_candidate( + leaf_b_hash, + leaf_b_number, + para_id, + HeadData(vec![1]), + HeadData(vec![2]), + test_state.validation_code_hash, + ); + let candidate_hash_b = candidate_b.hash(); + + introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + second_candidate(&mut virtual_overseer, candidate_a.clone()).await; + back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; + + let candidate_a_pending_av = CandidatePendingAvailability { + candidate_hash: candidate_hash_a, + descriptor: candidate_a.descriptor.clone(), + commitments: candidate_a.commitments.clone(), + relay_parent_number: candidate_relay_parent_number, + max_pov_size: MAX_POV_SIZE, + }; + let leaf_b = TestLeaf { + number: leaf_b_number, + hash: leaf_b_hash, + para_data: vec![( + 1.into(), + PerParaData::new_with_pending( + candidate_relay_parent_number + 1, + para_head.clone(), + vec![candidate_a_pending_av], + ), + )], + }; + activate_leaf(&mut virtual_overseer, &leaf_b, &test_state).await; + + introduce_candidate(&mut virtual_overseer, candidate_b.clone(), pvd_b).await; + second_candidate(&mut virtual_overseer, candidate_b.clone()).await; + back_candidate(&mut virtual_overseer, &candidate_b, candidate_hash_b).await; + + get_backable_candidate( + &mut virtual_overseer, + &leaf_b, + para_id, + vec![candidate_hash_a], + Some(candidate_hash_b), + ) + .await; + + virtual_overseer + }); +} diff --git a/node/core/runtime-api/src/cache.rs b/node/core/runtime-api/src/cache.rs index 1fe709515f94..9ffa62ed4842 100644 --- a/node/core/runtime-api/src/cache.rs +++ b/node/core/runtime-api/src/cache.rs @@ -65,8 +65,7 @@ pub(crate) struct RequestResultCache { version: LruCache, disputes: LruCache)>>, - staging_validity_constraints: - LruCache<(Hash, ParaId), Option>, + staging_para_backing_state: LruCache<(Hash, ParaId), Option>, staging_async_backing_parameters: LruCache, } @@ -96,7 +95,7 @@ impl Default for RequestResultCache { version: LruCache::new(DEFAULT_CACHE_CAP), disputes: LruCache::new(DEFAULT_CACHE_CAP), - staging_validity_constraints: LruCache::new(DEFAULT_CACHE_CAP), + staging_para_backing_state: LruCache::new(DEFAULT_CACHE_CAP), staging_async_backing_parameters: LruCache::new(DEFAULT_CACHE_CAP), } } @@ -394,19 +393,19 @@ impl RequestResultCache { self.disputes.put(relay_parent, value); } - pub(crate) fn staging_validity_constraints( + pub(crate) fn staging_para_backing_state( &mut self, key: (Hash, ParaId), - ) -> Option<&Option> { - self.staging_validity_constraints.get(&key) + ) -> Option<&Option> { + self.staging_para_backing_state.get(&key) } - pub(crate) fn cache_staging_validity_constraints( + pub(crate) fn cache_staging_para_backing_state( &mut self, key: (Hash, ParaId), - value: Option, + value: Option, ) { - self.staging_validity_constraints.put(key, value); + self.staging_para_backing_state.put(key, value); } pub(crate) fn staging_async_backing_parameters( @@ -461,6 +460,6 @@ pub(crate) enum RequestResult { Version(Hash, u32), Disputes(Hash, Vec<(SessionIndex, CandidateHash, DisputeState)>), - StagingValidityConstraints(Hash, ParaId, Option), + StagingParaBackingState(Hash, ParaId, Option), StagingAsyncBackingParameters(Hash, vstaging_primitives::AsyncBackingParameters), } diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index 99deed31203c..614193d170c9 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -158,9 +158,9 @@ where Disputes(relay_parent, disputes) => 
self.requests_cache.cache_disputes(relay_parent, disputes), - StagingValidityConstraints(relay_parent, para_id, constraints) => self + StagingParaBackingState(relay_parent, para_id, constraints) => self .requests_cache - .cache_staging_validity_constraints((relay_parent, para_id), constraints), + .cache_staging_para_backing_state((relay_parent, para_id), constraints), StagingAsyncBackingParameters(relay_parent, params) => self.requests_cache.cache_staging_async_backing_parameters(relay_parent, params), } @@ -277,9 +277,9 @@ where .map(|sender| Request::ValidationCodeHash(para, assumption, sender)), Request::Disputes(sender) => query!(disputes(), sender).map(|sender| Request::Disputes(sender)), - Request::StagingValidityConstraints(para, sender) => - query!(staging_validity_constraints(para), sender) - .map(|sender| Request::StagingValidityConstraints(para, sender)), + Request::StagingParaBackingState(para, sender) => + query!(staging_para_backing_state(para), sender) + .map(|sender| Request::StagingParaBackingState(para, sender)), Request::StagingAsyncBackingParameters(sender) => query!(staging_async_backing_parameters(), sender) .map(|sender| Request::StagingAsyncBackingParameters(sender)), @@ -502,11 +502,11 @@ where query!(ValidationCodeHash, validation_code_hash(para, assumption), ver = 2, sender), Request::Disputes(sender) => query!(Disputes, disputes(), ver = Request::DISPUTES_RUNTIME_REQUIREMENT, sender), - Request::StagingValidityConstraints(para, sender) => { + Request::StagingParaBackingState(para, sender) => { query!( - StagingValidityConstraints, - staging_validity_constraints(para), - ver = Request::VALIDITY_CONSTRAINTS, + StagingParaBackingState, + staging_para_backing_state(para), + ver = Request::STAGING_BACKING_STATE, sender ) }, @@ -514,7 +514,7 @@ where query!( StagingAsyncBackingParameters, staging_async_backing_parameters(), - ver = Request::VALIDITY_CONSTRAINTS, + ver = Request::STAGING_BACKING_STATE, sender ) }, diff --git a/node/core/runtime-api/src/tests.rs b/node/core/runtime-api/src/tests.rs index 8ed7e9011193..f1cf1c7f4692 100644 --- a/node/core/runtime-api/src/tests.rs +++ b/node/core/runtime-api/src/tests.rs @@ -21,7 +21,7 @@ use polkadot_node_primitives::{BabeAllowedSlots, BabeEpoch, BabeEpochConfigurati use polkadot_node_subsystem::SpawnGlue; use polkadot_node_subsystem_test_helpers::make_subsystem_context; use polkadot_primitives::{ - runtime_api::ParachainHost, vstaging, AuthorityDiscoveryId, Block, CandidateEvent, + runtime_api::ParachainHost, AuthorityDiscoveryId, Block, CandidateEvent, CommittedCandidateReceipt, CoreState, GroupRotationInfo, Id as ParaId, InboundDownwardMessage, InboundHrmpMessage, OccupiedCoreAssumption, PersistedValidationData, PvfCheckStatement, ScrapedOnChainVotes, SessionIndex, SessionInfo, ValidationCode, ValidationCodeHash, @@ -193,10 +193,6 @@ sp_api::mock_impl_runtime_apis! 
{
	) -> Option<ValidationCodeHash> {
		self.validation_code_hash.get(&para).map(|c| c.clone())
	}
-
-	fn staging_validity_constraints(_: ParaId) -> Option<vstaging::Constraints> {
-		unimplemented!("Staging API not implemented");
-	}
}

impl BabeApi<Block> for MockRuntimeApi {
diff --git a/node/service/Cargo.toml b/node/service/Cargo.toml
index 129f14d31b07..6e8a44fbac50 100644
--- a/node/service/Cargo.toml
+++ b/node/service/Cargo.toml
@@ -210,3 +210,5 @@ runtime-metrics = [
	"polkadot-runtime?/runtime-metrics",
	"polkadot-runtime-parachains/runtime-metrics"
]
+
+network-protocol-staging = ["polkadot-node-network-protocol/network-protocol-staging"]
diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs
index 56a19fb5cee0..b5355ab3fa0b 100644
--- a/node/subsystem-types/src/messages.rs
+++ b/node/subsystem-types/src/messages.rs
@@ -637,9 +637,9 @@ pub enum RuntimeApiRequest {
	),
	/// Returns all on-chain disputes at given block number. Available in `v3`.
	Disputes(RuntimeApiSender<Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)>>),
-	/// Get the validity constraints of the given para.
+	/// Get the backing state of the given para.
	/// This is a staging API that will not be available on production runtimes.
-	StagingValidityConstraints(ParaId, RuntimeApiSender<Option<vstaging::Constraints>>),
+	StagingParaBackingState(ParaId, RuntimeApiSender<Option<vstaging::BackingState>>),
	/// Get candidate's acceptance limitations for asynchronous backing for a relay parent.
	///
	/// If it's not supported by the Runtime, the async backing is said to be disabled.
@@ -655,10 +655,10 @@ impl RuntimeApiRequest {
	/// `ExecutorParams`
	pub const EXECUTOR_PARAMS_RUNTIME_REQUIREMENT: u32 = 4;

-	/// Minimum version for validity constraints, required for async backing.
+	/// Minimum version for backing state, required for async backing.
	///
	/// 99 for now, should be adjusted to VSTAGING/actual runtime version once released.
-	pub const VALIDITY_CONSTRAINTS: u32 = 99;
+	pub const STAGING_BACKING_STATE: u32 = 99;
}

/// A message to the Runtime API subsystem.
diff --git a/node/subsystem-types/src/runtime_client.rs b/node/subsystem-types/src/runtime_client.rs
index 52d1862ed4a5..9db6828af2be 100644
--- a/node/subsystem-types/src/runtime_client.rs
+++ b/node/subsystem-types/src/runtime_client.rs
@@ -182,13 +182,13 @@ pub trait RuntimeApiSubsystemClient {
		at: Hash,
	) -> Result<Vec<(SessionIndex, CandidateHash, DisputeState<BlockNumber>)>, ApiError>;

-	/// Returns the base constraints of the given para, if they exist.
+	/// Returns the state of parachain backing for a given para.
	/// This is a staging method! Do not use on production runtimes!
-	async fn staging_validity_constraints(
+	async fn staging_para_backing_state(
		&self,
		at: Hash,
		para_id: Id,
-	) -> Result<Option<vstaging::Constraints>, ApiError>;
+	) -> Result<Option<vstaging::BackingState>, ApiError>;

	// === BABE API ===

@@ -391,12 +391,12 @@ where
		self.runtime_api().disputes(at)
	}

-	async fn staging_validity_constraints(
+	async fn staging_para_backing_state(
		&self,
		at: Hash,
		para_id: Id,
-	) -> Result<Option<vstaging::Constraints>, ApiError> {
-		self.runtime_api().staging_validity_constraints(at, para_id)
+	) -> Result<Option<vstaging::BackingState>, ApiError> {
+		self.runtime_api().staging_para_backing_state(at, para_id)
	}

	/// Returns candidate's acceptance limitations for asynchronous backing for a relay parent.
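To make the new interface concrete, here is a minimal sketch of how node-side code might consume `staging_para_backing_state` through the `RuntimeApiSubsystemClient` trait above. The helper function, its name, and its error handling are illustrative assumptions only, not part of this patch:

	// Sketch only: assumes the types from node/subsystem-types/src/runtime_client.rs
	// (`RuntimeApiSubsystemClient`, `ApiError`, `Hash`, `Id`) are in scope.
	async fn inspect_backing_state(
		client: &impl RuntimeApiSubsystemClient,
		at: Hash,
		para_id: Id,
	) -> Result<(), ApiError> {
		if let Some(state) = client.staging_para_backing_state(at, para_id).await? {
			// `constraints` bound what any new fragment may do at this relay-parent.
			let _min_rp = state.constraints.min_relay_parent_number;
			// `pending_availability` lists the on-chain candidates that prospective
			// fragments may build upon even when their relay-parents fall out of scope.
			for pending in &state.pending_availability {
				let _ = (pending.candidate_hash, pending.relay_parent_number);
			}
		}
		Ok(())
	}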
diff --git a/parachain/test-parachains/adder/collator/Cargo.toml b/parachain/test-parachains/adder/collator/Cargo.toml
index 5db446a9c395..90be2dc8cabf 100644
--- a/parachain/test-parachains/adder/collator/Cargo.toml
+++ b/parachain/test-parachains/adder/collator/Cargo.toml
@@ -45,3 +45,6 @@ sc-service = { git = "https://github.com/paritytech/substrate", branch = "master" }
sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" }
tokio = { version = "1.24.2", features = ["macros"] }
+
+[features]
+network-protocol-staging = ["polkadot-cli/network-protocol-staging"]
diff --git a/primitives/src/runtime_api.rs b/primitives/src/runtime_api.rs
index f927fea0d4a8..6db1485f5409 100644
--- a/primitives/src/runtime_api.rs
+++ b/primitives/src/runtime_api.rs
@@ -225,10 +225,10 @@ sp_api::decl_runtime_apis! {
		/***** Asynchronous backing *****/

-		/// Returns the base constraints of the given para, if they exist.
+		/// Returns the state of parachain backing for a given para.
		/// This is a staging method! Do not use on production runtimes!
		#[api_version(99)]
-		fn staging_validity_constraints(_: ppp::Id) -> Option<vstaging::Constraints<N>>;
+		fn staging_para_backing_state(_: ppp::Id) -> Option<vstaging::BackingState<H, N>>;

		/// Returns candidate's acceptance limitations for asynchronous backing for a relay parent.
		#[api_version(99)]
diff --git a/primitives/src/vstaging/mod.rs b/primitives/src/vstaging/mod.rs
index 3cab73d8a0ec..6e0a46a78a06 100644
--- a/primitives/src/vstaging/mod.rs
+++ b/primitives/src/vstaging/mod.rs
@@ -99,5 +99,32 @@ pub struct Constraints<N = BlockNumber> {
	pub future_validation_code: Option<(N, ValidationCodeHash)>,
}

+/// A candidate pending availability.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+pub struct CandidatePendingAvailability<H = Hash, N = BlockNumber> {
+	/// The hash of the candidate.
+	pub candidate_hash: CandidateHash,
+	/// The candidate's descriptor.
+	pub descriptor: CandidateDescriptor<H>,
+	/// The commitments of the candidate.
+	pub commitments: CandidateCommitments,
+	/// The candidate's relay parent's number.
+	pub relay_parent_number: N,
+	/// The maximum Proof-of-Validity size allowed, in bytes.
+	pub max_pov_size: u32,
+}
+
+/// The per-parachain state of the backing system, including
+/// state-machine constraints and candidates pending availability.
+#[derive(RuntimeDebug, Clone, PartialEq, Encode, Decode, TypeInfo)]
+pub struct BackingState<H = Hash, N = BlockNumber> {
+	/// The state-machine constraints of the parachain.
+	pub constraints: Constraints<N>,
+	/// The candidates pending availability. These should be ordered, i.e. they should form
+	/// a sub-chain, where the first candidate builds on top of the required parent of the
+	/// constraints and each subsequent candidate builds on top of the previous candidate's
+	/// head-data.
+	pub pending_availability: Vec<CandidatePendingAvailability<H, N>>,
+}
+
pub mod executor_params;
pub use executor_params::{ExecutorParam, ExecutorParams, ExecutorParamsHash};
diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs
index 14f654a9ce70..1907a340ab0b 100644
--- a/runtime/parachains/src/inclusion/mod.rs
+++ b/runtime/parachains/src/inclusion/mod.rs
@@ -118,6 +118,14 @@ impl<H, N> CandidatePendingAvailability<H, N> {
		&self.descriptor
	}

+	/// Get the candidate's relay parent's number.
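+	/// (The number is cloned out of the stored record rather than borrowed, so
+	/// callers do not keep a borrow of the pending-availability entry alive.)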
+ pub(crate) fn relay_parent_number(&self) -> N + where + N: Clone, + { + self.relay_parent_number.clone() + } + #[cfg(any(feature = "runtime-benchmarks", test))] pub(crate) fn new( core: CoreIndex, diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs index 20b9dbaf0d4c..28477b13c4bf 100644 --- a/runtime/parachains/src/paras_inherent/mod.rs +++ b/runtime/parachains/src/paras_inherent/mod.rs @@ -727,7 +727,7 @@ impl Pallet { &validator_public[..], bitfields.clone(), >::core_para, - false, + true, // we must enact the previous candidate for subsequent validation ); let freed = collect_all_freed_cores::(freed_concluded.iter().cloned()); diff --git a/runtime/parachains/src/runtime_api_impl/vstaging.rs b/runtime/parachains/src/runtime_api_impl/vstaging.rs index e1aea3eaf882..1af103bcdabd 100644 --- a/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -19,8 +19,8 @@ use crate::{configuration, disputes, dmp, hrmp, initializer, paras, session_info, shared, ump}; use primitives::{ vstaging::{ - AsyncBackingParameters, Constraints, ExecutorParams, InboundHrmpLimitations, - OutboundHrmpChannelLimitations, + AsyncBackingParameters, BackingState, CandidatePendingAvailability, Constraints, + ExecutorParams, InboundHrmpLimitations, OutboundHrmpChannelLimitations, }, CandidateHash, DisputeState, Id as ParaId, SessionIndex, }; @@ -47,10 +47,10 @@ pub fn session_executor_params( } } -/// Implementation for `StagingValidityConstraints` function from the runtime API -pub fn validity_constraints( +/// Implementation for `StagingParaBackingState` function from the runtime API +pub fn backing_state( para_id: ParaId, -) -> Option> { +) -> Option> { let config = >::config(); // Async backing is only expected to be enabled with a tracker capacity of 1. // Subsequent configuration update gets applied on new session, which always @@ -92,7 +92,7 @@ pub fn validity_constraints( }) .collect(); - Some(Constraints { + let constraints = Constraints { min_relay_parent_number, max_pov_size: config.max_pov_size, max_code_size: config.max_code_size, @@ -107,7 +107,33 @@ pub fn validity_constraints( validation_code_hash, upgrade_restriction, future_validation_code, - }) + }; + + let pending_availability = { + // Note: the API deals with a `Vec` as it is future-proof for cases + // where there may be multiple candidates pending availability at a time. + // But at the moment only one candidate can be pending availability per + // parachain. + crate::inclusion::PendingAvailability::::get(¶_id) + .and_then(|pending| { + let commitments = + crate::inclusion::PendingAvailabilityCommitments::::get(¶_id); + commitments.map(move |c| (pending, c)) + }) + .map(|(pending, commitments)| { + CandidatePendingAvailability { + candidate_hash: pending.candidate_hash(), + descriptor: pending.candidate_descriptor().clone(), + commitments, + relay_parent_number: pending.relay_parent_number(), + max_pov_size: constraints.max_pov_size, // assume always same in session. + } + }) + .into_iter() + .collect() + }; + + Some(BackingState { constraints, pending_availability }) } /// Implementation for `StagingAsyncBackingParameters` function from the runtime API diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 91c683ef8068..a534a21545bd 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -1781,8 +1781,8 @@ sp_api::impl_runtime_apis! 
{ runtime_parachains::runtime_api_impl::vstaging::get_session_disputes::() } - fn staging_validity_constraints(para_id: ParaId) -> Option { - runtime_parachains::runtime_api_impl::vstaging::validity_constraints::(para_id) + fn staging_para_backing_state(para_id: ParaId) -> Option { + runtime_parachains::runtime_api_impl::vstaging::backing_state::(para_id) } fn staging_async_backing_parameters() -> primitives::vstaging::AsyncBackingParameters { diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index 993e8d22f95a..b5dbb3117afa 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -1488,8 +1488,8 @@ sp_api::impl_runtime_apis! { runtime_parachains::runtime_api_impl::vstaging::get_session_disputes::() } - fn staging_validity_constraints(para_id: ParaId) -> Option { - runtime_parachains::runtime_api_impl::vstaging::validity_constraints::(para_id) + fn staging_para_backing_state(para_id: ParaId) -> Option { + runtime_parachains::runtime_api_impl::vstaging::backing_state::(para_id) } fn staging_async_backing_parameters() -> primitives::vstaging::AsyncBackingParameters { From 5e53c38755a91f3af1d4d3027fc24c6bcb5dc632 Mon Sep 17 00:00:00 2001 From: "Mattia L.V. Bradascio" <28816406+bredamatt@users.noreply.github.com> Date: Wed, 8 Mar 2023 16:40:33 +0000 Subject: [PATCH 41/76] Remove restart from test (#6840) --- .../002-async-backing-runtime-upgrade.zndsl | 9 --------- 1 file changed, 9 deletions(-) diff --git a/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl b/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl index 2a4e2f1ded18..6213d1afb81e 100644 --- a/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl +++ b/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.zndsl @@ -32,12 +32,3 @@ bob: run ../misc/0002-download-polkadot-from-pr.sh with "{{POLKADOT_PR_BIN_URL}} # Bootstrap the runtime upgrade sleep 30 seconds - -alice: restart after 5 seconds -bob: restart after 5 seconds - -alice: is up within 10 seconds -bob: is up within 10 seconds - -alice: parachain 100 block height is at least 10 within 250 seconds -bob: parachain 101 block height is at least 10 within 250 seconds From cf5f666b7c199d40ce479e926ac82e3953bc083d Mon Sep 17 00:00:00 2001 From: Marcin S Date: Fri, 10 Mar 2023 02:03:07 +0100 Subject: [PATCH 42/76] Async Backing: Statement Distribution Tests (#6755) * start on handling incoming * split off session info into separate map * start in on a knowledge tracker * address some grumbles * format * missed comment * some docs for direct * add note on slashing * amend * simplify 'direct' code * finish up the 'direct' logic * add a bunch of tests for the direct-in-group logic * rename 'direct' to 'cluster', begin a candidate_entry module * distill candidate_entry * start in on a statement-store module * some utilities for the statement store * rewrite 'send_statement_direct' using new tools * filter sending logic on peers which have the relay-parent in their view. 
* some more logic for handling incoming statements * req/res: BackedCandidatePacket -> AttestedCandidate + tweaks * add a `validated_in_group` bitfield to BackedCandidateInventory * BackedCandidateInventory -> Manifest * start in on requester module * add outgoing request for attested candidate * add a priority mechanism for requester * some request dispatch logic * add seconded mask to tagged-request * amend manifest to hold group index * handle errors and set up scaffold for response validation * validate attested candidate responses * requester -> requests * add some utilities for manipulating requests * begin integrating requester * start grid module * tiny * refactor grid topology to expose more info to subsystems * fix grid_topology test * fix overseer test * implement topology group-based view construction logic * fmt * flesh out grid slightly more * add indexed groups utility * integrate Groups into per-session info * refactor statement store to borrow Groups * implement manifest knowledge utility * add a test for topology setup * don't send to group members * test for conflicting manifests * manifest knowledge tests * fmt * rename field * garbage collection for grid tracker * routines for finding correct/incorrect advertisers * add manifest import logic * tweak naming * more tests for manifest import * add comment * rework candidates into a view-wide tracker * fmt * start writing boilerplate for grid sending * fmt * some more group boilerplate * refactor handling of topology and authority IDs * fmt * send statements directly to grid peers where possible * send to cluster only if statement belongs to cluster * improve handling of cluster statements * handle incoming statements along the grid * API for introduction of candidates into the tree * backing: use new prospective parachains API * fmt prospective parachains changes * fmt statement-dist * fix condition * get ready for tracking importable candidates * prospective parachains: add Cow logic * incomplete and complete hypothetical candidates * remove keep_if_unneeded * fmt * implement more general HypotheticalFrontier * fmt, cleanup * add a by_parent_hash index to candidate tracker * more framework for future code * utilities for getting all hypothetical candidates for frontier * track origin in statement store * fmt * requests should return peer * apply post-confirmation reckoning * flesh out import/announce/circulate logic on new statements * adjust * adjust TODO comment * fix backing tests * update statement-distribution to use new indexedvec * fmt * query hypothetical candidates * implement `note_importable_under` * extract common utility of fragment tree updates * add a helper function for getting statements unknown by backing * import fresh statements to backing * send announcements and acknowledgements over grid * provide freshly importable statements also avoid tracking backed candidates in statement distribution * do not issue requests on newly importable candidates * add TODO for later when confirming candidate * write a routine for handling backed candidate notifications * simplify grid substantially * add some test TODOs * handle confirmed candidates & grid announcements * finish implementing manifest handling, including follow up statements * send follow-up statements when acknowledging freshly backed * fmt * handle incoming acknowledgements * a little DRYing * wire up network messages to handlers * fmt * some skeleton code for peer view update handling * more peer view skeleton stuff * Fix async backing statement 
distribution tests (#6621) * Fix compile errors in tests * Cargo fmt * Resolve some todos in async backing statement-distribution branch (#6482) * Implement `remove_by_relay_parent` * Extract `minimum_votes` to shared primitives. * Add `can_send_statements_received_with_prejudice` test * Fix test * Update docstrings * Cargo fmt * Fix compile error * Fix compile errors in tests * Cargo fmt * Add module docs; write `test_priority_ordering` (first draft) * Fix `test_priority_ordering` * Move `insert_or_update_priority`: `Drop` -> `set_cluster_priority` * Address review comments * Remove `Entry::get_mut` * fix test compilation * add a TODO for a test * clean up a couple of TODOs * implement sending pending cluster statements * refactor utility function for sending acknowledgement and statements * mostly implement catching peers up via grid * Fix clippy error * alter grid to track all pending statements * fix more TODOs and format * tweak a TODO in requests * some logic for dispatching requests * fmt * skeleton for response receiving * Async backing statement distribution: cluster tests (#6678) * Add `pending_statements_set_when_receiving_fresh_statements` * Add `pending_statements_updated_when_sending_statements` test * fix up * fmt * update TODO * rework seconded mask in requests * change doc * change unhandledresponse not to borrow request manager * only accept responses sufficient to back * finish implementing response handling * extract statement filter to protocol crate * rework requests: use statement filter in network protocol * dispatch cluster requests correctly * rework cluster statement sending * implement request answering * fmt * only send confirmed candidate statement messages on unified relay-parent * Fix Tests In Statement Distribution Branch * Async Backing: Integrate `vstaging` of statement distribution into `lib.rs` (#6715) * Integrate `handle_active_leaves_update` * Integrate `share_local_statement`/`handle_backed_candidate_message` * Start hooking up request/response flow * Finish hooking up request/response flow * Limit number of parallel requests in responder * Fix test compilation errors * Fix missing check for prospective parachains mode * Fix some more compile errors * clean up some review comments * clean up warnings * Async backing statement distribution: grid tests (#6673) * Add `manifest_import_returns_ok_true` test * cargo fmt * Add pending_communication_receiving_manifest_on_confirmed_candidate * Add `senders_can_provide_manifests_in_acknowledgement` test * Add a couple of tests for pending statements * Add `pending_statements_cleared_when_sending` test * Add `pending_statements_respect_remote_knowledge` test * Refactor group creation in tests * Clarify docs * Address some review comments * Make some clarifications * Fix post-merge errors * Clarify test `senders_can_provide_manifests_in_acknowledgement` * Try writing `pending_statements_are_updated_after_manifest_exchange` * Document "seconding limit" and `reject_overflowing_manifests` test * Test that seconding counts are not updated for validators on error * Fix tests * Fix manifest exchange test * Add more tests in `requests.rs` (#6707) This resolves remaining TODOs in this file. 
* remove outdated inventory terminology * Async backing statement distribution: `Candidates` tests (#6658) * Async Backing: Fix clippy errors in statement distribution branch (#6720) * Integrate `handle_active_leaves_update` * Integrate `share_local_statement`/`handle_backed_candidate_message` * Start hooking up request/response flow * Finish hooking up request/response flow * Limit number of parallel requests in responder * Fix test compilation errors * Fix missing check for prospective parachains mode * Fix some more compile errors * Async Backing: Fix clippy errors in statement distribution branch * Fix some more clippy lints * add tests module * fix warnings in existing tests * create basic test harness * create a test state struct * fmt * create empty cluster & grid modules for tests * some TODOs for cluster test suite * describe test-suite for grid logic * describe request test suite * fix seconding-limit bug * Remove extraneous `pub` This somehow made it into my clippy PR. * Fix some test compile warnings * Remove some unneeded `allow`s * adapt some new test helpers from Marcin * add helper for activating a gossip topology * add utility for signing statements * helpers for connecting/disconnecting peers * round out network utilities * fmt * fix bug in initializing validator-meta * fix compilation * implement first cluster test * TODOs for incoming request tests * Remove unneeded `make_committed_candidate` helper * fmt * Hook up request sender * Add `valid_statement_without_prior_seconded_is_ignored` test * Fix `valid_statement_without_prior_seconded_is_ignored` test * some more tests for cluster * add a TODO about grid senders * integrate inbound req/res into test harness * polish off initial cluster test suite * keep introduce candidate request * fix tests after introduce candidate request * fmt * Add grid protocol to module docs * Remove obsolete test * Fix comments * Test `backed_in_path_only: true` * Update node/network/protocol/src/lib.rs Co-authored-by: Chris Sosnin <48099298+slumber@users.noreply.github.com> * Update node/network/protocol/src/request_response/mod.rs Co-authored-by: Chris Sosnin <48099298+slumber@users.noreply.github.com> * Mark receiver with `vstaging` * First draft of `ensure_seconding_limit_is_respected` test * validate grid senders based on manifest kind * fix mask_seconded/valid * fix unwanted-mask check * fix build * resolve todo on leaf mode * Unify protocol naming to vstaging * Fix `ensure_seconding_limit_is_respected` test * Start `backed_candidate_leads_to_advertisement` test * fmt, fix grid test after topology change * Send Backed notification * Finish `backed_candidate_leads_to_advertisement` test * Finish `peer_reported_for_duplicate_statements` test * Finish `received_advertisement_before_confirmation_leads_to_request` * Add `advertisements_rejected_from_incorrect_peers` test * Add `manifest_rejected_*` tests * Add `manifest_rejected_when_group_does_not_match_para` test * Add `local_node_sanity_checks_incoming_requests` test * Add `local_node_respects_statement_mask` test * Add tests where peer is reported for providing invalid signatures * Add `cluster_peer_allowed_to_send_incomplete_statements` test * Add `received_advertisement_after_backing_leads_to_acknowledgement` * Add `received_advertisement_after_confirmation_before_backing` test * peer_reported_for_advertisement_conflicting_with_confirmed_candidate * Add `peer_reported_for_not_enough_statements` test * Add `peer_reported_for_providing_statements_meant_to_be_masked_out` * Add 
`additional_statements_are_shared_after_manifest_exchange` * Add `grid_statements_imported_to_backing` test * Add `relay_parent_entering_peer_view_leads_to_advertisement` test * Add `advertisement_not_re_sent_when_peer_re_enters_view` test * Update node/network/statement-distribution/src/vstaging/tests/grid.rs Co-authored-by: asynchronous rob * Resolve TODOs, update test * Address unused code * Add check after every test for unhandled requests * Refactor (`make_dummy_leaf` and `handle_sent_request`) * Refactor (`make_dummy_topology`) * Minor refactor --------- Co-authored-by: Robert Habermeier Co-authored-by: Chris Sosnin <48099298+slumber@users.noreply.github.com> Co-authored-by: Chris Sosnin --- node/network/protocol/src/lib.rs | 2 +- .../src/vstaging/grid.rs | 2 +- .../src/vstaging/mod.rs | 4 +- .../src/vstaging/tests/cluster.rs | 799 +++--- .../src/vstaging/tests/grid.rs | 2440 ++++++++++++++++- .../src/vstaging/tests/mod.rs | 149 +- .../src/vstaging/tests/requests.rs | 1557 ++++++++++- node/subsystem-types/src/messages.rs | 7 +- 8 files changed, 4527 insertions(+), 433 deletions(-) diff --git a/node/network/protocol/src/lib.rs b/node/network/protocol/src/lib.rs index 9f50bf29406e..0f2f5a9327ac 100644 --- a/node/network/protocol/src/lib.rs +++ b/node/network/protocol/src/lib.rs @@ -737,7 +737,7 @@ pub mod vstaging { /// Network messages used by the statement distribution subsystem. #[derive(Debug, Clone, Encode, Decode, PartialEq, Eq)] pub enum StatementDistributionMessage { - /// A notification of a signed statement in compact form. + /// A notification of a signed statement in compact form, for a given relay parent. #[codec(index = 0)] Statement(Hash, UncheckedSignedStatement), diff --git a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index bdcbabffd3e5..5934e05378e5 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -695,7 +695,7 @@ fn decompose_statement_filter<'a>( } /// A summary of a manifest being sent by a counterparty. -#[derive(Clone)] +#[derive(Debug, Clone)] pub struct ManifestSummary { /// The claimed parent head data hash of the candidate. pub claimed_parent_hash: Hash, diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index a562668627a1..5fe39096c1cb 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -119,7 +119,7 @@ const COST_UNEXPECTED_REQUEST: Rep = Rep::CostMajor("Unexpected attested candida const BENEFIT_VALID_RESPONSE: Rep = Rep::BenefitMajor("Peer Answered Candidate Request"); const BENEFIT_VALID_STATEMENT: Rep = Rep::BenefitMajor("Peer provided a valid statement"); const BENEFIT_VALID_STATEMENT_FIRST: Rep = - Rep::BenefitMajorFirst("Peer was the first to provide a valid statement"); + Rep::BenefitMajorFirst("Peer was the first to provide a given valid statement"); struct PerRelayParentState { local_validator: Option, @@ -144,6 +144,7 @@ struct LocalValidatorState { grid_tracker: GridTracker, } +#[derive(Debug)] struct PerSessionState { session_info: SessionInfo, groups: Groups, @@ -1006,6 +1007,7 @@ pub(crate) async fn share_local_statement( // two kinds of targets: those in our 'cluster' (currently just those in the same group), // and those we are propagating to through the grid. 
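// Cluster targets receive our own group's statements directly; grid targets are
// reached via manifests and acknowledgements once a candidate is backed.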
+#[derive(Debug)] enum DirectTargetKind { Cluster, Grid, diff --git a/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/node/network/statement-distribution/src/vstaging/tests/cluster.rs index ca849dbd39a6..88fa13d98dc3 100644 --- a/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/tests/cluster.rs @@ -20,7 +20,12 @@ use polkadot_primitives_test_helpers::make_candidate; #[test] fn share_seconded_circulated_to_cluster() { - let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + let config = TestConfig { + validator_count: 20, + group_size: 3, + local_validator: true, + async_backing_params: None, + }; let relay_parent = Hash::repeat_byte(1); let peer_a = PeerId::random(); @@ -31,37 +36,18 @@ fn share_seconded_circulated_to_cluster() { let local_validator = state.local.clone().unwrap(); let local_para = ParaId::from(local_validator.group_index.0); + let test_leaf = state.make_dummy_leaf(relay_parent); + let (candidate, pvd) = make_candidate( relay_parent, 1, local_para, - vec![1, 2, 3].into(), + test_leaf.para_data(local_para).head_data.clone(), vec![4, 5, 6].into(), Hash::repeat_byte(42).into(), ); let candidate_hash = candidate.hash(); - let test_leaf = TestLeaf { - number: 1, - hash: relay_parent, - parent_hash: Hash::repeat_byte(0), - session: 1, - availability_cores: state.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) - }), - para_data: (0..state.session_info.validator_groups.len()) - .map(|i| { - ( - ParaId::from(i as u32), - PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, - ) - }) - .collect(), - }; - // peer A is in group, has relay parent in view. // peer B is in group, has no relay parent in view. // peer C is not in group, has relay parent in view. @@ -88,7 +74,7 @@ fn share_seconded_circulated_to_cluster() { send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; } - activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + activate_leaf(&mut overseer, &test_leaf, &state, true).await; answer_expected_hypothetical_depth_request( &mut overseer, @@ -141,36 +127,21 @@ fn share_seconded_circulated_to_cluster() { #[test] fn cluster_valid_statement_before_seconded_ignored() { - let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + let config = TestConfig { + validator_count: 20, + group_size: 3, + local_validator: true, + async_backing_params: None, + }; let relay_parent = Hash::repeat_byte(1); let peer_a = PeerId::random(); test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); - let test_leaf = TestLeaf { - number: 1, - hash: relay_parent, - parent_hash: Hash::repeat_byte(0), - session: 1, - availability_cores: state.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) - }), - para_data: (0..state.session_info.validator_groups.len()) - .map(|i| { - ( - ParaId::from(i as u32), - PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, - ) - }) - .collect(), - }; + let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is in group, has relay parent in view. 
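        // A `Valid` statement arriving before any `Seconded` for the candidate must be
        // ignored, since non-`Seconded` statements require an already-known candidate.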
let other_group_validators = state.group_validators(local_validator.group_index, true); @@ -183,7 +154,7 @@ fn cluster_valid_statement_before_seconded_ignored() { .await; send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; - activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + activate_leaf(&mut overseer, &test_leaf, &state, true).await; answer_expected_hypothetical_depth_request( &mut overseer, @@ -223,36 +194,21 @@ fn cluster_valid_statement_before_seconded_ignored() { #[test] fn cluster_statement_bad_signature() { - let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + let config = TestConfig { + validator_count: 20, + group_size: 3, + local_validator: true, + async_backing_params: None, + }; let relay_parent = Hash::repeat_byte(1); let peer_a = PeerId::random(); test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); - let test_leaf = TestLeaf { - number: 1, - hash: relay_parent, - parent_hash: Hash::repeat_byte(0), - session: 1, - availability_cores: state.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) - }), - para_data: (0..state.session_info.validator_groups.len()) - .map(|i| { - ( - ParaId::from(i as u32), - PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, - ) - }) - .collect(), - }; + let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is in group, has relay parent in view. let other_group_validators = state.group_validators(local_validator.group_index, true); @@ -267,7 +223,7 @@ fn cluster_statement_bad_signature() { .await; send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; - activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + activate_leaf(&mut overseer, &test_leaf, &state, true).await; answer_expected_hypothetical_depth_request( &mut overseer, @@ -318,36 +274,21 @@ fn cluster_statement_bad_signature() { #[test] fn useful_cluster_statement_from_non_cluster_peer_rejected() { - let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + let config = TestConfig { + validator_count: 20, + group_size: 3, + local_validator: true, + async_backing_params: None, + }; let relay_parent = Hash::repeat_byte(1); let peer_a = PeerId::random(); test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); - let test_leaf = TestLeaf { - number: 1, - hash: relay_parent, - parent_hash: Hash::repeat_byte(0), - session: 1, - availability_cores: state.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) - }), - para_data: (0..state.session_info.validator_groups.len()) - .map(|i| { - ( - ParaId::from(i as u32), - PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, - ) - }) - .collect(), - }; + let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is not in group, has relay parent in view. 
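        // Cluster statements are only accepted from peers in the statement's
        // originating group, so this one is rejected.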
let not_our_group = @@ -364,7 +305,7 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { .await; send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; - activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + activate_leaf(&mut overseer, &test_leaf, &state, true).await; answer_expected_hypothetical_depth_request( &mut overseer, @@ -402,36 +343,21 @@ fn useful_cluster_statement_from_non_cluster_peer_rejected() { #[test] fn statement_from_non_cluster_originator_unexpected() { - let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + let config = TestConfig { + validator_count: 20, + group_size: 3, + local_validator: true, + async_backing_params: None, + }; let relay_parent = Hash::repeat_byte(1); let peer_a = PeerId::random(); test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); - let local_para = ParaId::from(local_validator.group_index.0); let candidate_hash = CandidateHash(Hash::repeat_byte(42)); - let test_leaf = TestLeaf { - number: 1, - hash: relay_parent, - parent_hash: Hash::repeat_byte(0), - session: 1, - availability_cores: state.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) - }), - para_data: (0..state.session_info.validator_groups.len()) - .map(|i| { - ( - ParaId::from(i as u32), - PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, - ) - }) - .collect(), - }; + let test_leaf = state.make_dummy_leaf(relay_parent); // peer A is not in group, has relay parent in view. let other_group_validators = state.group_validators(local_validator.group_index, true); @@ -440,7 +366,7 @@ fn statement_from_non_cluster_originator_unexpected() { connect_peer(&mut overseer, peer_a.clone(), None).await; send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; - activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + activate_leaf(&mut overseer, &test_leaf, &state, true).await; answer_expected_hypothetical_depth_request( &mut overseer, @@ -478,7 +404,13 @@ fn statement_from_non_cluster_originator_unexpected() { #[test] fn seconded_statement_leads_to_request() { - let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + let group_size = 3; + let config = TestConfig { + validator_count: 20, + group_size, + local_validator: true, + async_backing_params: None, + }; let relay_parent = Hash::repeat_byte(1); let peer_a = PeerId::random(); @@ -486,28 +418,18 @@ fn seconded_statement_leads_to_request() { test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); let local_para = ParaId::from(local_validator.group_index.0); - let candidate_hash = CandidateHash(Hash::repeat_byte(42)); - let test_leaf = TestLeaf { - number: 1, - hash: relay_parent, - parent_hash: Hash::repeat_byte(0), - session: 1, - availability_cores: state.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) - }), - para_data: (0..state.session_info.validator_groups.len()) - .map(|i| { - ( - ParaId::from(i as u32), - PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, - ) - }) - .collect(), - }; + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 
5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); // peer A is in group, has relay parent in view. let other_group_validators = state.group_validators(local_validator.group_index, true); @@ -521,7 +443,7 @@ fn seconded_statement_leads_to_request() { .await; send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; - activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + activate_leaf(&mut overseer, &test_leaf, &state, true).await; answer_expected_hypothetical_depth_request( &mut overseer, @@ -553,27 +475,37 @@ fn seconded_statement_leads_to_request() { if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } ); + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + vec![], + ) + .await; + assert_matches!( overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(requests, IfDisconnected::ImmediateError)) => { - assert_eq!(requests.len(), 1); - assert_matches!( - &requests[0], - Requests::AttestedCandidateVStaging(outgoing) => { - assert_eq!(outgoing.peer, Recipient::Peer(peer_a.clone())); - assert_eq!(outgoing.payload.candidate_hash, candidate_hash); - } - ); - } + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_RESPONSE => { } ); + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + overseer }); } #[test] fn cluster_statements_shared_seconded_first() { - let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + let config = TestConfig { + validator_count: 20, + group_size: 3, + local_validator: true, + async_backing_params: None, + }; let relay_parent = Hash::repeat_byte(1); let peer_a = PeerId::random(); @@ -582,37 +514,18 @@ fn cluster_statements_shared_seconded_first() { let local_validator = state.local.clone().unwrap(); let local_para = ParaId::from(local_validator.group_index.0); + let test_leaf = state.make_dummy_leaf(relay_parent); + let (candidate, pvd) = make_candidate( relay_parent, 1, local_para, - vec![1, 2, 3].into(), + test_leaf.para_data(local_para).head_data.clone(), vec![4, 5, 6].into(), Hash::repeat_byte(42).into(), ); let candidate_hash = candidate.hash(); - let test_leaf = TestLeaf { - number: 1, - hash: relay_parent, - parent_hash: Hash::repeat_byte(0), - session: 1, - availability_cores: state.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) - }), - para_data: (0..state.session_info.validator_groups.len()) - .map(|i| { - ( - ParaId::from(i as u32), - PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, - ) - }) - .collect(), - }; - // peer A is in group, no relay parent in view. 
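        // Sending is deferred until peer A's view includes the relay parent; pending
        // statements are then flushed with `Seconded` first.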
{ let other_group_validators = state.group_validators(local_validator.group_index, true); @@ -625,7 +538,7 @@ fn cluster_statements_shared_seconded_first() { .await; } - activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + activate_leaf(&mut overseer, &test_leaf, &state, true).await; answer_expected_hypothetical_depth_request( &mut overseer, @@ -708,7 +621,12 @@ fn cluster_statements_shared_seconded_first() { #[test] fn cluster_accounts_for_implicit_view() { - let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + let config = TestConfig { + validator_count: 20, + group_size: 3, + local_validator: true, + async_backing_params: None, + }; let relay_parent = Hash::repeat_byte(1); let peer_a = PeerId::random(); @@ -718,37 +636,18 @@ fn cluster_accounts_for_implicit_view() { let local_validator = state.local.clone().unwrap(); let local_para = ParaId::from(local_validator.group_index.0); + let test_leaf = state.make_dummy_leaf(relay_parent); + let (candidate, pvd) = make_candidate( relay_parent, 1, local_para, - vec![1, 2, 3].into(), + test_leaf.para_data(local_para).head_data.clone(), vec![4, 5, 6].into(), Hash::repeat_byte(42).into(), ); let candidate_hash = candidate.hash(); - let test_leaf = TestLeaf { - number: 1, - hash: relay_parent, - parent_hash: Hash::repeat_byte(0), - session: 1, - availability_cores: state.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) - }), - para_data: (0..state.session_info.validator_groups.len()) - .map(|i| { - ( - ParaId::from(i as u32), - PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, - ) - }) - .collect(), - }; - // peer A is in group, has relay parent in view. // peer B is in group, has no relay parent in view. { @@ -771,7 +670,7 @@ fn cluster_accounts_for_implicit_view() { send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; } - activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + activate_leaf(&mut overseer, &test_leaf, &state, true).await; answer_expected_hypothetical_depth_request( &mut overseer, @@ -820,28 +719,11 @@ fn cluster_accounts_for_implicit_view() { // activate new leaf, which has relay-parent in implicit view. 
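        // The old relay parent remains part of the new leaf's implicit view, so
        // statements anchored to it are still valid to circulate.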
let next_relay_parent = Hash::repeat_byte(2); - let next_test_leaf = TestLeaf { - number: 2, - hash: next_relay_parent, - parent_hash: relay_parent, - session: 1, - availability_cores: state.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) - }), - para_data: (0..state.session_info.validator_groups.len()) - .map(|i| { - ( - ParaId::from(i as u32), - PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, - ) - }) - .collect(), - }; + let mut next_test_leaf = state.make_dummy_leaf(next_relay_parent); + next_test_leaf.parent_hash = relay_parent; + next_test_leaf.number = 2; - activate_leaf(&mut overseer, local_para, &next_test_leaf, &state, false).await; + activate_leaf(&mut overseer, &next_test_leaf, &state, false).await; answer_expected_hypothetical_depth_request( &mut overseer, @@ -889,47 +771,33 @@ fn cluster_accounts_for_implicit_view() { #[test] fn cluster_messages_imported_after_confirmed_candidate_importable_check() { - let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + let group_size = 3; + let config = TestConfig { + validator_count: 20, + group_size, + local_validator: true, + async_backing_params: None, + }; let relay_parent = Hash::repeat_byte(1); let peer_a = PeerId::random(); - let peer_b = PeerId::random(); test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); let local_para = ParaId::from(local_validator.group_index.0); + let test_leaf = state.make_dummy_leaf(relay_parent); + let (candidate, pvd) = make_candidate( relay_parent, 1, local_para, - vec![1, 2, 3].into(), + test_leaf.para_data(local_para).head_data.clone(), vec![4, 5, 6].into(), Hash::repeat_byte(42).into(), ); let candidate_hash = candidate.hash(); - let test_leaf = TestLeaf { - number: 1, - hash: relay_parent, - parent_hash: Hash::repeat_byte(0), - session: 1, - availability_cores: state.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) - }), - para_data: (0..state.session_info.validator_groups.len()) - .map(|i| { - ( - ParaId::from(i as u32), - PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, - ) - }) - .collect(), - }; - // peer A is in group, has relay parent in view. let other_group_validators = state.group_validators(local_validator.group_index, true); let v_a = other_group_validators[0]; @@ -944,7 +812,7 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; } - activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + activate_leaf(&mut overseer, &test_leaf, &state, true).await; answer_expected_hypothetical_depth_request( &mut overseer, @@ -954,54 +822,53 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { ) .await; - let a_seconded = state - .sign_statement( - v_a, - CompactStatement::Seconded(candidate_hash), - &SigningContext { parent_hash: relay_parent, session_index: 1 }, - ) - .as_unchecked() - .clone(); + // Peer sends `Seconded` statement. 
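+        // As the first statement received for this candidate, it is rewarded with
+        // `BENEFIT_VALID_STATEMENT_FIRST` below.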
+ { + let a_seconded = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); - send_peer_message( - &mut overseer, - peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, a_seconded), - ) - .await; + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + a_seconded, + ), + ) + .await; - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) - if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } - ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + } - let req = assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(mut requests, IfDisconnected::ImmediateError)) => { - assert_eq!(requests.len(), 1); - assert_matches!( - requests.pop().unwrap(), - Requests::AttestedCandidateVStaging(mut outgoing) => { - assert_eq!(outgoing.peer, Recipient::Peer(peer_a.clone())); - assert_eq!(outgoing.payload.candidate_hash, candidate_hash); - - let res = AttestedCandidateResponse { - candidate_receipt: candidate.clone(), - persisted_validation_data: pvd.clone(), - statements: vec![], - }; - outgoing.pending_response.send(Ok(res.encode())); - } - ); - } - ); + // Send a request to peer and mock its response. + { + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + vec![], + ) + .await; - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) - if p == peer_a && r == BENEFIT_VALID_RESPONSE => { } - ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_RESPONSE + ); + } answer_expected_hypothetical_depth_request( &mut overseer, @@ -1039,47 +906,33 @@ fn cluster_messages_imported_after_confirmed_candidate_importable_check() { #[test] fn cluster_messages_imported_after_new_leaf_importable_check() { - let config = TestConfig { validator_count: 20, group_size: 3, local_validator: true }; + let group_size = 3; + let config = TestConfig { + validator_count: 20, + group_size, + local_validator: true, + async_backing_params: None, + }; let relay_parent = Hash::repeat_byte(1); let peer_a = PeerId::random(); - let peer_b = PeerId::random(); test_harness(config, |state, mut overseer| async move { let local_validator = state.local.clone().unwrap(); let local_para = ParaId::from(local_validator.group_index.0); + let test_leaf = state.make_dummy_leaf(relay_parent); + let (candidate, pvd) = make_candidate( relay_parent, 1, local_para, - vec![1, 2, 3].into(), + test_leaf.para_data(local_para).head_data.clone(), vec![4, 5, 6].into(), Hash::repeat_byte(42).into(), ); let candidate_hash = candidate.hash(); - let test_leaf = TestLeaf { - number: 1, - hash: relay_parent, - parent_hash: Hash::repeat_byte(0), - session: 1, - availability_cores: state.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) - }), - para_data: (0..state.session_info.validator_groups.len()) - .map(|i| { - ( - ParaId::from(i as u32), - 
PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, - ) - }) - .collect(), - }; - // peer A is in group, has relay parent in view. let other_group_validators = state.group_validators(local_validator.group_index, true); let v_a = other_group_validators[0]; @@ -1094,7 +947,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; } - activate_leaf(&mut overseer, local_para, &test_leaf, &state, true).await; + activate_leaf(&mut overseer, &test_leaf, &state, true).await; answer_expected_hypothetical_depth_request( &mut overseer, @@ -1104,80 +957,62 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { ) .await; - let a_seconded = state - .sign_statement( - v_a, - CompactStatement::Seconded(candidate_hash), - &SigningContext { parent_hash: relay_parent, session_index: 1 }, - ) - .as_unchecked() - .clone(); + // Peer sends `Seconded` statement. + { + let a_seconded = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); - send_peer_message( - &mut overseer, - peer_a.clone(), - protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, a_seconded), - ) - .await; + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + a_seconded, + ), + ) + .await; - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) - if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } - ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + } - let req = assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(mut requests, IfDisconnected::ImmediateError)) => { - assert_eq!(requests.len(), 1); - assert_matches!( - requests.pop().unwrap(), - Requests::AttestedCandidateVStaging(mut outgoing) => { - assert_eq!(outgoing.peer, Recipient::Peer(peer_a.clone())); - assert_eq!(outgoing.payload.candidate_hash, candidate_hash); - - let res = AttestedCandidateResponse { - candidate_receipt: candidate.clone(), - persisted_validation_data: pvd.clone(), - statements: vec![], - }; - outgoing.pending_response.send(Ok(res.encode())); - } - ); - } - ); + // Send a request to peer and mock its response. 
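+        // `handle_sent_request` asserts that an `AttestedCandidateVStaging` request
+        // goes out and answers it with the given candidate, persisted validation
+        // data, and statements.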
+ { + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + vec![], + ) + .await; - assert_matches!( - overseer.recv().await, - AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) - if p == peer_a && r == BENEFIT_VALID_RESPONSE => { } - ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_RESPONSE => { } + ); + } answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; let next_relay_parent = Hash::repeat_byte(2); - let next_test_leaf = TestLeaf { - number: 2, - hash: next_relay_parent, - parent_hash: relay_parent, - session: 1, - availability_cores: state.make_availability_cores(|i| { - CoreState::Scheduled(ScheduledCore { - para_id: ParaId::from(i as u32), - collator: None, - }) - }), - para_data: (0..state.session_info.validator_groups.len()) - .map(|i| { - ( - ParaId::from(i as u32), - PerParaData { min_relay_parent: 1, head_data: vec![1, 2, 3].into() }, - ) - }) - .collect(), - }; + let mut next_test_leaf = state.make_dummy_leaf(next_relay_parent); + next_test_leaf.parent_hash = relay_parent; + next_test_leaf.number = 2; - activate_leaf(&mut overseer, local_para, &next_test_leaf, &state, false).await; + activate_leaf(&mut overseer, &next_test_leaf, &state, false).await; answer_expected_hypothetical_depth_request( &mut overseer, @@ -1203,7 +1038,7 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { assert_matches!( s.payload(), FullStatementWithPVD::Seconded(c, p) - if c == &candidate && p == &pvd => {} + if c == &candidate && p == &pvd ); assert_eq!(s.validator_index(), v_a); } @@ -1213,4 +1048,210 @@ fn cluster_messages_imported_after_new_leaf_importable_check() { }); } -// TODO [now]: ensure seconding limit is respected +#[test] +fn ensure_seconding_limit_is_respected() { + // `max_candidate_depth: 1` for a `seconding_limit` of 2. + let config = TestConfig { + validator_count: 20, + group_size: 4, + local_validator: true, + async_backing_params: Some(AsyncBackingParameters { + max_candidate_depth: 1, + allowed_ancestry_len: 3, + }), + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate_1, pvd_1) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let (candidate_2, pvd_2) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![7, 8, 9].into(), + Hash::repeat_byte(43).into(), + ); + let (candidate_3, _pvd_3) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![10, 11, 12].into(), + Hash::repeat_byte(44).into(), + ); + let candidate_hash_1 = candidate_1.hash(); + let candidate_hash_2 = candidate_2.hash(); + let candidate_hash_3 = candidate_3.hash(); + + let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + + // peers A,B,C are in group, have relay parent in view. 
+ { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Confirm the candidates locally so that we don't send out requests. + + // Candidate 1. + { + let validator_index = state.local.as_ref().unwrap().validator_index; + let statement = state + .sign_full_statement( + validator_index, + Statement::Seconded(candidate_1), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + pvd_1, + ) + .clone(); + + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, statement), + }) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Candidate 2. + { + let validator_index = state.local.as_ref().unwrap().validator_index; + let statement = state + .sign_full_statement( + validator_index, + Statement::Seconded(candidate_2), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + pvd_2, + ) + .clone(); + + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, statement), + }) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Send first statement from peer A. + { + let statement = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash_1), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + } + + // Send second statement from peer A. + { + let statement = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash_2), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + } + + // Send third statement from peer A. 
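+        // With `max_candidate_depth: 1`, at most two seconded candidates per validator
+        // are accepted, so this third `Seconded` statement exceeds the limit and
+        // incurs `COST_EXCESSIVE_SECONDED`.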
+ { + let statement = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash_3), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == COST_EXCESSIVE_SECONDED => { } + ); + } + + overseer + }); +} diff --git a/node/network/statement-distribution/src/vstaging/tests/grid.rs b/node/network/statement-distribution/src/vstaging/tests/grid.rs index c5eb3826846e..a861755a7681 100644 --- a/node/network/statement-distribution/src/vstaging/tests/grid.rs +++ b/node/network/statement-distribution/src/vstaging/tests/grid.rs @@ -14,28 +14,2442 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -// TODO [now]: backed candidate leads to advertisement to relevant validators with relay-parent +use super::*; -// TODO [now]: received advertisement before confirmation leads to request +use bitvec::order::Lsb0; +use polkadot_node_network_protocol::vstaging::{ + BackedCandidateAcknowledgement, BackedCandidateManifest, +}; +use polkadot_node_subsystem::messages::CandidateBackingMessage; +use polkadot_primitives_test_helpers::make_candidate; -// TODO [now]: received advertisement after backing leads to acknowledgement +// Backed candidate leads to advertisement to relevant validators with relay-parent. +#[test] +fn backed_candidate_leads_to_advertisement() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; -// TODO [now]: received advertisement after confirmation but before backing leads to nothing + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); -// TODO [now]: additional statements are shared after manifest exchange + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); -// TODO [now]: grid-sending validator view entering relay-parent leads to advertisement + let test_leaf = state.make_dummy_leaf(relay_parent); -// TODO [now]: advertisement not re-sent after re-entering relay parent (view oscillation) + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); -// TODO [now]: acknowledgements sent only when candidate backed + let other_group_validators = state.group_validators(local_validator.group_index, true); + let target_group_validators = + state.group_validators((local_validator.group_index.0 + 1).into(), true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; -// TODO [now]: grid statements imported to backing once candidate enters hypothetical frontier + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + // peer C is not in group, has relay parent in view. 
+ // peer D is not in group, has no relay parent in view. + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; -// TODO [now]: advertisements rejected from incorrect peers + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(v_b)].into_iter().collect()), + ) + .await; -// TODO [now]: manifests rejected with unknown relay parent or when not a validator + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; -// TODO [now]: advertisements rejected when candidate group doers not match para + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; -// TODO [now]: peer reported when advertisement conflicting with confirmed candidate. + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + // Confirm the candidate locally so that we don't send out requests. + { + let statement = state + .sign_full_statement( + local_validator.validator_index, + Statement::Seconded(candidate.clone()), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + pvd.clone(), + ) + .clone(); + + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, statement), + }) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Send enough statements to make candidate backable, make sure announcements are sent. + + // Send statement from peer A. + { + let statement = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + } + + // Send statement from peer B. + { + let statement = state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_b.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_b && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] + ); + } + + // Send Backed notification. 
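+        // Only now that the candidate is backed may it be advertised along the grid:
+        // expect a manifest to peer C, the grid peer with the relay parent in view.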
+ { + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Backed(candidate_hash), + }) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages:: NetworkBridgeTx( + NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging( + protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + ), + ), + ) + ) => { + assert_eq!(peers, vec![peer_c]); + assert_eq!(manifest, BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: local_validator.group_index, + para_id: local_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }); + } + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + overseer + }); +} + +#[test] +fn received_advertisement_before_confirmation_leads_to_request() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + + let other_group = + next_group_index(local_validator.group_index, validator_count, group_size); + let other_para = ParaId::from(other_group.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let other_group_validators = state.group_validators(local_validator.group_index, true); + let target_group_validators = state.group_validators(other_group, true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + // peer C is not in group, has relay parent in view. + // peer D is not in group, has relay parent in view. + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(v_b)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_d.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. 
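+        // Grid neighbors are derived from the session's gossip topology; manifests
+        // can only be exchanged once it is known.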
+ send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + // Receive an advertisement from C on an unconfirmed candidate. + { + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + ) + .await; + + let statements = vec![ + state + .sign_statement( + v_c, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + state + .sign_statement( + v_d, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + handle_sent_request( + &mut overseer, + peer_c, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + // C provided two statements we're seeing for the first time. + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT => { } + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_RESPONSE => { } + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + overseer + }); +} + +// 1. We receive manifest from grid peer, request, pass votes to backing, then receive Backed +// message. Only then should we send an acknowledgement to the grid peer. +// +// 2. (starting from end state of (1)) we receive a manifest about the same candidate from another +// grid peer and instantaneously acknowledge. +// +// Bit more context about this design choice: Statement-distribution doesn't fully emulate the +// statement logic of backing and only focuses on the number of statements. That means that we might +// request a manifest and for some reason the backing subsystem would still not consider the +// candidate as backed. So, in particular, we don't want to advertise such an unbacked candidate +// along the grid & increase load on ourselves and our peers for serving & importing such a +// candidate. 
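+//
+// A minimal sketch of that gating rule (hypothetical helper and type names, not
+// part of this patch):
+//
+// fn on_manifest_imported(candidate: &CandidateState, from: PeerId) -> Action {
+//     match (candidate.is_confirmed(), candidate.is_backed()) {
+//         // Case 1: fetch the candidate first; acknowledge only after the
+//         // backing subsystem reports it as backed.
+//         (false, _) => Action::RequestCandidate(from),
+//         (true, false) => Action::Nothing,
+//         // Case 2: already confirmed and backed, acknowledge immediately.
+//         (true, true) => Action::Acknowledge(from),
+//     }
+// }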
+#[test] +fn received_advertisement_after_backing_leads_to_acknowledgement() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + let peer_e = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + + let other_group = + next_group_index(local_validator.group_index, validator_count, group_size); + let other_para = ParaId::from(other_group.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let target_group_validators = state.group_validators(other_group, true); + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + let v_e = target_group_validators[2]; + + // Connect C, D, E + { + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_e.clone(), + Some(vec![state.discovery_id(v_e)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_d.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_e.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + let statement_c = state + .sign_statement( + v_c, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + let statement_d = state + .sign_statement( + v_d, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + // Receive an advertisement from C. + { + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + + // Should send a request to C. 
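+ // `handle_sent_request` intercepts the outgoing `AttestedCandidateVStaging`
+ // request and answers it with the candidate, its persisted validation data
+ // and the statements below; the blank mask means we ask C not to withhold
+ // anything, since we have no prior statements for this candidate.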
+ let statements = vec![ + statement_c.clone(), + statement_d.clone(), + state + .sign_statement( + v_e, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + handle_sent_request( + &mut overseer, + peer_c, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_RESPONSE + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Receive Backed message. + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Backed(candidate_hash), + }) + .await; + + // Should send an acknowledgement back to C. + { + assert_matches!( + overseer.recv().await, + AllMessages:: NetworkBridgeTx( + NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging( + protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack), + ), + ), + ) + ) => { + assert_eq!(peers, vec![peer_c]); + assert_eq!(ack, BackedCandidateAcknowledgement { + candidate_hash, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }); + } + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Receive a manifest about the same candidate from peer D. + { + send_peer_message( + &mut overseer, + peer_d.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + + let expected_ack = BackedCandidateAcknowledgement { + candidate_hash, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + // Instantaneously acknowledge. + assert_matches!( + overseer.recv().await, + AllMessages:: NetworkBridgeTx( + NetworkBridgeTxMessage::SendValidationMessages(messages) + ) => { + assert_eq!(messages.len(), 1); + assert_eq!(messages[0].0, vec![peer_d]); + + assert_matches!( + &messages[0].1, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack) + )) if *ack == expected_ack + ); + } + ); + } + + overseer + }); +} + +// Received advertisement after confirmation but before backing leads to nothing. 
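+// "Nothing" meaning: no second request (the candidate is already confirmed)
+// and no acknowledgement (it is not backed yet). The harness checks this
+// implicitly, panicking on any unconsumed message when the test concludes.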
+#[test] +fn received_advertisement_after_confirmation_before_backing() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + let peer_e = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + + let other_group = + next_group_index(local_validator.group_index, validator_count, group_size); + let other_para = ParaId::from(other_group.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let target_group_validators = state.group_validators(other_group, true); + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + let v_e = target_group_validators[2]; + + // Connect C, D, E + { + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_e.clone(), + Some(vec![state.discovery_id(v_e)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_d.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_e.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + let statement_c = state + .sign_statement( + v_c, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + let statement_d = state + .sign_statement( + v_d, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + // Receive an advertisement from C. + { + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + + // Should send a request to C. 
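+ // Note that the response below carries three `Seconded` statements although
+ // the manifest only claimed two; each fresh, valid statement is still
+ // rewarded with `BENEFIT_VALID_STATEMENT`.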
+ let statements = vec![ + statement_c.clone(), + statement_d.clone(), + state + .sign_statement( + v_e, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + handle_sent_request( + &mut overseer, + peer_c, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_RESPONSE + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Receive advertisement from peer D (after confirmation but before backing). + { + send_peer_message( + &mut overseer, + peer_d.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + } + + overseer + }); +} + +#[test] +fn additional_statements_are_shared_after_manifest_exchange() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + let peer_e = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + + let other_group = + next_group_index(local_validator.group_index, validator_count, group_size); + let other_para = ParaId::from(other_group.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let target_group_validators = state.group_validators(other_group, true); + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + let v_e = target_group_validators[2]; + + // Connect C, D, E + { + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_e.clone(), + Some(vec![state.discovery_id(v_e)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_d.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_e.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. 
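+ // In this test, C's manifest claims `Seconded` statements from v_d and v_e,
+ // while D's later manifest claims v_c and v_d; that difference in claimed
+ // knowledge is what should make us forward v_e's statement to D alongside
+ // the acknowledgement.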
+ send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + // Receive an advertisement from C. + { + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + } + + // Should send a request to C. + { + let statements = vec![ + state + .sign_statement( + v_d, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + state + .sign_statement( + v_e, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + + handle_sent_request( + &mut overseer, + peer_c, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_RESPONSE + ); + } + + let hypothetical = HypotheticalCandidate::Complete { + candidate_hash, + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let membership = vec![(relay_parent, vec![0])]; + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![(hypothetical, membership)], + None, + false, + ) + .await; + + // Statements are sent to the Backing subsystem. + { + assert_matches!( + overseer.recv().await, + AllMessages::CandidateBacking( + CandidateBackingMessage::Statement(hash, statement) + ) => { + assert_eq!(hash, relay_parent); + assert_matches!( + statement.payload(), + FullStatementWithPVD::Seconded(c, p) + if c == &candidate && p == &pvd + ); + } + ); + assert_matches!( + overseer.recv().await, + AllMessages::CandidateBacking( + CandidateBackingMessage::Statement(hash, statement) + ) => { + assert_eq!(hash, relay_parent); + assert_matches!( + statement.payload(), + FullStatementWithPVD::Seconded(c, p) + if c == &candidate && p == &pvd + ); + } + ); + } + + // Receive Backed message. + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Backed(candidate_hash), + }) + .await; + + // Should send an acknowledgement back to C. 
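+ // The acknowledgement advertises our own statement knowledge, i.e. exactly
+ // the `Seconded` statements of v_d and v_e that we imported above, not the
+ // full group.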
+ { + assert_matches!( + overseer.recv().await, + AllMessages:: NetworkBridgeTx( + NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging( + protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack), + ), + ), + ) + ) => { + assert_eq!(peers, vec![peer_c]); + assert_eq!(ack, BackedCandidateAcknowledgement { + candidate_hash, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }); + } + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Receive a manifest about the same candidate from peer D. Contains different statements. + { + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + send_peer_message( + &mut overseer, + peer_d.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + + let expected_ack = BackedCandidateAcknowledgement { + candidate_hash, + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + // Instantaneously acknowledge. + assert_matches!( + overseer.recv().await, + AllMessages:: NetworkBridgeTx( + NetworkBridgeTxMessage::SendValidationMessages(messages) + ) => { + assert_eq!(messages.len(), 2); + assert_eq!(messages[0].0, vec![peer_d]); + assert_eq!(messages[1].0, vec![peer_d]); + + assert_matches!( + &messages[0].1, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::BackedCandidateKnown(ack) + )) if *ack == expected_ack + ); + + assert_matches!( + &messages[1].1, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::Statement(r, s) + )) if *r == relay_parent && s.unchecked_payload() == &CompactStatement::Seconded(candidate_hash) && s.unchecked_validator_index() == v_e + ); + } + ); + } + + overseer + }); +} + +// Grid-sending validator view entering relay-parent leads to advertisement. 
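+// In other words, a backed-candidate manifest is sent out lazily: not at the
+// moment the candidate becomes backed, but once a grid peer's view actually
+// contains the relay parent and the advertisement can be acted upon.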
+#[test] +fn advertisement_sent_when_peer_enters_relay_parent_view() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let other_group_validators = state.group_validators(local_validator.group_index, true); + let target_group_validators = + state.group_validators((local_validator.group_index.0 + 1).into(), true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + // peer C is not in group, has relay parent in view. + // peer D is not in group, has no relay parent in view. + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(v_b)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + // Confirm the candidate locally so that we don't send out requests. + { + let statement = state + .sign_full_statement( + local_validator.validator_index, + Statement::Seconded(candidate.clone()), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + pvd.clone(), + ) + .clone(); + + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, statement), + }) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Send enough statements to make candidate backable, make sure announcements are sent. + + // Send statement from peer A. 
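+ // Statement-distribution doesn't declare the candidate backable by itself;
+ // the statements below only build up knowledge, and the grid announcement is
+ // gated on the explicit `Backed` message from the backing subsystem.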
+ { + let statement = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + } + + // Send statement from peer B. + { + let statement = state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_b.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_b && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] + ); + } + + // Send Backed notification. + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Backed(candidate_hash), + }) + .await; + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + + // Relay parent enters view of peer C. + { + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + + let expected_manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: local_validator.group_index, + para_id: local_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + assert_matches!( + overseer.recv().await, + AllMessages:: NetworkBridgeTx( + NetworkBridgeTxMessage::SendValidationMessages(messages) + ) => { + assert_eq!(messages.len(), 1); + assert_eq!(messages[0].0, vec![peer_c]); + + assert_matches!( + &messages[0].1, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest) + )) => { + assert_eq!(*manifest, expected_manifest); + } + ); + } + ); + } + + overseer + }); +} + +// Advertisement not re-sent after re-entering relay parent (view oscillation). 
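+// We remember which manifests were already sent to each peer, so when C drops
+// the relay parent from its view and then picks it up again, no duplicate
+// manifest should go out; the harness verifies this by panicking on any
+// leftover message at the end.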
+#[test] +fn advertisement_not_re_sent_when_peer_re_enters_view() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let other_group_validators = state.group_validators(local_validator.group_index, true); + let target_group_validators = + state.group_validators((local_validator.group_index.0 + 1).into(), true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + // peer C is not in group, has relay parent in view. + // peer D is not in group, has no relay parent in view. + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(v_b)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + // Confirm the candidate locally so that we don't send out requests. + { + let statement = state + .sign_full_statement( + local_validator.validator_index, + Statement::Seconded(candidate.clone()), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + pvd.clone(), + ) + .clone(); + + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, statement), + }) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Send enough statements to make candidate backable, make sure announcements are sent. + + // Send statement from peer A. 
+ { + let statement = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + } + + // Send statement from peer B. + { + let statement = state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_b.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_b && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] + ); + } + + // Send Backed notification. + { + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Backed(candidate_hash), + }) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages:: NetworkBridgeTx( + NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging( + protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + ), + ), + ) + ) => { + assert_eq!(peers, vec![peer_c]); + assert_eq!(manifest, BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: local_validator.group_index, + para_id: local_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }); + } + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Peer leaves view. + send_peer_view_change(&mut overseer, peer_c.clone(), view![]).await; + + // Peer re-enters view. + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + + overseer + }); +} + +// Grid statements imported to backing once candidate enters hypothetical frontier. 
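+// Statements fetched over the grid are only forwarded to `CandidateBacking`
+// once the hypothetical frontier answer places the candidate in a fragment
+// tree; here the membership `vec![(relay_parent, vec![0])]` puts it at depth 0
+// under the active leaf.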
+#[test] +fn grid_statements_imported_to_backing() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + let peer_e = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + + let other_group = + next_group_index(local_validator.group_index, validator_count, group_size); + let other_para = ParaId::from(other_group.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let target_group_validators = state.group_validators(other_group, true); + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + let v_e = target_group_validators[2]; + + // Connect C, D, E + { + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_e.clone(), + Some(vec![state.discovery_id(v_e)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_d.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_e.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + // Receive an advertisement from C. + { + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + } + + // Should send a request to C. 
+ { + let statements = vec![ + state + .sign_statement( + v_d, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + state + .sign_statement( + v_e, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + + handle_sent_request( + &mut overseer, + peer_c, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_RESPONSE + ); + } + + let hypothetical = HypotheticalCandidate::Complete { + candidate_hash, + receipt: Arc::new(candidate.clone()), + persisted_validation_data: pvd.clone(), + }; + let membership = vec![(relay_parent, vec![0])]; + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![(hypothetical, membership)], + None, + false, + ) + .await; + + // Receive messages from Backing subsystem. + { + assert_matches!( + overseer.recv().await, + AllMessages::CandidateBacking( + CandidateBackingMessage::Statement(hash, statement) + ) => { + assert_eq!(hash, relay_parent); + assert_matches!( + statement.payload(), + FullStatementWithPVD::Seconded(c, p) + if c == &candidate && p == &pvd + ); + } + ); + assert_matches!( + overseer.recv().await, + AllMessages::CandidateBacking( + CandidateBackingMessage::Statement(hash, statement) + ) => { + assert_eq!(hash, relay_parent); + assert_matches!( + statement.payload(), + FullStatementWithPVD::Seconded(c, p) + if c == &candidate && p == &pvd + ); + } + ); + } + + overseer + }); +} + +#[test] +fn advertisements_rejected_from_incorrect_peers() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + + let other_group = + next_group_index(local_validator.group_index, validator_count, group_size); + let other_para = ParaId::from(other_group.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let other_group_validators = state.group_validators(local_validator.group_index, true); + let target_group_validators = state.group_validators(other_group, true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. 
+ // peer C is not in group, has relay parent in view. + // peer D is not in group, has no relay parent in view. + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(v_b)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + // Receive an advertisement from A (our group). + { + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == COST_UNEXPECTED_MANIFEST_DISALLOWED => { } + ); + } + + // Receive an advertisement from B (our group). + { + send_peer_message( + &mut overseer, + peer_b.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_b && r == COST_UNEXPECTED_MANIFEST_DISALLOWED => { } + ); + } + + overseer + }); +} + +#[test] +fn manifest_rejected_with_unknown_relay_parent() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let unknown_parent = Hash::repeat_byte(2); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + + let other_group = + next_group_index(local_validator.group_index, validator_count, group_size); + let other_para = ParaId::from(other_group.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + unknown_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let target_group_validators = state.group_validators(other_group, true); + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + + // peer C is not in group, has relay parent in view. + // peer D is not in group, has no relay parent in view. 
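+ // The manifest below names `unknown_parent`, a relay parent we never
+ // activated, so its claims cannot be checked against any known state and C
+ // is reported with `COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE`.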
+ { + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + let manifest = BackedCandidateManifest { + relay_parent: unknown_parent, + candidate_hash, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + // Receive an advertisement from C. + { + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE => { } + ); + } + + overseer + }); +} + +#[test] +fn manifest_rejected_when_not_a_validator() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: false, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let other_group = GroupIndex::from(0); + let other_para = ParaId::from(other_group.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let target_group_validators = state.group_validators(other_group, true); + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + + // peer C is not in group, has relay parent in view. + // peer D is not in group, has no relay parent in view. + { + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + // Receive an advertisement from C. 
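+ // With `local_validator: false` we hold no position in the grid at all, so
+ // no manifest can be expected from anyone; C is reported with
+ // `COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE`, just as for an unknown relay
+ // parent.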
+ { + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == COST_UNEXPECTED_MANIFEST_MISSING_KNOWLEDGE => { } + ); + } + + overseer + }); +} + +#[test] +fn manifest_rejected_when_group_does_not_match_para() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + + let other_group = + next_group_index(local_validator.group_index, validator_count, group_size); + // Create a mismatch between group and para. + let other_para = next_group_index(other_group, validator_count, group_size); + let other_para = ParaId::from(other_para.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let target_group_validators = state.group_validators(other_group, true); + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + + // peer C is not in group, has relay parent in view. + // peer D is not in group, has no relay parent in view. + { + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + // Receive an advertisement from C. 
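+ // The dummy leaf schedules para `i` on core `i`, so (assuming the trivial
+ // group-to-core assignment used here) pairing `other_group` with a para
+ // assigned to the following group cannot be honest, and the manifest is
+ // reported with `COST_MALFORMED_MANIFEST`.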
+ { + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == COST_MALFORMED_MANIFEST => { } + ); + } + + overseer + }); +} + +#[test] +fn peer_reported_for_advertisement_conflicting_with_confirmed_candidate() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + let peer_e = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + + let other_group = + next_group_index(local_validator.group_index, validator_count, group_size); + let other_para = ParaId::from(other_group.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let target_group_validators = state.group_validators(other_group, true); + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + let v_e = target_group_validators[2]; + + // Connect C, D, E + { + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_e.clone(), + Some(vec![state.discovery_id(v_e)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_d.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_e.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + }, + }; + + let statement_c = state + .sign_statement( + v_c, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + let statement_d = state + .sign_statement( + v_d, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + // Receive an advertisement from C. + { + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + + // Should send a request to C. 
+ let statements = vec![ + statement_c.clone(), + statement_d.clone(), + state + .sign_statement( + v_e, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + + handle_sent_request( + &mut overseer, + peer_c, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_RESPONSE + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Receive conflicting advertisement from peer C after confirmation. + // + // NOTE: This causes a conflict because we track received manifests on a per-validator basis, + // and this is the second time we're getting a manifest from C. + { + let mut manifest = manifest.clone(); + manifest.statement_knowledge = StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }; + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == COST_CONFLICTING_MANIFEST + ); + } + + overseer + }); +} diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index 583d17616629..f290c860ac5c 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -14,15 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -// TODO [now]: Remove once some tests are written. 
-#![allow(unused)] - use super::*; use crate::*; use polkadot_node_network_protocol::{ + grid_topology::TopologyPeerInfo, request_response::{outgoing::Recipient, ReqProtocolNames}, view, ObservedRole, }; +use polkadot_node_primitives::Statement; use polkadot_node_subsystem::messages::{ network_bridge_event::NewGossipTopology, AllMessages, ChainApiMessage, FragmentTreeMembership, HypotheticalCandidate, NetworkBridgeEvent, ProspectiveParachainsMessage, RuntimeApiMessage, @@ -31,10 +30,9 @@ use polkadot_node_subsystem::messages::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_types::{jaeger, ActivatedLeaf, LeafStatus}; use polkadot_primitives::vstaging::{ - AssignmentId, AssignmentPair, AsyncBackingParameters, BlockNumber, CandidateCommitments, - CandidateDescriptor, CommittedCandidateReceipt, CoreState, GroupRotationInfo, HeadData, Header, - IndexedVec, PersistedValidationData, ScheduledCore, SessionIndex, SessionInfo, - ValidationCodeHash, ValidatorPair, + AssignmentPair, AsyncBackingParameters, BlockNumber, CommittedCandidateReceipt, CoreState, + GroupRotationInfo, HeadData, Header, IndexedVec, PersistedValidationData, ScheduledCore, + SessionIndex, SessionInfo, ValidatorPair, }; use sc_keystore::LocalKeystore; use sp_application_crypto::Pair as PairT; @@ -54,7 +52,7 @@ mod requests; type VirtualOverseer = test_helpers::TestSubsystemContextHandle; -const ASYNC_BACKING_PARAMETERS: AsyncBackingParameters = +const DEFAULT_ASYNC_BACKING_PARAMETERS: AsyncBackingParameters = AsyncBackingParameters { max_candidate_depth: 4, allowed_ancestry_len: 3 }; // Some deterministic genesis hash for req/res protocol names @@ -66,11 +64,11 @@ struct TestConfig { group_size: usize, // whether the local node should be a validator local_validator: bool, + async_backing_params: Option, } -#[derive(Clone)] +#[derive(Debug, Clone)] struct TestLocalValidator { - validator_id: ValidatorId, validator_index: ValidatorIndex, group_index: GroupIndex, } @@ -129,7 +127,6 @@ impl TestState { let local = if let Some(local_pos) = local_validator_pos { Some(TestLocalValidator { - validator_id: validators[local_pos].public().clone(), validator_index: ValidatorIndex(local_pos as _), group_index: GroupIndex((local_pos / config.group_size) as _), }) @@ -157,10 +154,46 @@ impl TestState { TestState { config, local, validators, session_info, req_sender } } + fn make_dummy_leaf(&self, relay_parent: Hash) -> TestLeaf { + TestLeaf { + number: 1, + hash: relay_parent, + parent_hash: Hash::repeat_byte(0), + session: 1, + availability_cores: self.make_availability_cores(|i| { + CoreState::Scheduled(ScheduledCore { + para_id: ParaId::from(i as u32), + collator: None, + }) + }), + para_data: (0..self.session_info.validator_groups.len()) + .map(|i| (ParaId::from(i as u32), PerParaData::new(1, vec![1, 2, 3].into()))) + .collect(), + } + } + fn make_availability_cores(&self, f: impl Fn(usize) -> CoreState) -> Vec { (0..self.session_info.validator_groups.len()).map(f).collect() } + fn make_dummy_topology(&self) -> NewGossipTopology { + let validator_count = self.config.validator_count; + NewGossipTopology { + session: 1, + topology: SessionGridTopology::new( + (0..validator_count).collect(), + (0..validator_count) + .map(|i| TopologyPeerInfo { + peer_ids: Vec::new(), + validator_index: ValidatorIndex(i as u32), + discovery_id: AuthorityDiscoveryPair::generate().0.public(), + }) + .collect(), + ), + local_index: self.local.as_ref().map(|local| local.validator_index), + } + } + fn group_validators( &self, 
group_index: GroupIndex, @@ -178,10 +211,6 @@ impl TestState { .collect() } - fn validator_id(&self, validator_index: ValidatorIndex) -> ValidatorId { - self.session_info.validators.get(validator_index).unwrap().clone() - } - fn discovery_id(&self, validator_index: ValidatorIndex) -> AuthorityDiscoveryId { self.session_info.discovery_keys[validator_index.0 as usize].clone() } @@ -200,6 +229,27 @@ impl TestState { .unwrap() } + fn sign_full_statement( + &self, + validator_index: ValidatorIndex, + statement: Statement, + context: &SigningContext, + pvd: PersistedValidationData, + ) -> SignedFullStatementWithPVD { + let payload = statement.to_compact().signing_payload(context); + let pair = &self.validators[validator_index.0 as usize]; + let signature = pair.sign(&payload[..]); + + SignedFullStatementWithPVD::new( + statement.supply_pvd(pvd), + validator_index, + signature, + context, + &pair.public(), + ) + .unwrap() + } + // send a request out, returning a future which expects a response. async fn send_request( &mut self, @@ -258,6 +308,11 @@ fn test_harness>( futures::executor::block_on(future::join( async move { let mut virtual_overseer = test_fut.await; + // Ensure we have handled all responses. + if let Ok(Some(msg)) = virtual_overseer.rx.try_next() { + panic!("Did not handle all responses: {:?}", msg); + } + // Conclude. virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; }, subsystem, @@ -295,7 +350,6 @@ impl TestLeaf { async fn activate_leaf( virtual_overseer: &mut VirtualOverseer, - para_id: ParaId, leaf: &TestLeaf, test_state: &TestState, expect_session_info_request: bool, @@ -313,32 +367,23 @@ async fn activate_leaf( )))) .await; - handle_leaf_activation( - virtual_overseer, - para_id, - leaf, - test_state, - expect_session_info_request, - ) - .await; + handle_leaf_activation(virtual_overseer, leaf, test_state, expect_session_info_request).await; } async fn handle_leaf_activation( virtual_overseer: &mut VirtualOverseer, - para_id: ParaId, leaf: &TestLeaf, test_state: &TestState, expect_session_info_request: bool, ) { let TestLeaf { number, hash, parent_hash, para_data, session, availability_cores } = leaf; - let PerParaData { min_relay_parent, head_data } = leaf.para_data(para_id); assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) ) if parent == *hash => { - tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); + tx.send(Ok(test_state.config.async_backing_params.unwrap_or(DEFAULT_ASYNC_BACKING_PARAMETERS))).unwrap(); } ); @@ -402,13 +447,46 @@ async fn handle_leaf_activation( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionInfo(s, tx))) if s == *session => { + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionInfo(s, tx))) if parent == *hash && s == *session => { tx.send(Ok(Some(test_state.session_info.clone()))).unwrap(); } ); } } +/// Intercepts an outgoing request, checks the fields, and sends the response. 
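+///
+/// Typical use from a test, answering with the whole group's statements and an
+/// empty mask:
+///
+/// handle_sent_request(&mut overseer, peer_c, candidate_hash,
+/// StatementFilter::blank(group_size), candidate.clone(), pvd.clone(),
+/// statements).await;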
+async fn handle_sent_request( + virtual_overseer: &mut VirtualOverseer, + peer: PeerId, + candidate_hash: CandidateHash, + mask: StatementFilter, + candidate_receipt: CommittedCandidateReceipt, + persisted_validation_data: PersistedValidationData, + statements: Vec, +) { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(mut requests, IfDisconnected::ImmediateError)) => { + assert_eq!(requests.len(), 1); + assert_matches!( + requests.pop().unwrap(), + Requests::AttestedCandidateVStaging(outgoing) => { + assert_eq!(outgoing.peer, Recipient::Peer(peer)); + assert_eq!(outgoing.payload.candidate_hash, candidate_hash); + assert_eq!(outgoing.payload.mask, mask); + + let res = AttestedCandidateResponse { + candidate_receipt, + persisted_validation_data, + statements, + }; + outgoing.pending_response.send(Ok(res.encode())).unwrap(); + } + ); + } + ); +} + async fn answer_expected_hypothetical_depth_request( virtual_overseer: &mut VirtualOverseer, responses: Vec<(HypotheticalCandidate, FragmentTreeMembership)>, @@ -430,7 +508,7 @@ async fn answer_expected_hypothetical_depth_request( ); } - tx.send(responses); + tx.send(responses).unwrap(); } ) } @@ -458,6 +536,8 @@ async fn connect_peer( .await; } +// TODO: Add some tests using this? +#[allow(dead_code)] async fn disconnect_peer(virtual_overseer: &mut VirtualOverseer, peer: PeerId) { virtual_overseer .send(FromOrchestra::Communication { @@ -501,3 +581,14 @@ async fn send_new_topology(virtual_overseer: &mut VirtualOverseer, topology: New }) .await; } + +fn next_group_index( + group_index: GroupIndex, + validator_count: usize, + group_size: usize, +) -> GroupIndex { + let next_group = group_index.0 + 1; + let num_groups = + validator_count / group_size + if validator_count % group_size > 0 { 1 } else { 0 }; + GroupIndex::from(next_group % num_groups as u32) +} diff --git a/node/network/statement-distribution/src/vstaging/tests/requests.rs b/node/network/statement-distribution/src/vstaging/tests/requests.rs index 5e624bd622eb..313f2831a992 100644 --- a/node/network/statement-distribution/src/vstaging/tests/requests.rs +++ b/node/network/statement-distribution/src/vstaging/tests/requests.rs @@ -14,14 +14,1559 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
-// TODO [now]: peer reported for providing statements meant to be masked out +use super::*; -// TODO [now]: peer reported for not providing enough statements, request retried +use bitvec::order::Lsb0; +use parity_scale_codec::{Decode, Encode}; +use polkadot_node_network_protocol::{ + request_response::vstaging as request_vstaging, vstaging::BackedCandidateManifest, +}; +use polkadot_primitives_test_helpers::make_candidate; +use sc_network::config::{ + IncomingRequest as RawIncomingRequest, OutgoingResponse as RawOutgoingResponse, +}; -// TODO [now]: peer reported for providing duplicate statements +#[test] +fn cluster_peer_allowed_to_send_incomplete_statements() { + let group_size = 3; + let config = TestConfig { + validator_count: 20, + group_size, + local_validator: true, + async_backing_params: None, + }; -// TODO [now]: peer reported for providing statements with invalid signatures or wrong validator IDs + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); -// TODO [now]: local node sanity checks incoming requests + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); -// TODO [now]: local node respects statement mask + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + // peer C is not in group, has relay parent in view. + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()), + ) + .await; + + connect_peer(&mut overseer, peer_c.clone(), None).await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Peer in cluster sends a statement, triggering a request. + { + let a_seconded = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + a_seconded, + ), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + } + + // Send a request to peer and mock its response to include just one statement. 
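+ // Only v_b's `Seconded` statement comes back. Because peer A is in our own
+ // cluster (backing group), an incomplete response like this is acceptable:
+ // the statement is rewarded, imported, and circulated onward in the cluster
+ // rather than punished.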
+ { + let b_seconded = state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + let statements = vec![b_seconded.clone()]; + // `1` indicates statements NOT to request. + let mask = StatementFilter::blank(group_size); + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + mask, + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_RESPONSE => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages:: NetworkBridgeTx( + NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging( + protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::Statement(hash, statement), + ), + ), + ) + ) => { + assert_eq!(peers, vec![peer_a]); + assert_eq!(hash, relay_parent); + assert_eq!(statement, b_seconded); + } + ); + } + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + + overseer + }); +} + +#[test] +fn peer_reported_for_providing_statements_meant_to_be_masked_out() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: Some(AsyncBackingParameters { + // Makes `seconding_limit: 2` (easier to hit the limit). + max_candidate_depth: 1, + allowed_ancestry_len: 3, + }), + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + let peer_e = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + + let other_group = + next_group_index(local_validator.group_index, validator_count, group_size); + let other_para = ParaId::from(other_group.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate_1, pvd_1) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let (candidate_2, pvd_2) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![7, 8, 9].into(), + Hash::repeat_byte(43).into(), + ); + let (candidate_3, pvd_3) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![10, 11, 12].into(), + Hash::repeat_byte(44).into(), + ); + let candidate_hash_1 = candidate_1.hash(); + let candidate_hash_2 = candidate_2.hash(); + let candidate_hash_3 = candidate_3.hash(); + + let target_group_validators = state.group_validators(other_group, true); + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + let v_e = target_group_validators[2]; + + // Connect C, D, E + { + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_e.clone(), + 
Some(vec![state.discovery_id(v_e)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_d.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_e.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + // Peer C advertises candidate 1. + { + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash: candidate_hash_1, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd_1.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + + let statements = vec![ + state + .sign_statement( + v_c, + CompactStatement::Seconded(candidate_hash_1), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + state + .sign_statement( + v_d, + CompactStatement::Seconded(candidate_hash_1), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + handle_sent_request( + &mut overseer, + peer_c, + candidate_hash_1, + StatementFilter::blank(group_size), + candidate_1.clone(), + pvd_1.clone(), + statements, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_RESPONSE + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Peer C advertises candidate 2. 
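+ //
+ // The seconders this time are the validators at group positions 1 and 2;
+ // together with candidate 1, this brings the validator at position 1 up to
+ // the `seconding_limit` of 2.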
+ { + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash: candidate_hash_2, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd_2.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 0, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + + let statements = vec![ + state + .sign_statement( + v_d, + CompactStatement::Seconded(candidate_hash_2), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + state + .sign_statement( + v_e, + CompactStatement::Seconded(candidate_hash_2), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + handle_sent_request( + &mut overseer, + peer_c, + candidate_hash_2, + StatementFilter::blank(group_size), + candidate_2.clone(), + pvd_2.clone(), + statements, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_RESPONSE + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Peer C sends an announcement for candidate 3. Should hit seconding limit for validator 1. + // + // NOTE: The manifest is immediately rejected before a request is made due to + // "over-seconding" validator 1. On the other hand, if the manifest does not include + // validator 1 as a seconder, then including its Second statement in the response instead + // would fail with "Un-requested Statement In Response". + { + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash: candidate_hash_3, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd_3.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }; + + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == COST_EXCESSIVE_SECONDED + ); + } + + overseer + }); +} + +// Peer reported for not providing enough statements, request retried. 
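+//
+// The manifest advertises knowledge of three `Seconded` (and two `Valid`)
+// statements, so a response carrying only a single statement is reported with
+// `COST_INVALID_RESPONSE` and the request is dispatched again.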
+#[test] +fn peer_reported_for_not_enough_statements() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + let peer_e = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + + let other_group = + next_group_index(local_validator.group_index, validator_count, group_size); + let other_para = ParaId::from(other_group.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let target_group_validators = state.group_validators(other_group, true); + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + let v_e = target_group_validators[2]; + + // Connect C, D, E + { + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_e.clone(), + Some(vec![state.discovery_id(v_e)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_d.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_e.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + }, + }; + + // Peer sends an announcement. + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + let c_seconded = state + .sign_statement( + v_c, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + let statements = vec![c_seconded.clone()]; + // `1` indicates statements NOT to request. + let mask = StatementFilter::blank(group_size); + + // We send a request to peer. Mock its response to include just one statement. + { + handle_sent_request( + &mut overseer, + peer_c, + candidate_hash, + mask.clone(), + candidate.clone(), + pvd.clone(), + statements.clone(), + ) + .await; + + // The peer is reported for only sending one statement. + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == COST_INVALID_RESPONSE => { } + ); + } + + // We re-try the request. 
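+ //
+ // The second attempt is answered with all three `Seconded` statements,
+ // which the node accepts: each statement and the response itself are
+ // rewarded.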
+ { + let statements = vec![ + c_seconded, + state + .sign_statement( + v_d, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + state + .sign_statement( + v_e, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + handle_sent_request( + &mut overseer, + peer_c, + candidate_hash, + mask, + candidate.clone(), + pvd.clone(), + statements.clone(), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_RESPONSE + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + overseer + }); +} + +// Test that a peer answering an `AttestedCandidateRequest` with duplicate statements is punished. +#[test] +fn peer_reported_for_duplicate_statements() { + let group_size = 3; + let config = TestConfig { + validator_count: 20, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let other_group_validators = state.group_validators(local_validator.group_index, true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + // peer C is not in group, has relay parent in view. + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()), + ) + .await; + + connect_peer(&mut overseer, peer_c.clone(), None).await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Peer in cluster sends a statement, triggering a request. 
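+ //
+ // Same flow as in `cluster_peer_allowed_to_send_incomplete_statements`,
+ // except that the mocked response below will carry v_b's `Seconded`
+ // statement twice; the duplicate is reported with
+ // `COST_UNREQUESTED_RESPONSE_STATEMENT`.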
+ { + let a_seconded = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + a_seconded, + ), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + } + + // Send a request to peer and mock its response to include two identical statements. + { + let b_seconded = state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + let statements = vec![b_seconded.clone(), b_seconded.clone()]; + + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == COST_UNREQUESTED_RESPONSE_STATEMENT => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_RESPONSE => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages:: NetworkBridgeTx( + NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging( + protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::Statement(hash, statement), + ), + ), + ) + ) => { + assert_eq!(peers, vec![peer_a]); + assert_eq!(hash, relay_parent); + assert_eq!(statement, b_seconded); + } + ); + } + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + + overseer + }); +} + +#[test] +fn peer_reported_for_providing_statements_with_invalid_signatures() { + let group_size = 3; + let config = TestConfig { + validator_count: 20, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let other_group_validators = state.group_validators(local_validator.group_index, true); + state.group_validators((local_validator.group_index.0 + 1).into(), true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + // peer C is not in group, has relay parent in view. 
+ { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()), + ) + .await; + + connect_peer(&mut overseer, peer_c.clone(), None).await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Peer in cluster sends a statement, triggering a request. + { + let a_seconded = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + a_seconded, + ), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + } + + // Send a request to peer and mock its response to include invalid statements. + { + // Sign statement with wrong signing context, leading to bad signature. + let b_seconded_invalid = state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: Hash::repeat_byte(42), session_index: 1 }, + ) + .as_unchecked() + .clone(); + let statements = vec![b_seconded_invalid.clone()]; + + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == COST_INVALID_SIGNATURE => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_RESPONSE => { } + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + overseer + }); +} + +#[test] +fn peer_reported_for_providing_statements_with_wrong_validator_id() { + let group_size = 3; + let config = TestConfig { + validator_count: 20, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let other_group_validators = state.group_validators(local_validator.group_index, true); + let next_group_validators = + state.group_validators((local_validator.group_index.0 + 1).into(), true); + let v_a = other_group_validators[0]; + let v_c = next_group_validators[0]; + + // peer A is 
in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + // peer C is not in group, has relay parent in view. + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()), + ) + .await; + + connect_peer(&mut overseer, peer_c.clone(), None).await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Peer in cluster sends a statement, triggering a request. + { + let a_seconded = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + a_seconded, + ), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + } + + // Send a request to peer and mock its response to include a wrong validator ID. + { + let c_seconded_invalid = state + .sign_statement( + v_c, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + let statements = vec![c_seconded_invalid.clone()]; + + handle_sent_request( + &mut overseer, + peer_a, + candidate_hash, + StatementFilter::blank(group_size), + candidate.clone(), + pvd.clone(), + statements, + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == COST_UNREQUESTED_RESPONSE_STATEMENT => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_RESPONSE => { } + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + overseer + }); +} + +#[test] +fn local_node_sanity_checks_incoming_requests() { + let config = TestConfig { + validator_count: 20, + group_size: 3, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + + test_harness(config, |mut state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + // peer C is not in group, has relay parent in view. 
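+ // peer D is never connected; it only exercises the unknown-peer request
+ // path further down.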
+ { + let other_group_validators = state.group_validators(local_validator.group_index, true); + + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(other_group_validators[0])].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(other_group_validators[1])].into_iter().collect()), + ) + .await; + + connect_peer(&mut overseer, peer_c.clone(), None).await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + let mask = StatementFilter::blank(state.config.group_size); + + // Should drop requests for unknown candidates. + { + let (pending_response, rx) = oneshot::channel(); + state + .req_sender + .send(RawIncomingRequest { + // Request from peer that received manifest. + peer: peer_c, + payload: request_vstaging::AttestedCandidateRequest { + candidate_hash: candidate.hash(), + mask: mask.clone(), + } + .encode(), + pending_response, + }) + .await + .unwrap(); + + assert_matches!(rx.await, Err(oneshot::Canceled)); + } + + // Confirm candidate. + { + let full_signed = state + .sign_statement( + local_validator.validator_index, + CompactStatement::Seconded(candidate_hash), + &SigningContext { session_index: 1, parent_hash: relay_parent }, + ) + .convert_to_superpayload(StatementWithPVD::Seconded(candidate.clone(), pvd.clone())) + .unwrap(); + + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, full_signed), + }) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging(protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::Statement( + r, + s, + ) + )) + )) => { + assert_eq!(peers, vec![peer_a.clone()]); + assert_eq!(r, relay_parent); + assert_eq!(s.unchecked_payload(), &CompactStatement::Seconded(candidate_hash)); + assert_eq!(s.unchecked_validator_index(), local_validator.validator_index); + } + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Should drop requests from unknown peers. + { + let (pending_response, rx) = oneshot::channel(); + state + .req_sender + .send(RawIncomingRequest { + // Request from peer that received manifest. + peer: peer_d, + payload: request_vstaging::AttestedCandidateRequest { + candidate_hash: candidate.hash(), + mask: mask.clone(), + } + .encode(), + pending_response, + }) + .await + .unwrap(); + + assert_matches!(rx.await, Err(oneshot::Canceled)); + } + + // Should drop requests with bitfields of the wrong size. 
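+ //
+ // A mask whose bitfields do not match the target group's size is rejected
+ // with `COST_INVALID_REQUEST_BITFIELD_SIZE` before any statements are sent.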
+ { + let mask = StatementFilter::blank(state.config.group_size + 1); + let response = state + .send_request( + peer_c, + request_vstaging::AttestedCandidateRequest { + candidate_hash: candidate.hash(), + mask, + }, + ) + .await + .await; + + assert_matches!( + response, + RawOutgoingResponse { + result, + reputation_changes, + sent_feedback + } => { + assert_matches!(result, Err(())); + assert_eq!(reputation_changes, vec![COST_INVALID_REQUEST_BITFIELD_SIZE.into_base_rep()]); + assert_matches!(sent_feedback, None); + } + ); + } + + // Local node should reject requests if we did not send a manifest to that peer. + { + let response = state + .send_request( + peer_c, + request_vstaging::AttestedCandidateRequest { + candidate_hash: candidate.hash(), + mask: mask.clone(), + }, + ) + .await + .await; + + // Should get `COST_UNEXPECTED_REQUEST` response. + assert_matches!( + response, + RawOutgoingResponse { + result, + reputation_changes, + sent_feedback + } => { + assert_matches!(result, Err(())); + assert_eq!(reputation_changes, vec![COST_UNEXPECTED_REQUEST.into_base_rep()]); + assert_matches!(sent_feedback, None); + } + ); + } + + overseer + }); +} + +#[test] +fn local_node_respects_statement_mask() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_a = PeerId::random(); + let peer_b = PeerId::random(); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + + test_harness(config, |mut state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + let local_para = ParaId::from(local_validator.group_index.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate, pvd) = make_candidate( + relay_parent, + 1, + local_para, + test_leaf.para_data(local_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let candidate_hash = candidate.hash(); + + let other_group_validators = state.group_validators(local_validator.group_index, true); + let target_group_validators = + state.group_validators((local_validator.group_index.0 + 1).into(), true); + let v_a = other_group_validators[0]; + let v_b = other_group_validators[1]; + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + + // peer A is in group, has relay parent in view. + // peer B is in group, has no relay parent in view. + // peer C is not in group, has relay parent in view. + // peer D is not in group, has no relay parent in view. + { + connect_peer( + &mut overseer, + peer_a.clone(), + Some(vec![state.discovery_id(v_a)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_b.clone(), + Some(vec![state.discovery_id(v_b)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_a.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. 
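+ //
+ // The grid topology determines which peers outside our group (here peer C)
+ // are sent the `BackedCandidateManifest` once the candidate is backed.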
+ send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + // Confirm the candidate locally so that we don't send out requests. + { + let statement = state + .sign_full_statement( + local_validator.validator_index, + Statement::Seconded(candidate.clone()), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + pvd.clone(), + ) + .clone(); + + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Share(relay_parent, statement), + }) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Send enough statements to make candidate backable, make sure announcements are sent. + + // Send statement from peer A. + { + let statement = state + .sign_statement( + v_a, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + + send_peer_message( + &mut overseer, + peer_a.clone(), + protocol_vstaging::StatementDistributionMessage::Statement(relay_parent, statement), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_a && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + } + + // Send statement from peer B. + let statement_b = state + .sign_statement( + v_b, + CompactStatement::Seconded(candidate_hash), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(); + { + send_peer_message( + &mut overseer, + peer_b.clone(), + protocol_vstaging::StatementDistributionMessage::Statement( + relay_parent, + statement_b.clone(), + ), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_b && r == BENEFIT_VALID_STATEMENT_FIRST => { } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendValidationMessage(peers, _)) if peers == vec![peer_a] + ); + } + + // Send Backed notification. + { + overseer + .send(FromOrchestra::Communication { + msg: StatementDistributionMessage::Backed(candidate_hash), + }) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages:: NetworkBridgeTx( + NetworkBridgeTxMessage::SendValidationMessage( + peers, + Versioned::VStaging( + protocol_vstaging::ValidationProtocol::StatementDistribution( + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest(manifest), + ), + ), + ) + ) => { + assert_eq!(peers, vec![peer_c]); + assert_eq!(manifest, BackedCandidateManifest { + relay_parent, + candidate_hash, + group_index: local_validator.group_index, + para_id: local_para, + parent_head_data_hash: pvd.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }, + }); + } + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // `1` indicates statements NOT to request. + let mask = StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 0, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 0, 0, 0], + }; + + // Incoming request to local node. Local node should send statements, respecting mask. 
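+ //
+ // With `seconded_in_group` = [1, 0, 1], only the group seat at index 1
+ // (occupied here by v_b) may be returned; the statements of the other two
+ // seats (the local validator and v_a) are masked out by the requester.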
+ { + let response = state + .send_request( + peer_c, + request_vstaging::AttestedCandidateRequest { + candidate_hash: candidate.hash(), + mask, + }, + ) + .await + .await; + + let expected_statements = vec![statement_b]; + assert_matches!(response, full_response => { + // Response is the same for vstaging. + let request_vstaging::AttestedCandidateResponse { candidate_receipt, persisted_validation_data, statements } = + request_vstaging::AttestedCandidateResponse::decode( + &mut full_response.result.expect("We should have a proper answer").as_ref(), + ).expect("Decoding should work"); + assert_eq!(candidate_receipt, candidate); + assert_eq!(persisted_validation_data, pvd); + assert_eq!(statements, expected_statements); + }); + } + + overseer + }); +} diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index fa15297afcc3..bfe12981c30c 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -88,9 +88,10 @@ pub enum CandidateBackingMessage { /// Note that the Candidate Backing subsystem should second the given candidate in the context of the /// given relay-parent (ref. by hash). This candidate must be validated. Second(Hash, CandidateReceipt, PersistedValidationData, PoV), - /// Note a validator's statement about a particular candidate. Disagreements about validity must be escalated - /// to a broader check by the Disputes Subsystem, though that escalation is deferred until the approval voting - /// stage to guarantee availability. Agreements are simply tallied until a quorum is reached. + /// Note a validator's statement about a particular candidate in the context of the given + /// relay-parent. Disagreements about validity must be escalated to a broader check by the + /// Disputes Subsystem, though that escalation is deferred until the approval voting stage to + /// guarantee availability. Agreements are simply tallied until a quorum is reached. Statement(Hash, SignedFullStatementWithPVD), } From 6dfbd2018931bee1fa3183953372d38ef7f6e482 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Wed, 15 Mar 2023 20:05:38 +0100 Subject: [PATCH 43/76] Fix some clippy lints in tests --- node/network/statement-distribution/src/legacy_v1/tests.rs | 2 ++ node/network/statement-distribution/src/vstaging/tests/mod.rs | 4 +++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/node/network/statement-distribution/src/legacy_v1/tests.rs b/node/network/statement-distribution/src/legacy_v1/tests.rs index 0764040921cd..1b0660ebfdaf 100644 --- a/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . +#![allow(clippy::clone_on_copy)] + use super::*; use crate::{metrics::Metrics, *}; diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index f290c860ac5c..a3d8d2221ce6 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -14,6 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . 
+#![allow(clippy::clone_on_copy)] + use super::*; use crate::*; use polkadot_node_network_protocol::{ @@ -502,7 +504,7 @@ async fn answer_expected_hypothetical_depth_request( assert_eq!(req.backed_in_path_only, expected_backed_in_path_only); for (i, (candidate, _)) in responses.iter().enumerate() { assert!( - req.candidates.iter().find(|c| c == &candidate).is_some(), + req.candidates.iter().any(|c| &c == &candidate), "did not receive request for hypothetical candidate {}", i, ); From c9552b7971953c2225945a202eec6eab04f08358 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Tue, 21 Mar 2023 09:58:24 +0100 Subject: [PATCH 44/76] Async backing: minor fixes (#6920) --- node/core/backing/src/lib.rs | 2 +- .../src/fragment_tree.rs | 67 ++++++++++--------- node/subsystem-types/src/messages.rs | 2 +- .../src/backing_implicit_view.rs | 2 +- .../src/inclusion_emulator/staging.rs | 25 +++---- .../backing/statement-distribution-legacy.md | 2 +- .../node/backing/statement-distribution.md | 13 ++-- 7 files changed, 57 insertions(+), 56 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index ea66c53bc892..5ed486e4ca64 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -1494,7 +1494,7 @@ async fn import_statement( // we need to create an entry in the `PerCandidateState` map. // // If the relay parent supports prospective parachains, we also need - // to inform the prospective parachains subsystem of the seconded candidate + // to inform the prospective parachains subsystem of the seconded candidate. // If `ProspectiveParachainsMessage::Second` fails, then we return // Error::RejectedByProspectiveParachains. // diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index c6f388f851b5..cbed7cf3f9dc 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -381,8 +381,8 @@ impl Scope { } } -// We use indices into a flat vector to refer to nodes in the tree. -// Every tree also has an implicit root. +/// We use indices into a flat vector to refer to nodes in the tree. +/// Every tree also has an implicit root. #[derive(Debug, Clone, Copy, PartialEq)] enum NodePointer { Root, @@ -618,7 +618,7 @@ impl FragmentTree { let max_depth = self.scope.max_depth; let mut depths = bitvec![u16, Msb0; 0; max_depth + 1]; - // iterate over all nodes < max_depth where parent head-data matches, + // iterate over all nodes where parent head-data matches, // relay-parent number is <= candidate, and depth < max_depth. let node_pointers = (0..self.nodes.len()).map(NodePointer::Storage); for parent_pointer in std::iter::once(NodePointer::Root).chain(node_pointers) { @@ -665,46 +665,46 @@ impl FragmentTree { }; let parent_head_hash = candidate.parent_head_data_hash(); - if parent_head_hash == child_constraints.required_parent.hash() { - // We do additional checks for complete candidates. 
- if let HypotheticalCandidate::Complete { - ref receipt, - ref persisted_validation_data, - } = candidate - { - let prospective_candidate = ProspectiveCandidate { - commitments: Cow::Borrowed(&receipt.commitments), - collator: receipt.descriptor().collator.clone(), - collator_signature: receipt.descriptor().signature.clone(), - persisted_validation_data: persisted_validation_data.as_ref().clone(), - pov_hash: receipt.descriptor().pov_hash, - validation_code_hash: receipt.descriptor().validation_code_hash, - }; + if parent_head_hash != child_constraints.required_parent.hash() { + continue + } - if Fragment::new( - candidate_relay_parent.clone(), - child_constraints, - prospective_candidate, - ) - .is_err() - { - continue - } - } + // We do additional checks for complete candidates. + if let HypotheticalCandidate::Complete { ref receipt, ref persisted_validation_data } = + candidate + { + let prospective_candidate = ProspectiveCandidate { + commitments: Cow::Borrowed(&receipt.commitments), + collator: receipt.descriptor().collator.clone(), + collator_signature: receipt.descriptor().signature.clone(), + persisted_validation_data: persisted_validation_data.as_ref().clone(), + pov_hash: receipt.descriptor().pov_hash, + validation_code_hash: receipt.descriptor().validation_code_hash, + }; - // Check that the path only contains backed candidates, if necessary. - if !backed_in_path_only || - self.path_contains_backed_only_candidates(parent_pointer, candidate_storage) + if Fragment::new( + candidate_relay_parent.clone(), + child_constraints, + prospective_candidate, + ) + .is_err() { - depths.set(child_depth, true); + continue } } + + // Check that the path only contains backed candidates, if necessary. + if !backed_in_path_only || + self.path_contains_backed_only_candidates(parent_pointer, candidate_storage) + { + depths.set(child_depth, true); + } } depths.iter_ones().collect() } - /// Select a candidate after the given `required_path` which pass + /// Select a candidate after the given `required_path` which passes /// the predicate. /// /// If there are multiple possibilities, this will select the first one. @@ -1160,6 +1160,7 @@ mod tests { assert!(storage.head_data_by_hash(&output_head_hash).is_none()); } + // [`FragmentTree::populate`] should pick up candidates that build on other candidates. #[test] fn populate_works_recursively() { let mut storage = CandidateStorage::new(); diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index bfe12981c30c..37a11ecbc949 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -529,7 +529,7 @@ pub enum ChainApiMessage { /// Request the last finalized block number. /// This request always succeeds. FinalizedBlockNumber(ChainApiResponseChannel), - /// Request the `k` ancestors block hashes of a block with the given hash. + /// Request the `k` ancestor block hashes of a block with the given hash. /// The response channel may return a `Vec` of size up to `k` /// filled with ancestors hashes with the following order: /// `parent`, `grandparent`, ... 
up to the hash of genesis block
diff --git a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs
index 6fd273b1b212..e1966dddea9e 100644
--- a/node/subsystem-util/src/backing_implicit_view.rs
+++ b/node/subsystem-util/src/backing_implicit_view.rs
@@ -115,7 +115,7 @@ impl View {
 	///
 	/// This returns a list of para-ids which are relevant to the leaf,
 	/// and the allowed relay parents for these paras under this leaf can be
-	/// queried with [`known_allowed_relay_parents_under`].
+	/// queried with [`View::known_allowed_relay_parents_under`].
 	///
 	/// No-op for known leaves.
 	pub async fn activate_leaf(
diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs
index 80ba0fb12eaf..66868f16925d 100644
--- a/node/subsystem-util/src/inclusion_emulator/staging.rs
+++ b/node/subsystem-util/src/inclusion_emulator/staging.rs
@@ -71,19 +71,20 @@
 //!
 //! ### Pruning Fragment Trees
 //!
-//! When the relay-chain advances, we want to compare the new constraints
-//! of that relay-parent to the roots of the fragment trees we have. There are 3 cases.
+//! When the relay-chain advances, we want to compare the new constraints of that relay-parent to
+//! the roots of the fragment trees we have. There are 3 cases:
 //!
-//! 1. The root fragment is still valid under the new constraints. In this case, we do nothing.
-//! This is the "prediction still uncertain" case.
-//! 2. The root fragment is invalid under the new constraints because it has been subsumed by the relay-chain.
-//! in this case, we can discard the root and split & re-root the fragment tree
-//! under its descendents and compare to the new constraints again.
-//! This is the "prediction came true" case.
-//! 3. The root fragment is invalid under the new constraints because a competing parachain block has been included
-//! or it would never be accepted for some other reason. In this case we can discard the entire
-//! fragment tree.
-//! This is the "prediction came false" case.
+//! 1. The root fragment is still valid under the new constraints. In this case, we do nothing. This
+//! is the "prediction still uncertain" case.
+//!
+//! 2. The root fragment is invalid under the new constraints because it has been subsumed by the
+//! relay-chain. In this case, we can discard the root and split & re-root the fragment tree under
+//! its descendants and compare to the new constraints again. This is the "prediction came true"
+//! case.
+//!
+//! 3. The root fragment is invalid under the new constraints because a competing parachain block
+//! has been included or it would never be accepted for some other reason. In this case we can
+//! discard the entire fragment tree. This is the "prediction came false" case.
 //!
 //! This is all a bit of a simplification because it assumes that the relay-chain advances without
 //! forks and is finalized instantly. In practice, the set of fragment-trees needs to be observable
diff --git a/roadmap/implementers-guide/src/node/backing/statement-distribution-legacy.md b/roadmap/implementers-guide/src/node/backing/statement-distribution-legacy.md
index 67dcaf9053a5..5cbc875d8a73 100644
--- a/roadmap/implementers-guide/src/node/backing/statement-distribution-legacy.md
+++ b/roadmap/implementers-guide/src/node/backing/statement-distribution-legacy.md
@@ -33,7 +33,7 @@ Output:
 
 Implemented as a gossip protocol. Handles updates to our view and peers' views.
Neighbor packets are used to inform peers which chain heads we are interested in data for. -The Statement Distribution Subsystem is responsible for distributing signed statements that we have generated and for forwarding statements generated by other validators. It also detects a variety of Validator misbehaviors for reporting to [Misbehavior Arbitration](../utility/misbehavior-arbitration.md). During the Backing stage of the inclusion pipeline, Statement Distribution is the main point of contact with peer nodes. On receiving a signed statement from a peer in the same backing group, assuming the peer receipt state machine is in an appropriate state, it sends the Candidate Receipt to the [Candidate Backing subsystem](candidate-backing.md) to handle the validator's statement. On receiving `StatementDistributionMessage::Share` we make sure to send messages to our backing group in addition to random other peers, to ensure a fast backing process and getting all statements quickly for distribution. +The Statement Distribution Subsystem is responsible for distributing signed statements that we have generated and for forwarding statements generated by other validators. It also detects a variety of Validator misbehaviors for reporting to the [Provisioner Subsystem](../utility/provisioner.md). During the Backing stage of the inclusion pipeline, Statement Distribution is the main point of contact with peer nodes. On receiving a signed statement from a peer in the same backing group, assuming the peer receipt state machine is in an appropriate state, it sends the Candidate Receipt to the [Candidate Backing subsystem](candidate-backing.md) to handle the validator's statement. On receiving `StatementDistributionMessage::Share` we make sure to send messages to our backing group in addition to random other peers, to ensure a fast backing process and getting all statements quickly for distribution. This subsystem tracks equivocating validators and stops accepting information from them. It establishes a data-dependency order: diff --git a/roadmap/implementers-guide/src/node/backing/statement-distribution.md b/roadmap/implementers-guide/src/node/backing/statement-distribution.md index 3b1915052e4d..9259acf7387d 100644 --- a/roadmap/implementers-guide/src/node/backing/statement-distribution.md +++ b/roadmap/implementers-guide/src/node/backing/statement-distribution.md @@ -22,8 +22,7 @@ reasons. As a result, all validators must have a up to date view of all possible parachain candidates + backing statements that could be placed on-chain in the next block. -[This blog -post](https://polkadot.network/blog/polkadot-v1-0-sharding-and-economic-security) +[This blog post](https://polkadot.network/blog/polkadot-v1-0-sharding-and-economic-security) puts it another way: "Validators who aren't assigned to the parachain still listen for the attestations [statements] because whichever validator ends up being the author of the relay-chain block needs to bundle up attested parachain @@ -155,11 +154,11 @@ backing subsystem itself. - Note that requesting is not an implicit acknowledgement, and an explicit acknowledgement must be sent upon receipt. -## Statement distribution messages +## Messages -### Input +### Incoming -- `ActiveLeavesUpdate` +- `ActiveLeaves` - Notification of a change in the set of active leaves. - `StatementDistributionMessage::Share` - Notification of a locally-originating statement. That is, this statement @@ -186,7 +185,7 @@ backing subsystem itself. - Acknowledgement. 
- Handled by `handle_incoming_acknowledgement` -### Output +### Outgoing - `NetworkBridgeTxMessage::SendValidationMessages` - Sends a peer all pending messages / acknowledgements / statements for a @@ -202,7 +201,7 @@ backing subsystem itself. - Gets the hypothetical frontier membership of candidates under active leaves' fragment trees. - `NetworkBridgeTxMessage::SendRequests` - - Sends requests, initiating request/response protocol. + - Sends requests, initiating the request/response protocol. ## Request/Response From 49ca776cb2e925b2bff03c680777a2571136f31f Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Tue, 21 Mar 2023 21:03:28 +0400 Subject: [PATCH 45/76] bitfield-distribution test --- node/network/bitfield-distribution/src/tests.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/node/network/bitfield-distribution/src/tests.rs b/node/network/bitfield-distribution/src/tests.rs index 8b572825a427..c9e61ea11121 100644 --- a/node/network/bitfield-distribution/src/tests.rs +++ b/node/network/bitfield-distribution/src/tests.rs @@ -1033,7 +1033,6 @@ fn network_protocol_versioning() { ValidatorIndex(0), &validator, ) - .await .ok() .flatten() .expect("should be signed"); From e03fb90a5c02fb9410619507918a4a37a3791b45 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Tue, 21 Mar 2023 21:18:40 +0400 Subject: [PATCH 46/76] implicit view tests --- node/subsystem-util/src/backing_implicit_view.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/subsystem-util/src/backing_implicit_view.rs b/node/subsystem-util/src/backing_implicit_view.rs index e1966dddea9e..adf7fbd54258 100644 --- a/node/subsystem-util/src/backing_implicit_view.rs +++ b/node/subsystem-util/src/backing_implicit_view.rs @@ -413,7 +413,7 @@ mod tests { make_subsystem_context, TestSubsystemContextHandle, }; use polkadot_overseer::SubsystemContext; - use polkadot_primitives::v2::Header; + use polkadot_primitives::Header; use sp_core::testing::TaskExecutor; use std::time::Duration; From 2259262750e33cc81abd338a542d0a59afb4bd57 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Tue, 28 Mar 2023 20:39:40 +0400 Subject: [PATCH 47/76] Refactor parameters -> params --- Cargo.lock | 481 ++++++++++-------- node/core/backing/src/tests/mod.rs | 2 +- .../src/tests/prospective_parachains.rs | 6 +- node/core/prospective-parachains/src/tests.rs | 10 +- node/core/runtime-api/src/cache.rs | 18 +- node/core/runtime-api/src/lib.rs | 16 +- .../src/collator_side/tests/mod.rs | 4 +- .../tests/prospective_parachains.rs | 6 +- .../src/validator_side/tests/mod.rs | 40 +- .../tests/prospective_parachains.rs | 6 +- .../src/legacy_v1/tests.rs | 26 +- .../src/vstaging/tests/cluster.rs | 2 +- .../src/vstaging/tests/mod.rs | 10 +- .../src/vstaging/tests/requests.rs | 2 +- node/subsystem-types/src/messages.rs | 2 +- node/subsystem-types/src/runtime_client.rs | 10 +- node/subsystem-util/src/lib.rs | 2 +- node/subsystem-util/src/runtime/mod.rs | 13 +- primitives/src/runtime_api.rs | 2 +- runtime/parachains/src/paras_inherent/mod.rs | 4 +- .../src/runtime_api_impl/vstaging.rs | 13 +- runtime/rococo/src/lib.rs | 4 +- runtime/westend/src/lib.rs | 4 +- 23 files changed, 373 insertions(+), 310 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0e10c9684c11..3a8687ac8ad1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -512,16 +512,28 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitvec" +version = "0.20.4" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" +dependencies = [ + "funty 1.1.0", + "radium 0.6.2", + "tap", + "wyz 0.2.0", +] + [[package]] name = "bitvec" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ - "funty", - "radium", + "funty 2.0.0", + "radium 0.7.0", "tap", - "wyz", + "wyz 0.5.1", ] [[package]] @@ -631,7 +643,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a071c348a5ef6da1d3a87166b408170b46002382b1dda83992b5c2208cefb370" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", ] @@ -2283,7 +2295,7 @@ dependencies = [ "futures-timer", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "scale-info", ] @@ -2366,7 +2378,7 @@ name = "fork-tree" version = "3.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", ] [[package]] @@ -2394,7 +2406,7 @@ dependencies = [ "frame-system", "linregress", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "paste", "scale-info", "serde", @@ -2428,7 +2440,7 @@ dependencies = [ "lazy_static", "linked-hash-map", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "rand 0.8.5", "rand_pcg", "sc-block-builder", @@ -2475,7 +2487,7 @@ dependencies = [ "frame-election-provider-solution-type", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-arithmetic", "sp-core", @@ -2492,7 +2504,7 @@ dependencies = [ "frame-support", "frame-system", "frame-try-runtime", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-io", @@ -2508,7 +2520,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df6bb8542ef006ef0de09a5c4420787d79823c0ed7924225822362fd2bf2ff2d" dependencies = [ "cfg-if", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", ] @@ -2520,7 +2532,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "futures", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "serde", "sp-core", "sp-io", @@ -2542,7 +2554,7 @@ dependencies = [ "k256", "log", "once_cell", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "paste", "scale-info", "serde", @@ -2608,7 +2620,7 @@ dependencies = [ "frame-support", "frame-support-test-pallet", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "pretty_assertions", "rustversion", "scale-info", @@ -2630,7 +2642,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", ] @@ -2641,7 +2653,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "frame-support", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-core", @@ -2660,7 +2672,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-runtime", @@ -2672,7 +2684,7 @@ name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" source = 
"git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-api", ] @@ -2682,7 +2694,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-api", "sp-runtime", "sp-std", @@ -2721,6 +2733,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" +[[package]] +name = "funty" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + [[package]] name = "funty" version = "2.0.0" @@ -3365,7 +3383,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", ] [[package]] @@ -3668,7 +3686,7 @@ checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" name = "kusama-runtime" version = "0.9.39" dependencies = [ - "bitvec", + "bitvec 1.0.1", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -3728,7 +3746,7 @@ dependencies = [ "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", @@ -4657,7 +4675,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "futures", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sc-client-api", "sc-offchain", "sp-api", @@ -4676,7 +4694,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "anyhow", "jsonrpsee", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "serde", "sp-api", "sp-blockchain", @@ -5251,7 +5269,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-runtime", @@ -5266,7 +5284,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-application-crypto", "sp-authority-discovery", @@ -5282,7 +5300,7 @@ dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-runtime", "sp-std", @@ -5300,7 +5318,7 @@ dependencies = [ "pallet-authorship", "pallet-session", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-application-crypto", "sp-consensus-babe", @@ -5323,7 +5341,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-io", @@ -5360,7 +5378,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-runtime", "sp-std", @@ -5375,7 +5393,7 @@ dependencies = [ "frame-system", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-consensus-beefy", @@ -5398,7 +5416,7 @@ dependencies = [ "pallet-beefy", "pallet-mmr", "pallet-session", - 
"parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-api", @@ -5419,7 +5437,7 @@ dependencies = [ "frame-system", "log", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-io", @@ -5438,7 +5456,7 @@ dependencies = [ "log", "pallet-bounties", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-io", @@ -5455,7 +5473,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-io", @@ -5472,7 +5490,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-io", @@ -5489,7 +5507,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-core", @@ -5509,7 +5527,7 @@ dependencies = [ "frame-system", "log", "pallet-election-provider-support-benchmarking", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "rand 0.8.5", "scale-info", "sp-arithmetic", @@ -5529,7 +5547,7 @@ dependencies = [ "frame-benchmarking", "frame-election-provider-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-npos-elections", "sp-runtime", ] @@ -5543,7 +5561,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-io", @@ -5562,7 +5580,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-io", "sp-runtime", @@ -5581,7 +5599,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-application-crypto", "sp-consensus-grandpa", @@ -5602,7 +5620,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-io", "sp-runtime", @@ -5619,7 +5637,7 @@ dependencies = [ "frame-system", "log", "pallet-authorship", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-application-crypto", "sp-core", @@ -5637,7 +5655,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-io", @@ -5655,7 +5673,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-io", @@ -5671,7 +5689,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-io", @@ -5689,7 +5707,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-io", "sp-runtime", @@ -5704,7 +5722,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-arithmetic", "sp-core", @@ -5720,7 +5738,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-io", @@ -5741,7 +5759,7 @@ dependencies = [ "pallet-bags-list", "pallet-nomination-pools", "pallet-staking", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-runtime", "sp-runtime-interface", @@ -5755,7 +5773,7 @@ 
version = "1.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ "pallet-nomination-pools", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-api", "sp-std", ] @@ -5769,7 +5787,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-runtime", @@ -5794,7 +5812,7 @@ dependencies = [ "pallet-offences", "pallet-session", "pallet-staking", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-runtime", "sp-staking", @@ -5810,7 +5828,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-io", @@ -5826,7 +5844,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-io", "sp-runtime", @@ -5842,7 +5860,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-arithmetic", "sp-core", @@ -5859,7 +5877,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-io", "sp-runtime", @@ -5876,7 +5894,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-arithmetic", @@ -5894,7 +5912,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-io", "sp-runtime", @@ -5912,7 +5930,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-io", @@ -5946,7 +5964,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "rand_chacha 0.2.2", "scale-info", "sp-runtime", @@ -5965,7 +5983,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "rand_chacha 0.2.2", "scale-info", "serde", @@ -6001,7 +6019,7 @@ name = "pallet-staking-runtime-api" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-api", ] @@ -6014,7 +6032,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-io", @@ -6029,7 +6047,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-io", "sp-runtime", @@ -6045,7 +6063,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-inherents", "sp-io", @@ -6064,7 +6082,7 @@ dependencies = [ "frame-system", "log", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-core", @@ -6080,7 +6098,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-core", 
@@ -6096,7 +6114,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-api", "sp-blockchain", "sp-core", @@ -6111,7 +6129,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-api", "sp-runtime", "sp-weights", @@ -6127,7 +6145,7 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-runtime", @@ -6143,7 +6161,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-runtime", "sp-std", @@ -6157,7 +6175,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-io", @@ -6174,7 +6192,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-runtime", "sp-std", @@ -6188,7 +6206,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-api", "sp-runtime", @@ -6205,7 +6223,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-parachain", "polkadot-runtime-parachains", "scale-info", @@ -6230,7 +6248,7 @@ dependencies = [ "pallet-assets", "pallet-balances", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-primitives", "polkadot-runtime-common", "scale-info", @@ -6264,6 +6282,19 @@ dependencies = [ "snap", ] +[[package]] +name = "parity-scale-codec" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" +dependencies = [ + "arrayvec 0.7.2", + "bitvec 0.20.4", + "byte-slice-cast", + "impl-trait-for-tuples", + "serde", +] + [[package]] name = "parity-scale-codec" version = "3.4.0" @@ -6271,7 +6302,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "637935964ff85a605d114591d4d2c13c5d1ba2806dae97cea6bf180238a749ac" dependencies = [ "arrayvec 0.7.2", - "bitvec", + "bitvec 1.0.1", "byte-slice-cast", "bytes", "impl-trait-for-tuples", @@ -6580,8 +6611,9 @@ dependencies = [ name = "polkadot-availability-bitfield-distribution" version = "0.9.39" dependencies = [ + "always-assert", "assert_matches", - "bitvec", + "bitvec 1.0.1", "env_logger 0.9.0", "futures", "log", @@ -6611,7 +6643,7 @@ dependencies = [ "futures", "futures-timer", "lru 0.9.0", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6641,7 +6673,7 @@ dependencies = [ "futures-timer", "log", "lru 0.9.0", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6737,13 +6769,13 @@ version = "0.9.39" dependencies = [ "always-assert", "assert_matches", - "bitvec", + "bitvec 1.0.1", "env_logger 0.9.0", "fatality", "futures", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-node-network-protocol", 
"polkadot-node-primitives", "polkadot-node-subsystem", @@ -6751,6 +6783,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "sc-keystore", "sc-network", "sp-core", "sp-keyring", @@ -6764,7 +6797,7 @@ dependencies = [ name = "polkadot-core-primitives" version = "0.9.39" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-runtime", @@ -6784,7 +6817,7 @@ dependencies = [ "indexmap", "lazy_static", "lru 0.9.0", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6808,7 +6841,7 @@ name = "polkadot-erasure-coding" version = "0.9.39" dependencies = [ "criterion", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-node-primitives", "polkadot-primitives", "reed-solomon-novelpoly", @@ -6855,7 +6888,7 @@ dependencies = [ "fatality", "futures", "futures-timer", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "polkadot-node-metrics", "polkadot-node-network-protocol", @@ -6878,7 +6911,7 @@ name = "polkadot-node-collation-generation" version = "0.9.39" dependencies = [ "futures", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -6898,7 +6931,7 @@ version = "0.9.39" dependencies = [ "assert_matches", "async-trait", - "bitvec", + "bitvec 1.0.1", "derive_more", "futures", "futures-timer", @@ -6906,7 +6939,7 @@ dependencies = [ "kvdb-memorydb", "lru 0.9.0", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "polkadot-node-jaeger", "polkadot-node-primitives", @@ -6936,14 +6969,14 @@ name = "polkadot-node-core-av-store" version = "0.9.39" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.1", "env_logger 0.9.0", "futures", "futures-timer", "kvdb", "kvdb-memorydb", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "polkadot-erasure-coding", "polkadot-node-primitives", @@ -6965,7 +6998,7 @@ name = "polkadot-node-core-backing" version = "0.9.39" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.1", "fatality", "futures", "polkadot-erasure-coding", @@ -7010,7 +7043,7 @@ dependencies = [ "async-trait", "futures", "futures-timer", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-node-core-pvf", "polkadot-node-metrics", "polkadot-node-primitives", @@ -7032,7 +7065,7 @@ version = "0.9.39" dependencies = [ "futures", "maplit", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-node-metrics", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -7054,7 +7087,7 @@ dependencies = [ "futures-timer", "kvdb", "kvdb-memorydb", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -7077,7 +7110,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "lru 0.9.0", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -7110,11 +7143,36 @@ dependencies = [ "tracing-gum", ] +[[package]] +name = "polkadot-node-core-prospective-parachains" +version = "0.9.16" +dependencies = [ + "assert_matches", + "bitvec 1.0.1", + "fatality", + "futures", + "parity-scale-codec 2.3.1", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-types", + 
"polkadot-node-subsystem-util", + "polkadot-primitives", + "polkadot-primitives-test-helpers", + "sc-keystore", + "sp-application-crypto", + "sp-core", + "sp-keyring", + "sp-keystore", + "thiserror", + "tracing-gum", +] + [[package]] name = "polkadot-node-core-provisioner" version = "0.9.39" dependencies = [ - "bitvec", + "bitvec 1.0.1", "fatality", "futures", "futures-timer", @@ -7142,7 +7200,7 @@ dependencies = [ "futures-timer", "hex-literal", "libc", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "pin-project", "polkadot-core-primitives", "polkadot-node-metrics", @@ -7220,7 +7278,7 @@ dependencies = [ "lazy_static", "log", "mick-jaeger", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "polkadot-node-primitives", "polkadot-primitives", @@ -7240,7 +7298,7 @@ dependencies = [ "futures-timer", "hyper", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-primitives", "polkadot-test-service", "prioritized-metered-channel", @@ -7262,11 +7320,12 @@ name = "polkadot-node-network-protocol" version = "0.9.39" dependencies = [ "async-trait", + "bitvec 1.0.1", "derive_more", "fatality", "futures", "hex", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-primitives", @@ -7285,7 +7344,7 @@ version = "0.9.39" dependencies = [ "bounded-vec", "futures", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-erasure-coding", "polkadot-parachain", "polkadot-primitives", @@ -7370,7 +7429,7 @@ dependencies = [ "log", "lru 0.9.0", "parity-db", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.11.2", "pin-project", "polkadot-node-jaeger", @@ -7425,7 +7484,7 @@ dependencies = [ "bounded-collections", "derive_more", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-core-primitives", "scale-info", "serde", @@ -7453,9 +7512,9 @@ dependencies = [ name = "polkadot-primitives" version = "0.9.39" dependencies = [ - "bitvec", + "bitvec 1.0.1", "hex-literal", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-core-primitives", "polkadot-parachain", "scale-info", @@ -7521,7 +7580,7 @@ dependencies = [ name = "polkadot-runtime" version = "0.9.39" dependencies = [ - "bitvec", + "bitvec 1.0.1", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -7576,7 +7635,7 @@ dependencies = [ "pallet-vesting", "pallet-whitelist", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-constants", @@ -7622,7 +7681,7 @@ dependencies = [ name = "polkadot-runtime-common" version = "0.9.39" dependencies = [ - "bitvec", + "bitvec 1.0.1", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -7644,7 +7703,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-treasury", "pallet-vesting", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-runtime-parachains", @@ -7687,7 +7746,7 @@ version = "0.9.39" dependencies = [ "bs58", "frame-benchmarking", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-primitives", "sp-std", "sp-tracing", @@ -7699,7 +7758,7 @@ version = "0.9.39" dependencies = [ "assert_matches", "bitflags", - "bitvec", + "bitvec 1.0.1", "derive_more", "frame-benchmarking", "frame-support", @@ -7716,7 +7775,7 @@ dependencies = [ "pallet-staking", "pallet-timestamp", "pallet-vesting", - "parity-scale-codec", + "parity-scale-codec 
3.4.0", "polkadot-parachain", "polkadot-primitives", "polkadot-primitives-test-helpers", @@ -7789,6 +7848,7 @@ dependencies = [ "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", + "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf-checker", "polkadot-node-core-runtime-api", @@ -7868,18 +7928,21 @@ version = "0.9.39" dependencies = [ "arrayvec 0.5.2", "assert_matches", + "bitvec 1.0.1", "fatality", "futures", "futures-timer", "indexmap", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rand_chacha 0.3.1", "sc-keystore", "sc-network", "sp-application-crypto", @@ -7897,7 +7960,7 @@ dependencies = [ name = "polkadot-statement-table" version = "0.9.39" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-primitives", "sp-core", ] @@ -7907,7 +7970,7 @@ name = "polkadot-test-client" version = "0.9.39" dependencies = [ "futures", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-node-subsystem", "polkadot-primitives", "polkadot-test-runtime", @@ -7960,7 +8023,7 @@ dependencies = [ name = "polkadot-test-runtime" version = "0.9.39" dependencies = [ - "bitvec", + "bitvec 1.0.1", "frame-election-provider-support", "frame-executive", "frame-support", @@ -7984,7 +8047,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-vesting", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -8460,6 +8523,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" + [[package]] name = "radium" version = "0.7.0" @@ -8878,7 +8947,7 @@ dependencies = [ "pallet-vesting", "pallet-xcm", "pallet-xcm-benchmarks", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -9154,7 +9223,7 @@ dependencies = [ "ip_network", "libp2p", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "prost", "prost-build", "rand 0.8.5", @@ -9179,7 +9248,7 @@ dependencies = [ "futures", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -9199,7 +9268,7 @@ name = "sc-block-builder" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sc-client-api", "sp-api", "sp-block-builder", @@ -9252,7 +9321,7 @@ dependencies = [ "libp2p", "log", "names", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "rand 0.8.5", "regex", "rpassword", @@ -9287,7 +9356,7 @@ dependencies = [ "fnv", "futures", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "sc-executor", "sc-transaction-pool-api", @@ -9317,7 +9386,7 @@ dependencies = [ "linked-hash-map", "log", "parity-db", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "sc-client-api", "sc-state-db", @@ -9369,7 +9438,7 @@ 
dependencies = [ "num-bigint", "num-rational", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "sc-client-api", "sc-consensus", @@ -9427,7 +9496,7 @@ dependencies = [ "fnv", "futures", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "sc-client-api", "sc-consensus", @@ -9460,7 +9529,7 @@ dependencies = [ "futures", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "sc-consensus-beefy", "sc-rpc", @@ -9477,7 +9546,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ "fork-tree", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sc-client-api", "sc-consensus", "sp-blockchain", @@ -9498,7 +9567,7 @@ dependencies = [ "futures", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "rand 0.8.5", "sc-block-builder", @@ -9533,7 +9602,7 @@ dependencies = [ "futures", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sc-client-api", "sc-consensus-grandpa", "sc-rpc", @@ -9553,7 +9622,7 @@ dependencies = [ "futures", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sc-client-api", "sc-consensus", "sc-telemetry", @@ -9573,7 +9642,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ "lru 0.8.1", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "sc-executor-common", "sc-executor-wasmi", @@ -9686,7 +9755,7 @@ dependencies = [ "log", "lru 0.8.1", "mockall", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "pin-project", "rand 0.8.5", @@ -9742,7 +9811,7 @@ dependencies = [ "futures", "futures-timer", "libp2p", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "prost-build", "sc-consensus", "sc-peerset", @@ -9786,7 +9855,7 @@ dependencies = [ "futures", "libp2p", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "prost", "prost-build", "sc-client-api", @@ -9813,7 +9882,7 @@ dependencies = [ "log", "lru 0.8.1", "mockall", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "prost", "prost-build", "sc-client-api", @@ -9842,7 +9911,7 @@ dependencies = [ "futures", "libp2p", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "pin-project", "sc-network", "sc-network-common", @@ -9868,7 +9937,7 @@ dependencies = [ "libp2p", "num_cpus", "once_cell", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "rand 0.8.5", "sc-client-api", @@ -9914,7 +9983,7 @@ dependencies = [ "futures", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "sc-block-builder", "sc-chain-spec", @@ -9942,7 +10011,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ "jsonrpsee", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sc-chain-spec", "sc-transaction-pool-api", "scale-info", @@ -9981,7 +10050,7 @@ dependencies = [ "hex", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "sc-chain-spec", "sc-client-api", @@ -10008,7 +10077,7 @@ dependencies = [ "futures-timer", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "pin-project", "rand 0.8.5", @@ -10068,7 +10137,7 @@ version = 
"0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "sp-core", ] @@ -10095,7 +10164,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ "jsonrpsee", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sc-chain-spec", "sc-client-api", "sc-consensus-babe", @@ -10199,7 +10268,7 @@ dependencies = [ "linked-hash-map", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "sc-client-api", "sc-transaction-pool-api", @@ -10250,10 +10319,10 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c46be926081c9f4dd5dd9b6f1d3e3229f2360bc6502dd8836f84a93b7c75e99a" dependencies = [ - "bitvec", + "bitvec 1.0.1", "cfg-if", "derive_more", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info-derive", "serde", ] @@ -10697,7 +10766,7 @@ name = "slot-range-helper" version = "0.9.39" dependencies = [ "enumn", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "paste", "sp-runtime", "sp-std", @@ -10775,7 +10844,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "hash-db", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-api-proc-macro", "sp-core", "sp-runtime", @@ -10805,7 +10874,7 @@ name = "sp-application-crypto" version = "7.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-core", @@ -10820,7 +10889,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "integer-sqrt", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-std", @@ -10832,7 +10901,7 @@ name = "sp-authority-discovery" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-api", "sp-application-crypto", @@ -10845,7 +10914,7 @@ name = "sp-block-builder" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-api", "sp-inherents", "sp-runtime", @@ -10860,7 +10929,7 @@ dependencies = [ "futures", "log", "lru 0.8.1", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "sp-api", "sp-consensus", @@ -10891,7 +10960,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ "async-trait", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-api", "sp-application-crypto", @@ -10910,7 +10979,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "async-trait", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-api", @@ -10932,7 +11001,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ "lazy_static", - "parity-scale-codec", + 
"parity-scale-codec 3.4.0", "scale-info", "serde", "sp-api", @@ -10952,7 +11021,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "finality-grandpa", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-api", @@ -10968,7 +11037,7 @@ name = "sp-consensus-slots" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-std", @@ -10980,7 +11049,7 @@ name = "sp-consensus-vrf" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "schnorrkel", "sp-core", @@ -11008,7 +11077,7 @@ dependencies = [ "libsecp256k1", "log", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "primitive-types", "rand 0.8.5", @@ -11081,7 +11150,7 @@ version = "0.13.0" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ "environmental", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-std", "sp-storage", ] @@ -11093,7 +11162,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "async-trait", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-runtime", @@ -11112,7 +11181,7 @@ dependencies = [ "futures", "libsecp256k1", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "secp256k1", "sp-core", "sp-externalities", @@ -11144,7 +11213,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "futures", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "schnorrkel", "serde", @@ -11169,7 +11238,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "ckb-merkle-mountain-range", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-api", @@ -11185,7 +11254,7 @@ name = "sp-npos-elections" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-arithmetic", @@ -11233,7 +11302,7 @@ dependencies = [ "hash256-std-hasher", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "paste", "rand 0.8.5", "scale-info", @@ -11253,7 +11322,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "bytes", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "primitive-types", "sp-externalities", "sp-runtime-interface-proc-macro", @@ -11281,7 +11350,7 @@ name = "sp-session" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-api", "sp-core", @@ -11295,7 +11364,7 @@ name = "sp-staking" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", 
"sp-core", "sp-runtime", @@ -11309,7 +11378,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "hash-db", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "rand 0.8.5", "smallvec", @@ -11333,7 +11402,7 @@ version = "7.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "ref-cast", "serde", "sp-debug-derive", @@ -11348,7 +11417,7 @@ dependencies = [ "async-trait", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-inherents", "sp-runtime", "sp-std", @@ -11360,7 +11429,7 @@ name = "sp-tracing" version = "6.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-std", "tracing", "tracing-core", @@ -11383,7 +11452,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "async-trait", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "sp-core", "sp-inherents", @@ -11403,7 +11472,7 @@ dependencies = [ "lazy_static", "memory-db", "nohash-hasher", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parking_lot 0.12.1", "scale-info", "schnellru", @@ -11421,7 +11490,7 @@ version = "5.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "parity-wasm", "scale-info", "serde", @@ -11437,7 +11506,7 @@ name = "sp-version-proc-macro" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "proc-macro2", "quote", "syn 1.0.109", @@ -11451,7 +11520,7 @@ dependencies = [ "anyhow", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-std", "wasmi", "wasmtime", @@ -11462,7 +11531,7 @@ name = "sp-weights" version = "4.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#46bdb43422007619c4efa4b03e32ad7b2f9b282e" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "smallvec", @@ -11538,7 +11607,7 @@ dependencies = [ "pallet-election-provider-multi-phase", "pallet-staking", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "paste", "polkadot-core-primitives", "polkadot-runtime", @@ -11704,7 +11773,7 @@ dependencies = [ "futures", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sc-rpc-api", "sc-transaction-pool-api", "sp-api", @@ -11746,7 +11815,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#46bdb4342200 dependencies = [ "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sc-client-api", "sc-rpc-api", "scale-info", @@ -11766,7 +11835,7 @@ dependencies = [ "array-bytes", "async-trait", "futures", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sc-client-api", "sc-client-db", "sc-consensus", @@ -11970,7 +12039,7 @@ name = "test-parachain-adder" version = "0.9.39" dependencies = [ "dlmalloc", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-parachain", "sp-io", "sp-std", @@ -11986,7 +12055,7 @@ dependencies = [ "futures", "futures-timer", 
"log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-cli", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -12017,7 +12086,7 @@ version = "0.9.39" dependencies = [ "dlmalloc", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-parachain", "sp-io", "sp-std", @@ -12033,7 +12102,7 @@ dependencies = [ "futures", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-cli", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -12055,7 +12124,7 @@ dependencies = [ name = "test-parachains" version = "0.9.39" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-core", "test-parachain-adder", "test-parachain-halt", @@ -12645,7 +12714,7 @@ dependencies = [ "frame-try-runtime", "hex", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sc-cli", "sc-executor", "sc-service", @@ -13581,7 +13650,7 @@ dependencies = [ name = "westend-runtime" version = "0.9.39" dependencies = [ - "bitvec", + "bitvec 1.0.1", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -13635,7 +13704,7 @@ dependencies = [ "pallet-vesting", "pallet-xcm", "pallet-xcm-benchmarks", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -13915,6 +13984,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + [[package]] name = "wyz" version = "0.5.1" @@ -13993,7 +14068,7 @@ dependencies = [ "hex-literal", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "scale-info", "serde", "sp-io", @@ -14013,7 +14088,7 @@ dependencies = [ "pallet-balances", "pallet-transaction-payment", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-parachain", "polkadot-runtime-parachains", "primitive-types", @@ -14035,7 +14110,7 @@ dependencies = [ "frame-support", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "sp-arithmetic", "sp-core", "sp-io", @@ -14080,7 +14155,7 @@ name = "xcm-simulator" version = "0.9.39" dependencies = [ "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "paste", "polkadot-core-primitives", "polkadot-parachain", @@ -14101,7 +14176,7 @@ dependencies = [ "pallet-balances", "pallet-uniques", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-core-primitives", "polkadot-parachain", "polkadot-runtime-parachains", @@ -14127,7 +14202,7 @@ dependencies = [ "honggfuzz", "pallet-balances", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "polkadot-core-primitives", "polkadot-parachain", "polkadot-runtime-parachains", @@ -14192,7 +14267,7 @@ version = "0.9.39" dependencies = [ "futures-util", "lazy_static", - "parity-scale-codec", + "parity-scale-codec 3.4.0", "reqwest", "serde", "serde_json", diff --git a/node/core/backing/src/tests/mod.rs b/node/core/backing/src/tests/mod.rs index d68bc1d1f802..ca4e53e4487c 100644 --- a/node/core/backing/src/tests/mod.rs +++ b/node/core/backing/src/tests/mod.rs @@ -243,7 +243,7 @@ async fn test_startup(virtual_overseer: &mut VirtualOverseer, test_state: &TestS assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + RuntimeApiMessage::Request(parent, 
RuntimeApiRequest::StagingAsyncBackingParams(tx)) ) if parent == test_state.relay_parent => { tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); } diff --git a/node/core/backing/src/tests/prospective_parachains.rs b/node/core/backing/src/tests/prospective_parachains.rs index 4dc346f880ee..b0feb63af1ea 100644 --- a/node/core/backing/src/tests/prospective_parachains.rs +++ b/node/core/backing/src/tests/prospective_parachains.rs @@ -24,8 +24,8 @@ use polkadot_primitives::{vstaging as vstaging_primitives, BlockNumber, Header, use super::*; -const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParameters = - vstaging_primitives::AsyncBackingParameters { max_candidate_depth: 4, allowed_ancestry_len: 3 }; +const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams = + vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; struct TestLeaf { activated: ActivatedLeaf, @@ -56,7 +56,7 @@ async fn activate_leaf( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) ) if parent == leaf_hash => { tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); } diff --git a/node/core/prospective-parachains/src/tests.rs b/node/core/prospective-parachains/src/tests.rs index cd1f2d494cc4..8b38064ad818 100644 --- a/node/core/prospective-parachains/src/tests.rs +++ b/node/core/prospective-parachains/src/tests.rs @@ -26,7 +26,7 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_types::{jaeger, ActivatedLeaf, LeafStatus}; use polkadot_primitives::{ - vstaging::{AsyncBackingParameters, BackingState, Constraints, InboundHrmpLimitations}, + vstaging::{AsyncBackingParams, BackingState, Constraints, InboundHrmpLimitations}, CommittedCandidateReceipt, HeadData, Header, PersistedValidationData, ScheduledCore, ValidationCodeHash, }; @@ -34,8 +34,8 @@ use polkadot_primitives_test_helpers::make_candidate; use std::sync::Arc; const ALLOWED_ANCESTRY_LEN: u32 = 3; -const ASYNC_BACKING_PARAMETERS: AsyncBackingParameters = - AsyncBackingParameters { max_candidate_depth: 4, allowed_ancestry_len: ALLOWED_ANCESTRY_LEN }; +const ASYNC_BACKING_PARAMETERS: AsyncBackingParams = + AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: ALLOWED_ANCESTRY_LEN }; const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError = RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" }; @@ -214,7 +214,7 @@ async fn handle_leaf_activation( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) ) if parent == *hash => { tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); } @@ -480,7 +480,7 @@ fn should_do_no_work_if_async_backing_disabled_for_leaf() { assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) ) if parent == hash => { tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); } diff --git a/node/core/runtime-api/src/cache.rs b/node/core/runtime-api/src/cache.rs index 048a7a049ad1..2e92e360cdac 100644 --- 
a/node/core/runtime-api/src/cache.rs +++ b/node/core/runtime-api/src/cache.rs @@ -66,7 +66,7 @@ pub(crate) struct RequestResultCache { disputes: LruCache)>>, staging_para_backing_state: LruCache<(Hash, ParaId), Option>, - staging_async_backing_parameters: LruCache, + staging_async_backing_params: LruCache, } impl Default for RequestResultCache { @@ -96,7 +96,7 @@ impl Default for RequestResultCache { disputes: LruCache::new(DEFAULT_CACHE_CAP), staging_para_backing_state: LruCache::new(DEFAULT_CACHE_CAP), - staging_async_backing_parameters: LruCache::new(DEFAULT_CACHE_CAP), + staging_async_backing_params: LruCache::new(DEFAULT_CACHE_CAP), } } } @@ -408,19 +408,19 @@ impl RequestResultCache { self.staging_para_backing_state.put(key, value); } - pub(crate) fn staging_async_backing_parameters( + pub(crate) fn staging_async_backing_params( &mut self, key: &Hash, - ) -> Option<&vstaging_primitives::AsyncBackingParameters> { - self.staging_async_backing_parameters.get(key) + ) -> Option<&vstaging_primitives::AsyncBackingParams> { + self.staging_async_backing_params.get(key) } - pub(crate) fn cache_staging_async_backing_parameters( + pub(crate) fn cache_staging_async_backing_params( &mut self, key: Hash, - value: vstaging_primitives::AsyncBackingParameters, + value: vstaging_primitives::AsyncBackingParams, ) { - self.staging_async_backing_parameters.put(key, value); + self.staging_async_backing_params.put(key, value); } } @@ -461,5 +461,5 @@ pub(crate) enum RequestResult { Disputes(Hash, Vec<(SessionIndex, CandidateHash, DisputeState)>), StagingParaBackingState(Hash, ParaId, Option), - StagingAsyncBackingParameters(Hash, vstaging_primitives::AsyncBackingParameters), + StagingAsyncBackingParams(Hash, vstaging_primitives::AsyncBackingParams), } diff --git a/node/core/runtime-api/src/lib.rs b/node/core/runtime-api/src/lib.rs index 614193d170c9..d0d3b9fd699c 100644 --- a/node/core/runtime-api/src/lib.rs +++ b/node/core/runtime-api/src/lib.rs @@ -161,8 +161,8 @@ where StagingParaBackingState(relay_parent, para_id, constraints) => self .requests_cache .cache_staging_para_backing_state((relay_parent, para_id), constraints), - StagingAsyncBackingParameters(relay_parent, params) => - self.requests_cache.cache_staging_async_backing_parameters(relay_parent, params), + StagingAsyncBackingParams(relay_parent, params) => + self.requests_cache.cache_staging_async_backing_params(relay_parent, params), } } @@ -280,9 +280,9 @@ where Request::StagingParaBackingState(para, sender) => query!(staging_para_backing_state(para), sender) .map(|sender| Request::StagingParaBackingState(para, sender)), - Request::StagingAsyncBackingParameters(sender) => - query!(staging_async_backing_parameters(), sender) - .map(|sender| Request::StagingAsyncBackingParameters(sender)), + Request::StagingAsyncBackingParams(sender) => + query!(staging_async_backing_params(), sender) + .map(|sender| Request::StagingAsyncBackingParams(sender)), } } @@ -510,10 +510,10 @@ where sender ) }, - Request::StagingAsyncBackingParameters(sender) => { + Request::StagingAsyncBackingParams(sender) => { query!( - StagingAsyncBackingParameters, - staging_async_backing_parameters(), + StagingAsyncBackingParams, + staging_async_backing_params(), ver = Request::STAGING_BACKING_STATE, sender ) diff --git a/node/network/collator-protocol/src/collator_side/tests/mod.rs b/node/network/collator-protocol/src/collator_side/tests/mod.rs index 489421fde5c5..68b966b51f51 100644 --- a/node/network/collator-protocol/src/collator_side/tests/mod.rs +++ 
b/node/network/collator-protocol/src/collator_side/tests/mod.rs @@ -195,7 +195,7 @@ impl TestState { overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingAsyncBackingParameters(tx) + RuntimeApiRequest::StagingAsyncBackingParams(tx) )) => { assert_eq!(relay_parent, self.relay_parent); tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); @@ -326,7 +326,7 @@ async fn setup_system(virtual_overseer: &mut VirtualOverseer, test_state: &TestS overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingAsyncBackingParameters(tx) + RuntimeApiRequest::StagingAsyncBackingParams(tx) )) => { assert_eq!(relay_parent, test_state.relay_parent); tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); diff --git a/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs b/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs index 321d25ced9b7..7278947c3734 100644 --- a/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs +++ b/node/network/collator-protocol/src/collator_side/tests/prospective_parachains.rs @@ -21,8 +21,8 @@ use super::*; use polkadot_node_subsystem::messages::{ChainApiMessage, ProspectiveParachainsMessage}; use polkadot_primitives::{vstaging as vstaging_primitives, Header, OccupiedCore}; -const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParameters = - vstaging_primitives::AsyncBackingParameters { max_candidate_depth: 4, allowed_ancestry_len: 3 }; +const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams = + vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; fn get_parent_hash(hash: Hash) -> Hash { Hash::from_low_u64_be(hash.to_low_u64_be() + 1) @@ -52,7 +52,7 @@ async fn update_view( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( parent, - RuntimeApiRequest::StagingAsyncBackingParameters(tx), + RuntimeApiRequest::StagingAsyncBackingParams(tx), )) => { tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); (parent, new_view.get(&parent).copied().expect("Unknown parent requested")) diff --git a/node/network/collator-protocol/src/validator_side/tests/mod.rs b/node/network/collator-protocol/src/validator_side/tests/mod.rs index 0b3d1a2cf38c..9f855cce4e23 100644 --- a/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -433,15 +433,12 @@ async fn advertise_collation( .await; } -async fn assert_async_backing_parameters_request( - virtual_overseer: &mut VirtualOverseer, - hash: Hash, -) { +async fn assert_async_backing_params_request(virtual_overseer: &mut VirtualOverseer, hash: Hash) { assert_matches!( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( relay_parent, - RuntimeApiRequest::StagingAsyncBackingParameters(tx) + RuntimeApiRequest::StagingAsyncBackingParams(tx) )) => { assert_eq!(relay_parent, hash); tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)).unwrap(); @@ -468,8 +465,7 @@ fn act_on_advertisement() { ) .await; - assert_async_backing_parameters_request(&mut virtual_overseer, test_state.relay_parent) - .await; + assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -518,8 +514,7 @@ fn 
act_on_advertisement_vstaging() { ) .await; - assert_async_backing_parameters_request(&mut virtual_overseer, test_state.relay_parent) - .await; + assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -572,8 +567,7 @@ fn collator_reporting_works() { ) .await; - assert_async_backing_parameters_request(&mut virtual_overseer, test_state.relay_parent) - .await; + assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; @@ -691,7 +685,7 @@ fn fetch_one_collation_at_a_time() { // Iter over view since the order may change due to sorted invariant. for hash in our_view.iter() { - assert_async_backing_parameters_request(&mut virtual_overseer, *hash).await; + assert_async_backing_params_request(&mut virtual_overseer, *hash).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; } @@ -789,7 +783,7 @@ fn fetches_next_collation() { .await; for hash in our_view.iter() { - assert_async_backing_parameters_request(&mut virtual_overseer, *hash).await; + assert_async_backing_params_request(&mut virtual_overseer, *hash).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; } @@ -908,8 +902,7 @@ fn reject_connection_to_next_group() { ) .await; - assert_async_backing_parameters_request(&mut virtual_overseer, test_state.relay_parent) - .await; + assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -961,7 +954,7 @@ fn fetch_next_collation_on_invalid_collation() { .await; for hash in our_view.iter() { - assert_async_backing_parameters_request(&mut virtual_overseer, *hash).await; + assert_async_backing_params_request(&mut virtual_overseer, *hash).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; } @@ -1072,7 +1065,7 @@ fn inactive_disconnected() { ) .await; - assert_async_backing_parameters_request(&mut virtual_overseer, hash_a).await; + assert_async_backing_params_request(&mut virtual_overseer, hash_a).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -1127,7 +1120,7 @@ fn activity_extends_life() { .await; for hash in our_view.iter() { - assert_async_backing_parameters_request(&mut virtual_overseer, *hash).await; + assert_async_backing_params_request(&mut virtual_overseer, *hash).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; } @@ -1201,8 +1194,7 @@ fn disconnect_if_no_declare() { ) .await; - assert_async_backing_parameters_request(&mut virtual_overseer, test_state.relay_parent) - .await; + assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -1241,8 +1233,7 @@ fn disconnect_if_wrong_declare() { ) .await; - assert_async_backing_parameters_request(&mut virtual_overseer, test_state.relay_parent) - .await; + assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -1305,8 +1296,7 @@ fn view_change_clears_old_collators() { ) .await; - assert_async_backing_parameters_request(&mut 
virtual_overseer, test_state.relay_parent) - .await; + assert_async_backing_params_request(&mut virtual_overseer, test_state.relay_parent).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; let peer_b = PeerId::random(); @@ -1331,7 +1321,7 @@ fn view_change_clears_old_collators() { .await; test_state.group_rotation_info = test_state.group_rotation_info.bump_rotation(); - assert_async_backing_parameters_request(&mut virtual_overseer, hash_b).await; + assert_async_backing_params_request(&mut virtual_overseer, hash_b).await; respond_to_core_info_queries(&mut virtual_overseer, &test_state).await; assert_collator_disconnect(&mut virtual_overseer, peer_b.clone()).await; diff --git a/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs b/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs index e3705e5e8720..f601ab20a285 100644 --- a/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs +++ b/node/network/collator-protocol/src/validator_side/tests/prospective_parachains.rs @@ -24,8 +24,8 @@ use polkadot_primitives::{ Header, SigningContext, ValidatorId, }; -const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParameters = - vstaging_primitives::AsyncBackingParameters { max_candidate_depth: 4, allowed_ancestry_len: 3 }; +const ASYNC_BACKING_PARAMETERS: vstaging_primitives::AsyncBackingParams = + vstaging_primitives::AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; fn get_parent_hash(hash: Hash) -> Hash { Hash::from_low_u64_be(hash.to_low_u64_be() + 1) @@ -97,7 +97,7 @@ async fn update_view( overseer_recv(virtual_overseer).await, AllMessages::RuntimeApi(RuntimeApiMessage::Request( parent, - RuntimeApiRequest::StagingAsyncBackingParameters(tx), + RuntimeApiRequest::StagingAsyncBackingParams(tx), )) => { tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); (parent, new_view.get(&parent).copied().expect("Unknown parent requested")) diff --git a/node/network/statement-distribution/src/legacy_v1/tests.rs b/node/network/statement-distribution/src/legacy_v1/tests.rs index a6d83fe80aea..e32e356d65c5 100644 --- a/node/network/statement-distribution/src/legacy_v1/tests.rs +++ b/node/network/statement-distribution/src/legacy_v1/tests.rs @@ -38,7 +38,7 @@ use polkadot_node_primitives::{ use polkadot_node_subsystem::{ jaeger, messages::{network_bridge_event, AllMessages, RuntimeApiMessage, RuntimeApiRequest}, - ActivatedLeaf, LeafStatus, + ActivatedLeaf, LeafStatus, RuntimeApiError, }; use polkadot_node_subsystem_test_helpers::mock::make_ferdie_keystore; use polkadot_primitives::{ @@ -56,6 +56,10 @@ use std::{iter::FromIterator as _, sync::Arc, time::Duration}; // Some deterministic genesis hash for protocol names const GENESIS_HASH: Hash = Hash::repeat_byte(0xff); + +const ASYNC_BACKING_DISABLED_ERROR: RuntimeApiError = + RuntimeApiError::NotSupported { runtime_api_name: "test-runtime" }; + fn dummy_pvd() -> PersistedValidationData { PersistedValidationData { parent_head: HeadData(vec![7, 8, 9]), @@ -791,11 +795,11 @@ fn receiving_from_one_sends_to_another_and_to_candidate_backing() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) ) if r == hash_a => { - let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + let _ = 
tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)); } ); @@ -1024,11 +1028,11 @@ fn receiving_large_statement_from_one_sends_to_another_and_to_candidate_backing( assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) ) if r == hash_a => { - let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + let _ = tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)); } ); @@ -1563,11 +1567,11 @@ fn share_prioritizes_backing_group() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) ) if r == hash_a => { - let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + let _ = tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)); } ); @@ -1878,11 +1882,11 @@ fn peer_cant_flood_with_large_statements() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) ) if r == hash_a => { - let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + let _ = tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)); } ); @@ -2096,11 +2100,11 @@ fn handle_multiple_seconded_statements() { assert_matches!( handle.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + RuntimeApiMessage::Request(r, RuntimeApiRequest::StagingAsyncBackingParams(tx)) ) if r == relay_parent_hash => { - let _ = tx.send(Err(polkadot_node_subsystem::RuntimeApiError::NotSupported{runtime_api_name: "async_backing_parameters"})); + let _ = tx.send(Err(ASYNC_BACKING_DISABLED_ERROR)); } ); diff --git a/node/network/statement-distribution/src/vstaging/tests/cluster.rs b/node/network/statement-distribution/src/vstaging/tests/cluster.rs index 88fa13d98dc3..905e2813139c 100644 --- a/node/network/statement-distribution/src/vstaging/tests/cluster.rs +++ b/node/network/statement-distribution/src/vstaging/tests/cluster.rs @@ -1055,7 +1055,7 @@ fn ensure_seconding_limit_is_respected() { validator_count: 20, group_size: 4, local_validator: true, - async_backing_params: Some(AsyncBackingParameters { + async_backing_params: Some(AsyncBackingParams { max_candidate_depth: 1, allowed_ancestry_len: 3, }), diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index a08c0497c492..c28ea743c9ba 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -32,7 +32,7 @@ use polkadot_node_subsystem::messages::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_types::{jaeger, ActivatedLeaf, LeafStatus}; use polkadot_primitives::vstaging::{ - AssignmentPair, AsyncBackingParameters, BlockNumber, CommittedCandidateReceipt, CoreState, + AssignmentPair, AsyncBackingParams, BlockNumber, CommittedCandidateReceipt, CoreState, GroupRotationInfo, HeadData, Header, IndexedVec, PersistedValidationData, ScheduledCore, SessionIndex, 
SessionInfo, ValidatorPair, }; @@ -54,8 +54,8 @@ mod requests; type VirtualOverseer = test_helpers::TestSubsystemContextHandle; -const DEFAULT_ASYNC_BACKING_PARAMETERS: AsyncBackingParameters = - AsyncBackingParameters { max_candidate_depth: 4, allowed_ancestry_len: 3 }; +const DEFAULT_ASYNC_BACKING_PARAMETERS: AsyncBackingParams = + AsyncBackingParams { max_candidate_depth: 4, allowed_ancestry_len: 3 }; // Some deterministic genesis hash for req/res protocol names const GENESIS_HASH: Hash = Hash::repeat_byte(0xff); @@ -66,7 +66,7 @@ struct TestConfig { group_size: usize, // whether the local node should be a validator local_validator: bool, - async_backing_params: Option, + async_backing_params: Option, } #[derive(Debug, Clone)] @@ -383,7 +383,7 @@ async fn handle_leaf_activation( assert_matches!( virtual_overseer.recv().await, AllMessages::RuntimeApi( - RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParameters(tx)) + RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) ) if parent == *hash => { tx.send(Ok(test_state.config.async_backing_params.unwrap_or(DEFAULT_ASYNC_BACKING_PARAMETERS))).unwrap(); } diff --git a/node/network/statement-distribution/src/vstaging/tests/requests.rs b/node/network/statement-distribution/src/vstaging/tests/requests.rs index 313f2831a992..602b3d45b4a9 100644 --- a/node/network/statement-distribution/src/vstaging/tests/requests.rs +++ b/node/network/statement-distribution/src/vstaging/tests/requests.rs @@ -192,7 +192,7 @@ fn peer_reported_for_providing_statements_meant_to_be_masked_out() { validator_count, group_size, local_validator: true, - async_backing_params: Some(AsyncBackingParameters { + async_backing_params: Some(AsyncBackingParams { // Makes `seconding_limit: 2` (easier to hit the limit). max_candidate_depth: 1, allowed_ancestry_len: 3, diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index f25001796700..29fd971bfc1a 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -645,7 +645,7 @@ pub enum RuntimeApiRequest { /// Get candidate's acceptance limitations for asynchronous backing for a relay parent. /// /// If it's not supported by the Runtime, the async backing is said to be disabled. - StagingAsyncBackingParameters(RuntimeApiSender), + StagingAsyncBackingParams(RuntimeApiSender), } impl RuntimeApiRequest { diff --git a/node/subsystem-types/src/runtime_client.rs b/node/subsystem-types/src/runtime_client.rs index 985d3d8d4e07..7b369f1d8fe2 100644 --- a/node/subsystem-types/src/runtime_client.rs +++ b/node/subsystem-types/src/runtime_client.rs @@ -206,10 +206,10 @@ pub trait RuntimeApiSubsystemClient { // === Asynchronous backing API === /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. - async fn staging_async_backing_parameters( + async fn staging_async_backing_params( &self, at: Hash, - ) -> Result; + ) -> Result; /// Get the execution environment parameter set by parent hash, if stored async fn session_executor_params( @@ -400,10 +400,10 @@ where } /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. 
- async fn staging_async_backing_parameters( + async fn staging_async_backing_params( &self, at: Hash, - ) -> Result { - self.runtime_api().staging_async_backing_parameters(at) + ) -> Result { + self.runtime_api().staging_async_backing_params(at) } } diff --git a/node/subsystem-util/src/lib.rs b/node/subsystem-util/src/lib.rs index 25b4e9d7b368..e1e42e854b66 100644 --- a/node/subsystem-util/src/lib.rs +++ b/node/subsystem-util/src/lib.rs @@ -221,7 +221,7 @@ specialize_requests! { fn request_validation_code_hash(para_id: ParaId, assumption: OccupiedCoreAssumption) -> Option; ValidationCodeHash; fn request_on_chain_votes() -> Option; FetchOnChainVotes; - fn request_staging_async_backing_parameters() -> vstaging_primitives::AsyncBackingParameters; StagingAsyncBackingParameters; + fn request_staging_async_backing_params() -> vstaging_primitives::AsyncBackingParams; StagingAsyncBackingParams; fn request_session_executor_params(session_index: SessionIndex) -> Option; SessionExecutorParams; } diff --git a/node/subsystem-util/src/runtime/mod.rs b/node/subsystem-util/src/runtime/mod.rs index 5cfe4ed8586d..50191a24a15d 100644 --- a/node/subsystem-util/src/runtime/mod.rs +++ b/node/subsystem-util/src/runtime/mod.rs @@ -37,9 +37,8 @@ use polkadot_primitives::{ use crate::{ request_availability_cores, request_candidate_events, request_on_chain_votes, - request_session_index_for_child, request_session_info, - request_staging_async_backing_parameters, request_validation_code_by_hash, - request_validator_groups, + request_session_index_for_child, request_session_info, request_staging_async_backing_params, + request_validation_code_by_hash, request_validator_groups, }; /// Errors that can happen on runtime fetches. @@ -387,7 +386,7 @@ where Sender: SubsystemSender, { let result = - recv_runtime(request_staging_async_backing_parameters(relay_parent, sender).await).await; + recv_runtime(request_staging_async_backing_params(relay_parent, sender).await).await; if let Err(error::Error::RuntimeRequest(RuntimeApiError::NotSupported { runtime_api_name })) = &result @@ -401,10 +400,8 @@ where Ok(ProspectiveParachainsMode::Disabled) } else { - let vstaging_primitives::AsyncBackingParameters { - max_candidate_depth, - allowed_ancestry_len, - } = result?; + let vstaging_primitives::AsyncBackingParams { max_candidate_depth, allowed_ancestry_len } = + result?; Ok(ProspectiveParachainsMode::Enabled { max_candidate_depth: max_candidate_depth as _, allowed_ancestry_len: allowed_ancestry_len as _, diff --git a/primitives/src/runtime_api.rs b/primitives/src/runtime_api.rs index c5afeee6f61d..0b7de44ea043 100644 --- a/primitives/src/runtime_api.rs +++ b/primitives/src/runtime_api.rs @@ -228,6 +228,6 @@ sp_api::decl_runtime_apis! { /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. 
#[api_version(99)] - fn staging_async_backing_parameters() -> vstaging::AsyncBackingParameters; + fn staging_async_backing_params() -> vstaging::AsyncBackingParams; } } diff --git a/runtime/parachains/src/paras_inherent/mod.rs b/runtime/parachains/src/paras_inherent/mod.rs index 147db1ba871a..bcb6c0b7ebc7 100644 --- a/runtime/parachains/src/paras_inherent/mod.rs +++ b/runtime/parachains/src/paras_inherent/mod.rs @@ -343,7 +343,7 @@ impl Pallet { parent_hash, parent_storage_root, parent_number, - config.async_backing_parameters.allowed_ancestry_len, + config.async_backing_params.allowed_ancestry_len, ); }); } @@ -611,7 +611,7 @@ impl Pallet { parent_hash, parent_storage_root, parent_number, - config.async_backing_parameters.allowed_ancestry_len, + config.async_backing_params.allowed_ancestry_len, ); tracker diff --git a/runtime/parachains/src/runtime_api_impl/vstaging.rs b/runtime/parachains/src/runtime_api_impl/vstaging.rs index de82044aee8f..3e3623975797 100644 --- a/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -19,7 +19,7 @@ use crate::{configuration, dmp, hrmp, initializer, paras, shared, ump}; use primitives::{ vstaging::{ - AsyncBackingParameters, BackingState, CandidatePendingAvailability, Constraints, + AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, InboundHrmpLimitations, OutboundHrmpChannelLimitations, }, Id as ParaId, @@ -38,10 +38,7 @@ pub fn backing_state( // Thus, minimum relay parent is ensured to have asynchronous backing enabled. let now = >::block_number(); let min_relay_parent_number = >::allowed_relay_parents() - .hypothetical_earliest_block_number( - now, - config.async_backing_parameters.allowed_ancestry_len, - ); + .hypothetical_earliest_block_number(now, config.async_backing_params.allowed_ancestry_len); let required_parent = >::para_head(para_id)?; let validation_code_hash = >::current_code_hash(para_id)?; @@ -115,7 +112,7 @@ pub fn backing_state( Some(BackingState { constraints, pending_availability }) } -/// Implementation for `StagingAsyncBackingParameters` function from the runtime API -pub fn async_backing_parameters() -> AsyncBackingParameters { - >::config().async_backing_parameters +/// Implementation for `StagingAsyncBackingParams` function from the runtime API +pub fn async_backing_params() -> AsyncBackingParams { + >::config().async_backing_params } diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index fd906cd9ece5..9a3f173387e8 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -1769,8 +1769,8 @@ sp_api::impl_runtime_apis! { runtime_parachains::runtime_api_impl::vstaging::backing_state::(para_id) } - fn staging_async_backing_parameters() -> primitives::vstaging::AsyncBackingParameters { - runtime_parachains::runtime_api_impl::vstaging::async_backing_parameters::() + fn staging_async_backing_params() -> primitives::vstaging::AsyncBackingParams { + runtime_parachains::runtime_api_impl::vstaging::async_backing_params::() } } diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index 108694f7f3e1..64c001a2f0f7 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -1471,8 +1471,8 @@ sp_api::impl_runtime_apis! 
{ runtime_parachains::runtime_api_impl::vstaging::backing_state::(para_id) } - fn staging_async_backing_parameters() -> primitives::vstaging::AsyncBackingParameters { - runtime_parachains::runtime_api_impl::vstaging::async_backing_parameters::() + fn staging_async_backing_params() -> primitives::vstaging::AsyncBackingParams { + runtime_parachains::runtime_api_impl::vstaging::async_backing_params::() } } From a9a06aa53cedfdde420707ddee9d35e9516e0707 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Mon, 10 Apr 2023 17:39:03 +0300 Subject: [PATCH 48/76] scheduler: update storage migration (#6963) * update scheduler migration * Adjust weight to account for storage read --- runtime/kusama/src/lib.rs | 2 + runtime/parachains/src/scheduler.rs | 12 ++--- runtime/parachains/src/scheduler/migration.rs | 49 +++++++++++-------- runtime/polkadot/src/lib.rs | 2 + runtime/rococo/src/lib.rs | 6 ++- runtime/westend/src/lib.rs | 2 + 6 files changed, 43 insertions(+), 30 deletions(-) diff --git a/runtime/kusama/src/lib.rs b/runtime/kusama/src/lib.rs index e1e9c5dcfb64..139d3ed1f18e 100644 --- a/runtime/kusama/src/lib.rs +++ b/runtime/kusama/src/lib.rs @@ -1491,6 +1491,8 @@ pub type Migrations = ( // Unreleased - add new migrations here: pallet_nomination_pools::migration::v5::MigrateToV5, parachains_configuration::migration::v5::MigrateToV5, + /* Asynchronous backing migration */ + parachains_scheduler::migration::v1::MigrateToV1, ); /// Unchecked extrinsic type as expected by this runtime. diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index 1b6679dcd784..f2c2eaee82e5 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -36,7 +36,6 @@ //! over time. use frame_support::pallet_prelude::*; -use frame_system::pallet_prelude::*; use primitives::{ CollatorId, CoreIndex, CoreOccupied, GroupIndex, GroupRotationInfo, Id as ParaId, ParathreadClaim, ParathreadEntry, ScheduledCore, ValidatorIndex, @@ -52,7 +51,9 @@ pub use pallet::*; #[cfg(test)] mod tests; -mod migration; +pub mod migration; + +const LOG_TARGET: &str = "runtime::scheduler"; /// A queued parathread entry, pre-assigned to a core. #[derive(Encode, Decode, TypeInfo)] @@ -166,13 +167,6 @@ pub mod pallet { #[pallet::config] pub trait Config: frame_system::Config + configuration::Config + paras::Config {} - #[pallet::hooks] - impl Hooks> for Pallet { - fn on_runtime_upgrade() -> Weight { - migration::on_runtime_upgrade::() - } - } - /// All the validator groups. One for each core. Indices are into `ActiveValidators` - not the /// broader set of Polkadot validators, but instead just the subset used for parachains during /// this session. diff --git a/runtime/parachains/src/scheduler/migration.rs b/runtime/parachains/src/scheduler/migration.rs index 3960dd238637..88569dfec11b 100644 --- a/runtime/parachains/src/scheduler/migration.rs +++ b/runtime/parachains/src/scheduler/migration.rs @@ -16,29 +16,14 @@ //! A module that is responsible for migration of storage. -use crate::scheduler::{self, AssignmentKind, Config, Pallet, Scheduled}; -use frame_support::{pallet_prelude::*, traits::StorageVersion, weights::Weight}; -use parity_scale_codec::{Decode, Encode}; +use frame_support::traits::StorageVersion; /// The current storage version. pub const STORAGE_VERSION: StorageVersion = StorageVersion::new(1); -/// Call this during the next runtime upgrade for this module.
-pub fn on_runtime_upgrade() -> Weight { - let mut weight: Weight = Weight::zero(); - - if StorageVersion::get::>() == 0 { - weight = weight - .saturating_add(v1::migrate::()) - .saturating_add(T::DbWeight::get().writes(1)); - StorageVersion::new(1).put::>(); - } - - weight -} - mod v0 { - use super::*; + use crate::scheduler::{self, AssignmentKind}; + use parity_scale_codec::{Decode, Encode}; use primitives::{CoreIndex, GroupIndex, Id as ParaId}; #[derive(Encode, Decode)] @@ -58,11 +43,35 @@ mod v0 { /// V1: Group index is dropped from the core assignment, it's explicitly computed during /// candidates processing. -mod v1 { +pub mod v1 { use super::*; + use crate::scheduler::{self, Config, Pallet, Scheduled}; + use frame_support::{pallet_prelude::*, traits::OnRuntimeUpgrade, weights::Weight}; use sp_std::vec::Vec; - pub fn migrate() -> Weight { + pub struct MigrateToV1(sp_std::marker::PhantomData); + impl OnRuntimeUpgrade for MigrateToV1 { + fn on_runtime_upgrade() -> Weight { + let mut weight: Weight = T::DbWeight::get().reads(1); + + if StorageVersion::get::>() < STORAGE_VERSION { + log::info!(target: scheduler::LOG_TARGET, "Migrating scheduler storage to v1"); + weight = weight + .saturating_add(migrate::()) + .saturating_add(T::DbWeight::get().writes(1)); + STORAGE_VERSION.put::>(); + } else { + log::info!( + target: scheduler::LOG_TARGET, + "Scheduler storage up to date - no need for migration" + ); + } + + weight + } + } + + fn migrate() -> Weight { let _ = Scheduled::::translate(|scheduled: Option>| { scheduled.map(|scheduled| { scheduled.into_iter().map(|old| scheduler::CoreAssignment::from(old)).collect() diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index 1710c23c7450..12a23d265f2b 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -1427,6 +1427,8 @@ pub type Migrations = ( // Unreleased - add new migrations here: pallet_nomination_pools::migration::v5::MigrateToV5, parachains_configuration::migration::v5::MigrateToV5, + /* Asynchronous backing migration */ + parachains_scheduler::migration::v1::MigrateToV1, ); /// Unchecked extrinsic type as expected by this runtime. diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 9a3f173387e8..02653570cd58 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -1495,7 +1495,11 @@ pub type UncheckedExtrinsic = /// /// This contains the combined migrations of the last 10 releases. It allows to skip runtime /// upgrades in case governance decides to do so. -pub type Migrations = parachains_configuration::migration::v5::MigrateToV5; +pub type Migrations = ( + parachains_configuration::migration::v5::MigrateToV5, + /* Asynchronous backing migration */ + parachains_scheduler::migration::v1::MigrateToV1, +); /// Executive: handles dispatch to the various modules. pub type Executive = frame_executive::Executive< diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index 64c001a2f0f7..e5513cb3a295 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -1224,6 +1224,8 @@ pub type Migrations = ( // Unreleased - add new migrations here: pallet_nomination_pools::migration::v5::MigrateToV5, parachains_configuration::migration::v5::MigrateToV5, + /* Asynchronous backing migration */ + parachains_scheduler::migration::v1::MigrateToV1, ); /// Unchecked extrinsic type as expected by this runtime.
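The patch above replaces the pallet's ad-hoc `on_runtime_upgrade` hook with the version-gated `OnRuntimeUpgrade` pattern: read the pallet's on-chain `StorageVersion`, run the translation only when it is below the target version, and bump the version in the same upgrade so that re-running the migration is a no-op. A minimal, dependency-free sketch of that control flow follows; `OnChainState`, `migrate_to_v1`, and the plain `u16`/`u64` stand-ins for `StorageVersion` and `Weight` are illustrative assumptions, not the actual pallet code:

```rust
// Sketch of a version-gated storage migration. The types are stand-ins:
// `storage_version` models the pallet's `StorageVersion`, `scheduled` models
// the `Scheduled` storage item, and `weight` models accumulated `Weight`.
const TARGET_VERSION: u16 = 1;

struct OnChainState {
    storage_version: u16,
    scheduled: Vec<u32>,
    weight: u64,
}

fn migrate_to_v1(state: &mut OnChainState) {
    // Always account for the initial version read, mirroring
    // `T::DbWeight::get().reads(1)` in the real migration.
    state.weight += 1;

    if state.storage_version < TARGET_VERSION {
        // Translate every stored item into the new format (a trivial map here).
        state.scheduled.iter_mut().for_each(|v| *v += 100);
        // Bump the version last, so a second run takes the no-op path.
        state.storage_version = TARGET_VERSION;
        state.weight += 1 + state.scheduled.len() as u64; // version write + item writes
    }
    // Otherwise: storage is already up to date, nothing to do.
}

fn main() {
    let mut state = OnChainState { storage_version: 0, scheduled: vec![1, 2, 3], weight: 0 };
    migrate_to_v1(&mut state);
    migrate_to_v1(&mut state); // idempotent: second run only pays the version read
    assert_eq!(state.scheduled, vec![101, 102, 103]);
    assert_eq!(state.storage_version, 1);
    assert_eq!(state.weight, 6); // 2 reads + 1 version write + 3 item writes
}
```

The key property is idempotence: because the version check gates the translation and the version bump happens in the same upgrade, a runtime whose `Migrations` tuple lists the migration across several releases (as the runtimes above do) applies it at most once.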
From 664d6b2093e5a20229a25c1683e9154fe1e24ce5 Mon Sep 17 00:00:00 2001 From: Bradley Olson <34992650+BradleyOlson64@users.noreply.github.com> Date: Mon, 10 Apr 2023 12:04:55 -0700 Subject: [PATCH 49/76] Statement Distribution Guide Edits (#7025) * Statement distribution guide edits * Addressed Marcin's comments --- node/core/prospective-parachains/Cargo.toml | 4 -- .../src/inclusion_emulator/staging.rs | 3 - .../node/backing/statement-distribution.md | 59 +++++++++++-------- 3 files changed, 34 insertions(+), 32 deletions(-) diff --git a/node/core/prospective-parachains/Cargo.toml b/node/core/prospective-parachains/Cargo.toml index 7a149e268ef4..b088202d3736 100644 --- a/node/core/prospective-parachains/Cargo.toml +++ b/node/core/prospective-parachains/Cargo.toml @@ -27,7 +27,3 @@ sc-keystore = { git = "https://github.com/paritytech/substrate", branch = "maste sp-application-crypto = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } sp-keystore = { git = "https://github.com/paritytech/substrate", branch = "master" } - -[features] -# If not enabled, the dispute coordinator will do nothing. -disputes = [] diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 66868f16925d..62a0078ba9e7 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -13,9 +13,6 @@ //! The implementation of the inclusion emulator for the 'staging' runtime version. //! -//! This is currently `v1` (`v2`?), but will evolve to `v3`. -// TODO https://github.com/paritytech/polkadot/issues/4803 -//! //! A set of utilities for node-side code to emulate the logic the runtime uses for checking //! parachain blocks in order to build prospective parachains that are produced ahead of the //! relay chain. These utilities allow the node-side to predict, with high accuracy, what diff --git a/roadmap/implementers-guide/src/node/backing/statement-distribution.md b/roadmap/implementers-guide/src/node/backing/statement-distribution.md index 9259acf7387d..5f779baed41d 100644 --- a/roadmap/implementers-guide/src/node/backing/statement-distribution.md +++ b/roadmap/implementers-guide/src/node/backing/statement-distribution.md @@ -1,6 +1,6 @@ # Statement Distribution -This subsystem is responsible for distributing signed statements that we have generated and forwarding them. This subsystem sends received Candidate Receipts and statements to the [Candidate Backing subsystem](candidate-backing.md) to handle the validator's statements. On receiving `StatementDistributionMessage::Share`, this distributes the message across the network to ensure a fast backing process and getting all statements quickly for distribution. +This subsystem is responsible for distributing signed statements that we have generated and forwarding statements generated by our peers. Received candidate receipts and statements are passed to the [Candidate Backing subsystem](candidate-backing.md) to handle producing local statements. On receiving `StatementDistributionMessage::Share`, this subsystem distributes the message across the network with redundancy to ensure a fast backing process. ## Overview @@ -170,20 +170,23 @@ backing subsystem itself.
- Handled by `handle_backed_candidate_message` - `StatementDistributionMessage::NetworkBridgeUpdate` - Handled by `handle_network_update` - - v1 compatibility - - `Statement` - - Notification of a signed statement. - - Handled by `handle_incoming_statement` - - `BackedCandidateManifest` - - Notification of a backed candidate being known by the sending node. - - For the candidate being requested by the receiving node if needed. - - Announcement - - Handled by `handle_incoming_manifest` - - `BackedCandidateKnown` - - Notification of a backed candidate being known by the sending node. - - For informing a receiving node which already has the candidate. - - Acknowledgement. - - Handled by `handle_incoming_acknowledgement` + +#### Network bridge events + +- v1 compatibility +- `Statement` + - Notification of a signed statement. + - Handled by `handle_incoming_statement` +- `BackedCandidateManifest` + - Notification of a backed candidate being known by the sending node. + - For the candidate being requested by the receiving node if needed. + - Announcement + - Handled by `handle_incoming_manifest` +- `BackedCandidateKnown` + - Notification of a backed candidate being known by the sending node. + - For informing a receiving node which already has the candidate. + - Acknowledgement. + - Handled by `handle_incoming_acknowledgement` ### Outgoing @@ -261,7 +264,7 @@ A manifest is a message about a known backed candidate, along with a description of the statements backing it. It can be one of two kinds: - `Full`: Contains information about the candidate and should be sent to peers - who may not have the candidate yet. + who may not have the candidate yet. This is also called an `Announcement`. - `Acknowledgement`: Omits information implicit in the candidate, and should be sent to peers which are guaranteed to have the candidate already. @@ -340,12 +343,13 @@ validators in the group, and pending statements. For the full protocol, see ## Grid Module The grid module provides distribution of backed candidates and late statements -outside the group. For the full protocol, see the "Protocol" section. +outside the backing group. For the full protocol, see the "Protocol" section. ### Grid Topology For distributing outside our cluster we use a 2D grid topology. This limits the -amount of peers we send messages to, and handles view updates. +For distributing outside our cluster (aka backing group) we use a 2D grid +topology. This limits the number of peers we send messages to, and handles +view updates. The basic operation of the grid topology is that: @@ -393,7 +397,10 @@ some hypothetical scenarios: candidates from group A from 9. 10 would propagate them to the nodes in {1, 4, 7} that are not in A. - **Scenario 2:** 6 is in group A instead of 9, and 7 is not in group A. 10 can - receive from 7 or 9. It would not propagate any further. + receive group A messages from 7 or 9. 10 will try to relay these messages, but + 7 and 9 together should have already propagated the message to all x/y + peers of 10. If so, then 10 will just receive acknowledgements in reply rather + than requests. - **Scenario 3:** 10 itself is in group A. 10 would not receive candidates from this group from any other nodes through the grid. It would itself send such candidates to all its neighbors that are not in A. @@ -430,10 +437,11 @@ description of the flow. See module-docs for full details.
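To make the row-and-column mechanics in the scenarios above concrete, here is a simplified sketch of deriving a node's grid neighbors from validator indices. The row-major layout and the `grid_neighbors` helper are illustrative assumptions; the production topology shuffles validator indices per session rather than using a fixed layout:

```rust
// Validators 0..n laid out row-major on a grid of width ceil(sqrt(n)).
// Returns (row neighbors, column neighbors) for validator `me`.
fn grid_neighbors(me: usize, n: usize) -> (Vec<usize>, Vec<usize>) {
    let width = (n as f64).sqrt().ceil() as usize;
    let (my_row, my_col) = (me / width, me % width);
    let mut row = Vec::new();
    let mut col = Vec::new();
    for v in 0..n {
        if v == me {
            continue
        }
        if v / width == my_row {
            row.push(v); // shares a row: receives our "x" messages
        }
        if v % width == my_col {
            col.push(v); // shares a column: receives our "y" messages
        }
    }
    (row, col)
}

fn main() {
    // Nine validators on a 3x3 grid, matching the {1, 4, 7} column used above;
    // node 4 sits in the middle.
    let (row, col) = grid_neighbors(4, 9);
    assert_eq!(row, vec![3, 5]);
    assert_eq!(col, vec![1, 7]);
}
```

Under this layout a message reaches any node in at most two hops: one hop along the originator's row or column, and one more along the recipient's, which is what bounds the fan-out described in the scenarios.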
## Glossary -- **Acknowledgement:** A notification that is sent to a validator that already - has the candidate, to inform them that the sending node knows the candidate. -- **Announcement:** A notification of a backed candidate being known by the - sending node. Is a full manifest and initiates manifest exchange. +- **Acknowledgement:** A partial manifest sent to a validator that already has the + candidate to inform them that the sending node also knows the candidate. + Concludes a manifest exchange. +- **Announcement:** A full manifest indicating that a backed candidate is known by + the sending node. Initiates a manifest exchange. - **Attestation:** See "Statement". - **Backable vs. Backed:** - Note that we sometimes use "backed" to refer to candidates that are @@ -446,7 +454,8 @@ description of the flow. See module-docs for full details. - **Fragment tree:** A parachain fragment not referenced by the relay-chain. It is a tree of prospective parachain blocks. - **Manifest:** A message about a known backed candidate, along with a - description of the statements backing it. See "Manifests" section. + description of the statements backing it. There are two kinds of manifest, + `Acknowledgement` and `Announcement`. See "Manifests" section. - **Peer:** Another validator that a validator is connected to. - **Request/response:** A protocol used to lazily request and receive heavy candidate data when needed. From ed9420b9d04130374e6b3bb06e9e36ca5a719b89 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Wed, 12 Apr 2023 14:40:19 +0200 Subject: [PATCH 50/76] Add attested candidate request retry timeouts (#6833) Co-authored-by: Chris Sosnin <48099298+slumber@users.noreply.github.com> Co-authored-by: asynchronous rob Co-authored-by: Robert Habermeier Co-authored-by: Chris Sosnin Fix async backing statement distribution tests (#6621) Resolve some todos in async backing statement-distribution branch (#6482) Fix clippy errors in statement distribution branch (#6720) --- .../network/statement-distribution/Cargo.toml | 1 + .../network/statement-distribution/src/lib.rs | 18 +- .../src/vstaging/mod.rs | 51 ++- .../src/vstaging/requests.rs | 106 ++++++- .../src/vstaging/tests/mod.rs | 9 + .../src/vstaging/tests/requests.rs | 299 ++++++++++++++++++ 6 files changed, 460 insertions(+), 24 deletions(-) diff --git a/node/network/statement-distribution/Cargo.toml b/node/network/statement-distribution/Cargo.toml index 7a54e46a89be..9f5e65fb2285 100644 --- a/node/network/statement-distribution/Cargo.toml +++ b/node/network/statement-distribution/Cargo.toml @@ -7,6 +7,7 @@ edition.workspace = true [dependencies] futures = "0.3.21" +futures-timer = "3" gum = { package = "tracing-gum", path = "../../gum" } polkadot-primitives = { path = "../../../primitives" } sp-staking = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } diff --git a/node/network/statement-distribution/src/lib.rs b/node/network/statement-distribution/src/lib.rs index 87e2402dfeac..2383042e092a 100644 --- a/node/network/statement-distribution/src/lib.rs +++ b/node/network/statement-distribution/src/lib.rs @@ -103,6 +103,9 @@ enum MuxedMessage { Responder(Option), /// Messages from answered requests. Response(vstaging::UnhandledResponse), + /// Message that a request is ready to be retried. This just acts as a signal that we should + /// dispatch all pending requests again. 
+ RetryRequest(()), } #[overseer::contextbounds(StatementDistribution, prefix = self::overseer)] @@ -114,19 +117,22 @@ impl MuxedMessage { from_v1_responder: &mut mpsc::Receiver, from_responder: &mut mpsc::Receiver, ) -> MuxedMessage { + let (request_manager, response_manager) = state.request_and_response_managers(); // We are only fusing here to make `select` happy, in reality we will quit if one of those // streams end: let from_orchestra = ctx.recv().fuse(); let from_v1_requester = from_v1_requester.next(); let from_v1_responder = from_v1_responder.next(); let from_responder = from_responder.next(); - let receive_response = vstaging::receive_response(state).fuse(); + let receive_response = vstaging::receive_response(response_manager).fuse(); + let retry_request = vstaging::next_retry(request_manager).fuse(); futures::pin_mut!( from_orchestra, from_v1_requester, from_v1_responder, from_responder, - receive_response + receive_response, + retry_request, ); futures::select! { msg = from_orchestra => MuxedMessage::Subsystem(msg.map_err(FatalError::SubsystemReceive)), @@ -134,6 +140,7 @@ impl MuxedMessage { msg = from_v1_responder => MuxedMessage::V1Responder(msg), msg = from_responder => MuxedMessage::Responder(msg), msg = receive_response => MuxedMessage::Response(msg), + msg = retry_request => MuxedMessage::RetryRequest(msg), } } } @@ -190,6 +197,7 @@ impl StatementDistributionSubsystem { .map_err(FatalError::SpawnTask)?; loop { + // Wait for the next message. let message = MuxedMessage::receive( &mut ctx, &mut state, @@ -198,6 +206,7 @@ impl StatementDistributionSubsystem { &mut res_receiver, ) .await; + match message { MuxedMessage::Subsystem(result) => { let result = self @@ -244,6 +253,11 @@ impl StatementDistributionSubsystem { MuxedMessage::Response(result) => { vstaging::handle_response(&mut ctx, &mut state, result).await; }, + MuxedMessage::RetryRequest(()) => { + // A pending request is ready to retry. This is only a signal to call + // `dispatch_requests` again. + () + }, }; vstaging::dispatch_requests(&mut ctx, &mut state).await; diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 96565d064876..711ad0df33b2 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -58,9 +58,12 @@ use futures::{ SinkExt, StreamExt, }; -use std::collections::{ - hash_map::{Entry, HashMap}, - HashSet, +use std::{ + collections::{ + hash_map::{Entry, HashMap}, + HashSet, + }, + time::{Duration, Instant}, }; use crate::{ @@ -74,7 +77,7 @@ use groups::Groups; use requests::{CandidateIdentifier, RequestProperties}; use statement_store::{StatementOrigin, StatementStore}; -pub use requests::{RequestManager, UnhandledResponse}; +pub use requests::{RequestManager, ResponseManager, UnhandledResponse}; mod candidates; mod cluster; @@ -121,6 +124,9 @@ const BENEFIT_VALID_STATEMENT: Rep = Rep::BenefitMajor("Peer provided a valid st const BENEFIT_VALID_STATEMENT_FIRST: Rep = Rep::BenefitMajorFirst("Peer was the first to provide a given valid statement"); +/// The amount of time to wait before retrying when the node sends a request and it is dropped. 
+pub(crate) const REQUEST_RETRY_DELAY: Duration = Duration::from_secs(1); + struct PerRelayParentState { local_validator: Option, statement_store: StatementStore, @@ -200,6 +206,7 @@ pub(crate) struct State { keystore: KeystorePtr, authorities: HashMap, request_manager: RequestManager, + response_manager: ResponseManager, } impl State { @@ -214,8 +221,15 @@ impl State { keystore, authorities: HashMap::new(), request_manager: RequestManager::new(), + response_manager: ResponseManager::new(), } } + + pub(crate) fn request_and_response_managers( + &mut self, + ) -> (&mut RequestManager, &mut ResponseManager) { + (&mut self.request_manager, &mut self.response_manager) + } } // For the provided validator index, if there is a connected peer controlling the given authority @@ -2312,6 +2326,10 @@ async fn apply_post_confirmation( /// Dispatch pending requests for candidate data & statements. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut State) { + if !state.request_manager.has_pending_requests() { + return + } + let peers = &state.peers; let peer_advertised = |identifier: &CandidateIdentifier, peer: &_| { let peer_data = peers.get(peer)?; @@ -2373,7 +2391,11 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St }) }; - while let Some(request) = state.request_manager.next_request(request_props, peer_advertised) { + while let Some(request) = state.request_manager.next_request( + &mut state.response_manager, + request_props, + peer_advertised, + ) { // Peer is supposedly connected. ctx.send_message(NetworkBridgeTxMessage::SendRequests( vec![Requests::AttestedCandidateVStaging(request)], @@ -2386,13 +2408,24 @@ pub(crate) async fn dispatch_requests(ctx: &mut Context, state: &mut St /// Wait on the next incoming response. If there are no requests pending, this /// future never resolves. It is the responsibility of the user of this API /// to interrupt the future. -pub(crate) async fn receive_response(state: &mut State) -> UnhandledResponse { - match state.request_manager.await_incoming().await { +pub(crate) async fn receive_response(response_manager: &mut ResponseManager) -> UnhandledResponse { + match response_manager.incoming().await { Some(r) => r, None => futures::future::pending().await, } } +/// Wait on the next soonest retry on a pending request. If there are no retries pending, this +/// future never resolves. Note that this only signals that a request is ready to retry; the user of +/// this API must call `dispatch_requests`. +pub(crate) async fn next_retry(request_manager: &mut RequestManager) { + match request_manager.next_retry_time() { + Some(instant) => + futures_timer::Delay::new(instant.saturating_duration_since(Instant::now())).await, + None => futures::future::pending().await, + } +} + /// Handles an incoming response. This does the actual work of validating the response, /// importing statements, sending acknowledgements, etc. #[overseer::contextbounds(StatementDistribution, prefix=self::overseer)] @@ -2614,7 +2647,7 @@ pub(crate) fn answer_request(state: &mut State, message: ResponderMessage) { } /// Messages coming from the background respond task. -pub struct ResponderMessage { +pub(crate) struct ResponderMessage { request: IncomingRequest, sent_feedback: oneshot::Sender<()>, } @@ -2622,7 +2655,7 @@ pub struct ResponderMessage { /// A fetching task, taking care of fetching candidates via request/response. 
/// /// Runs in a background task and feeds request to [`answer_request`] through [`MuxedMessage`]. -pub async fn respond_task( +pub(crate) async fn respond_task( mut receiver: IncomingRequestReceiver, mut sender: mpsc::Sender, ) { diff --git a/node/network/statement-distribution/src/vstaging/requests.rs b/node/network/statement-distribution/src/vstaging/requests.rs index 507bbbb0ef18..355c2b0a85b3 100644 --- a/node/network/statement-distribution/src/vstaging/requests.rs +++ b/node/network/statement-distribution/src/vstaging/requests.rs @@ -29,6 +29,7 @@ use super::{ BENEFIT_VALID_RESPONSE, BENEFIT_VALID_STATEMENT, COST_IMPROPERLY_DECODED_RESPONSE, COST_INVALID_RESPONSE, COST_INVALID_SIGNATURE, COST_UNREQUESTED_RESPONSE_STATEMENT, + REQUEST_RETRY_DELAY, }; use crate::LOG_TARGET; @@ -49,9 +50,12 @@ use polkadot_primitives::vstaging::{ use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; -use std::collections::{ - hash_map::{Entry as HEntry, HashMap}, - HashSet, VecDeque, +use std::{ + collections::{ + hash_map::{Entry as HEntry, HashMap}, + HashSet, VecDeque, + }, + time::Instant, }; /// An identifier for a candidate. @@ -84,7 +88,27 @@ struct TaggedResponse { pub struct RequestedCandidate { priority: Priority, known_by: VecDeque, + /// Has the request been sent out and a response not yet received? in_flight: bool, + /// The timestamp for the next time we should retry, if the response failed. + next_retry_time: Option, +} + +impl RequestedCandidate { + fn is_pending(&self) -> bool { + if self.in_flight { + return false + } + + if let Some(next_retry_time) = self.next_retry_time { + let can_retry = Instant::now() >= next_retry_time; + if !can_retry { + return false + } + } + + true + } } #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] @@ -130,7 +154,6 @@ impl<'a> Entry<'a> { /// A manager for outgoing requests. pub struct RequestManager { - pending_responses: FuturesUnordered>, requests: HashMap, // sorted by priority. by_priority: Vec<(Priority, CandidateIdentifier)>, @@ -142,7 +165,6 @@ impl RequestManager { /// Create a new [`RequestManager`]. pub fn new() -> Self { RequestManager { - pending_responses: FuturesUnordered::new(), requests: HashMap::new(), by_priority: Vec::new(), unique_identifiers: HashMap::new(), @@ -166,6 +188,7 @@ impl RequestManager { priority: Priority { attempts: 0, origin: Origin::Unspecified }, known_by: VecDeque::new(), in_flight: false, + next_retry_time: None, }), true, ), @@ -241,6 +264,30 @@ impl RequestManager { } } + /// Returns true if there are pending requests that are dispatchable. + pub fn has_pending_requests(&self) -> bool { + for (_id, entry) in &self.requests { + if entry.is_pending() { + return true + } + } + + false + } + + /// Returns an instant at which the next request to be retried will be ready. + pub fn next_retry_time(&mut self) -> Option { + let mut next = None; + for (_id, request) in &self.requests { + if let Some(next_retry_time) = request.next_retry_time { + if next.map_or(true, |next| next_retry_time < next) { + next = Some(next_retry_time); + } + } + } + next + } + /// Yields the next request to dispatch, if there is any. /// /// This function accepts two closures as an argument. @@ -254,10 +301,11 @@ impl RequestManager { /// threshold and returns `None` if the peer is no longer connected. 
pub fn next_request( &mut self, + response_manager: &mut ResponseManager, request_props: impl Fn(&CandidateIdentifier) -> Option, peer_advertised: impl Fn(&CandidateIdentifier, &PeerId) -> Option, ) -> Option> { - if self.pending_responses.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize { + if response_manager.len() >= MAX_PARALLEL_ATTESTED_CANDIDATE_REQUESTS as usize { return None } @@ -282,7 +330,7 @@ impl RequestManager { Some(e) => e, }; - if entry.in_flight { + if !entry.is_pending() { continue } @@ -313,7 +361,7 @@ impl RequestManager { ); let stored_id = id.clone(); - self.pending_responses.push(Box::pin(async move { + response_manager.push(Box::pin(async move { TaggedResponse { identifier: stored_id, requested_peer: target, @@ -343,15 +391,34 @@ impl RequestManager { res } +} + +/// A manager for pending responses. +pub struct ResponseManager { + pending_responses: FuturesUnordered>, +} + +impl ResponseManager { + pub fn new() -> Self { + Self { pending_responses: FuturesUnordered::new() } + } /// Await the next incoming response to a sent request, or immediately /// return `None` if there are no pending responses. - pub async fn await_incoming(&mut self) -> Option { + pub async fn incoming(&mut self) -> Option { self.pending_responses .next() .await .map(|response| UnhandledResponse { response }) } + + fn len(&self) -> usize { + self.pending_responses.len() + } + + fn push(&mut self, response: BoxFuture<'static, TaggedResponse>) { + self.pending_responses.push(response); + } } /// Properties used in target selection and validation of a request. @@ -484,6 +551,8 @@ impl UnhandledResponse { Err(_) => unreachable!("requested candidates always have a priority entry; qed"), }; + // Set the next retry time before clearing the `in_flight` flag. 
+ entry.next_retry_time = Some(Instant::now() + REQUEST_RETRY_DELAY); entry.in_flight = false; entry.priority.attempts += 1; @@ -884,6 +953,7 @@ mod tests { #[test] fn handle_outdated_response_due_to_requests_for_different_identifiers() { let mut request_manager = RequestManager::new(); + let mut response_manager = ResponseManager::new(); let relay_parent = Hash::from_low_u64_le(1); let mut candidate_receipt = test_helpers::dummy_committed_candidate_receipt(relay_parent); @@ -924,9 +994,13 @@ mod tests { let peer_advertised = |_identifier: &CandidateIdentifier, _peer: &_| { Some(StatementFilter::full(group_size)) }; - let outgoing = request_manager.next_request(request_props, peer_advertised).unwrap(); + let outgoing = request_manager + .next_request(&mut response_manager, request_props, peer_advertised) + .unwrap(); assert_eq!(outgoing.payload.candidate_hash, candidate); - let outgoing = request_manager.next_request(request_props, peer_advertised).unwrap(); + let outgoing = request_manager + .next_request(&mut response_manager, request_props, peer_advertised) + .unwrap(); assert_eq!(outgoing.payload.candidate_hash, candidate); } @@ -1009,6 +1083,7 @@ mod tests { #[test] fn handle_outdated_response_due_to_garbage_collection() { let mut request_manager = RequestManager::new(); + let mut response_manager = ResponseManager::new(); let relay_parent = Hash::from_low_u64_le(1); let mut candidate_receipt = test_helpers::dummy_committed_candidate_receipt(relay_parent); @@ -1038,7 +1113,9 @@ mod tests { { let request_props = |_identifier: &CandidateIdentifier| Some((&request_properties).clone()); - let outgoing = request_manager.next_request(request_props, peer_advertised).unwrap(); + let outgoing = request_manager + .next_request(&mut response_manager, request_props, peer_advertised) + .unwrap(); assert_eq!(outgoing.payload.candidate_hash, candidate); } @@ -1083,6 +1160,7 @@ mod tests { #[test] fn should_clean_up_after_successful_requests() { let mut request_manager = RequestManager::new(); + let mut response_manager = ResponseManager::new(); let relay_parent = Hash::from_low_u64_le(1); let mut candidate_receipt = test_helpers::dummy_committed_candidate_receipt(relay_parent); @@ -1115,7 +1193,9 @@ mod tests { { let request_props = |_identifier: &CandidateIdentifier| Some((&request_properties).clone()); - let outgoing = request_manager.next_request(request_props, peer_advertised).unwrap(); + let outgoing = request_manager + .next_request(&mut response_manager, request_props, peer_advertised) + .unwrap(); assert_eq!(outgoing.payload.candidate_hash, candidate); } diff --git a/node/network/statement-distribution/src/vstaging/tests/mod.rs b/node/network/statement-distribution/src/vstaging/tests/mod.rs index c28ea743c9ba..ae20718a4df9 100644 --- a/node/network/statement-distribution/src/vstaging/tests/mod.rs +++ b/node/network/statement-distribution/src/vstaging/tests/mod.rs @@ -31,6 +31,7 @@ use polkadot_node_subsystem::messages::{ }; use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_types::{jaeger, ActivatedLeaf, LeafStatus}; +use polkadot_node_subsystem_util::TimeoutExt; use polkadot_primitives::vstaging::{ AssignmentPair, AsyncBackingParams, BlockNumber, CommittedCandidateReceipt, CoreState, GroupRotationInfo, HeadData, Header, IndexedVec, PersistedValidationData, ScheduledCore, @@ -584,6 +585,14 @@ async fn send_new_topology(virtual_overseer: &mut VirtualOverseer, topology: New .await; } +async fn overseer_recv_with_timeout( + overseer: &mut VirtualOverseer, + 
timeout: Duration, +) -> Option { + gum::trace!("waiting for message..."); + overseer.recv().timeout(timeout).await +} + fn next_group_index( group_index: GroupIndex, validator_count: usize, diff --git a/node/network/statement-distribution/src/vstaging/tests/requests.rs b/node/network/statement-distribution/src/vstaging/tests/requests.rs index 602b3d45b4a9..6259a8fcc688 100644 --- a/node/network/statement-distribution/src/vstaging/tests/requests.rs +++ b/node/network/statement-distribution/src/vstaging/tests/requests.rs @@ -573,6 +573,7 @@ fn peer_reported_for_not_enough_statements() { ), ) .await; + let c_seconded = state .sign_statement( v_c, @@ -1570,3 +1571,301 @@ fn local_node_respects_statement_mask() { overseer }); } + +#[test] +fn should_delay_before_retrying_dropped_requests() { + let validator_count = 6; + let group_size = 3; + let config = TestConfig { + validator_count, + group_size, + local_validator: true, + async_backing_params: None, + }; + + let relay_parent = Hash::repeat_byte(1); + let peer_c = PeerId::random(); + let peer_d = PeerId::random(); + let peer_e = PeerId::random(); + + test_harness(config, |state, mut overseer| async move { + let local_validator = state.local.clone().unwrap(); + + let other_group = + next_group_index(local_validator.group_index, validator_count, group_size); + let other_para = ParaId::from(other_group.0); + + let test_leaf = state.make_dummy_leaf(relay_parent); + + let (candidate_1, pvd_1) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![4, 5, 6].into(), + Hash::repeat_byte(42).into(), + ); + let (candidate_2, pvd_2) = make_candidate( + relay_parent, + 1, + other_para, + test_leaf.para_data(other_para).head_data.clone(), + vec![7, 8, 9].into(), + Hash::repeat_byte(43).into(), + ); + let candidate_hash_1 = candidate_1.hash(); + let candidate_hash_2 = candidate_2.hash(); + + let target_group_validators = state.group_validators(other_group, true); + let v_c = target_group_validators[0]; + let v_d = target_group_validators[1]; + let v_e = target_group_validators[2]; + + // Connect C, D, E + { + connect_peer( + &mut overseer, + peer_c.clone(), + Some(vec![state.discovery_id(v_c)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_d.clone(), + Some(vec![state.discovery_id(v_d)].into_iter().collect()), + ) + .await; + + connect_peer( + &mut overseer, + peer_e.clone(), + Some(vec![state.discovery_id(v_e)].into_iter().collect()), + ) + .await; + + send_peer_view_change(&mut overseer, peer_c.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_d.clone(), view![relay_parent]).await; + send_peer_view_change(&mut overseer, peer_e.clone(), view![relay_parent]).await; + } + + activate_leaf(&mut overseer, &test_leaf, &state, true).await; + + answer_expected_hypothetical_depth_request( + &mut overseer, + vec![], + Some(relay_parent), + false, + ) + .await; + + // Send gossip topology. + send_new_topology(&mut overseer, state.make_dummy_topology()).await; + + // `1` indicates statements NOT to request. + let mask = StatementFilter::blank(group_size); + + // Send a request about a candidate. 
+ { + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash: candidate_hash_1, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd_1.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + }, + }; + + // Peer sends an announcement. + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + + // We send a request to peer. Drop the request without sending a response. + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::SendRequests(mut requests, IfDisconnected::ImmediateError)) => { + assert_eq!(requests.len(), 1); + assert_matches!( + requests.pop().unwrap(), + Requests::AttestedCandidateVStaging(outgoing) => { + assert_eq!(outgoing.peer, Recipient::Peer(peer_c)); + assert_eq!(outgoing.payload.candidate_hash, candidate_hash_1); + assert_eq!(outgoing.payload.mask, mask); + } + ); + } + ); + + assert_matches!( + overseer_recv_with_timeout(&mut overseer, Duration::from_millis(100)).await, + None + ); + } + + // We still send requests about different candidates as per usual. + { + let manifest = BackedCandidateManifest { + relay_parent, + candidate_hash: candidate_hash_2, + group_index: other_group, + para_id: other_para, + parent_head_data_hash: pvd_2.parent_head.hash(), + statement_knowledge: StatementFilter { + seconded_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 1], + validated_in_group: bitvec::bitvec![u8, Lsb0; 1, 1, 0], + }, + }; + + // Peer sends an announcement. + send_peer_message( + &mut overseer, + peer_c.clone(), + protocol_vstaging::StatementDistributionMessage::BackedCandidateManifest( + manifest.clone(), + ), + ) + .await; + + let statements = vec![ + state + .sign_statement( + v_c, + CompactStatement::Seconded(candidate_hash_2), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + state + .sign_statement( + v_d, + CompactStatement::Seconded(candidate_hash_2), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + state + .sign_statement( + v_e, + CompactStatement::Seconded(candidate_hash_2), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + + // Don't drop this request. + handle_sent_request( + &mut overseer, + peer_c, + candidate_hash_2, + mask.clone(), + candidate_2.clone(), + pvd_2.clone(), + statements.clone(), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_RESPONSE + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + // Sleep for the given amount of time. This should reset the delay for the first candidate. 
+ futures_timer::Delay::new(REQUEST_RETRY_DELAY).await; + + // We re-try the first request. + { + let statements = vec![ + state + .sign_statement( + v_c, + CompactStatement::Seconded(candidate_hash_1), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + state + .sign_statement( + v_d, + CompactStatement::Seconded(candidate_hash_1), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + state + .sign_statement( + v_e, + CompactStatement::Seconded(candidate_hash_1), + &SigningContext { parent_hash: relay_parent, session_index: 1 }, + ) + .as_unchecked() + .clone(), + ]; + handle_sent_request( + &mut overseer, + peer_c, + candidate_hash_1, + mask, + candidate_1.clone(), + pvd_1.clone(), + statements.clone(), + ) + .await; + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_STATEMENT + ); + + assert_matches!( + overseer.recv().await, + AllMessages::NetworkBridgeTx(NetworkBridgeTxMessage::ReportPeer(p, r)) + if p == peer_c && r == BENEFIT_VALID_RESPONSE + ); + + answer_expected_hypothetical_depth_request(&mut overseer, vec![], None, false).await; + } + + overseer + }); +} From 631b66d5daa642fad7ed0a9712194c5b85b96563 Mon Sep 17 00:00:00 2001 From: Marcin S Date: Thu, 13 Apr 2023 20:29:40 +0200 Subject: [PATCH 51/76] Async backing: add Prospective Parachains impl guide (#6933) Co-authored-by: Bradley Olson <34992650+BradleyOlson64@users.noreply.github.com> --- .../src/fragment_tree.rs | 65 +++++-- node/core/prospective-parachains/src/lib.rs | 3 +- .../src/inclusion_emulator/staging.rs | 4 + roadmap/implementers-guide/src/SUMMARY.md | 1 + .../node/backing/prospective-parachains.md | 158 ++++++++++++++++++ .../node/backing/statement-distribution.md | 19 ++- 6 files changed, 228 insertions(+), 22 deletions(-) create mode 100644 roadmap/implementers-guide/src/node/backing/prospective-parachains.md diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index cbed7cf3f9dc..943d3191909b 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -16,16 +16,33 @@ //! A tree utility for managing parachain fragments not referenced by the relay-chain. //! -//! This module exposes two main types: [`FragmentTree`] and [`CandidateStorage`] -//! which are meant to be used in close conjunction. Each tree is associated with a particular -//! relay-parent, and it's expected that higher-level code will have a tree for each -//! relay-chain block which might reasonably have blocks built upon it. +//! # Overview //! -//! Trees only store indices into the [`CandidateStorage`] and the storage is meant to -//! be pruned when trees are dropped by higher-level code. +//! This module exposes two main types: [`FragmentTree`] and [`CandidateStorage`] which are meant to +//! be used in close conjunction. Each fragment tree is associated with a particular relay-parent +//! and each node in the tree represents a candidate. 
Each parachain has a single candidate storage,
+//! but can have multiple trees, one for each relay chain block in the view.
 //!
-//! Each node in the tree represents a candidate. Nodes do not uniquely refer to a parachain
-//! block for two reasons.
+//! A tree has an associated [`Scope`] which defines limits on candidates within the tree.
+//! Candidates themselves have their own [`Constraints`] which are either the constraints from the
+//! scope, or, if there are previous nodes in the tree, a modified version of the previous
+//! candidate's constraints.
+//!
+//! This module also makes use of types provided by the Inclusion Emulator module, such as
+//! [`Fragment`] and [`Constraints`]. These perform the actual job of checking for validity of
+//! prospective fragments.
+//!
+//! # Usage
+//!
+//! It's expected that higher-level code will have a tree for each relay-chain block which might
+//! reasonably have blocks built upon it.
+//!
+//! Because a para only has a single candidate storage, trees only store indices into the storage.
+//! The storage is meant to be pruned when trees are dropped by higher-level code.
+//!
+//! # Cycles
+//!
+//! Nodes do not uniquely refer to a parachain block for two reasons.
 //!   1. There's no requirement that head-data is unique
 //!      for a parachain. Furthermore, a parachain is under no obligation to be acyclic, and this is mostly
 //!      just because it's totally inefficient to enforce it. Practical use-cases are acyclic, but there is
@@ -43,7 +60,21 @@
 //!
 //! As an extreme example, a candidate which produces head-data which is the same as its parent
 //! can correspond to multiple nodes within the same [`FragmentTree`]. Such cycles are bounded
-//! by the maximum depth allowed by the tree.
+//! by the maximum depth allowed by the tree. An example with `max_depth: 4`:
+//!
+//! ```text
+//! committed head
+//!        |
+//! depth 0: head_a
+//!        |
+//! depth 1: head_b
+//!        |
+//! depth 2: head_a
+//!        |
+//! depth 3: head_b
+//!        |
+//! depth 4: head_a
+//! ```
 //!
 //! As long as the [`CandidateStorage`] has bounded input on the number of candidates supplied,
 //! [`FragmentTree`] complexity is bounded. This means that higher-level code needs to be selective
@@ -82,6 +113,8 @@ pub enum CandidateStorageInsertionError {
 	CandidateAlreadyKnown(CandidateHash),
 }
 
+/// Stores candidates and information about them such as their relay-parents and their backing
+/// states.
 pub(crate) struct CandidateStorage {
 	// Index from head data hash to candidate hashes with that head data as a parent.
 	by_parent_head: HashMap<Hash, HashSet<CandidateHash>>,
@@ -421,8 +454,10 @@ impl<'a> HypotheticalCandidate<'a> {
 	}
 }
 
-/// This is a tree of candidates based on some underlying storage of candidates
-/// and a scope.
+/// This is a tree of candidates based on some underlying storage of candidates and a scope.
+///
+/// All nodes in the tree must be either pending availability or within the scope. Within the scope
+/// means it's built off of the relay-parent or an ancestor.
 pub(crate) struct FragmentTree {
 	scope: Scope,
 
@@ -436,8 +471,10 @@ pub(crate) struct FragmentTree {
 }
 
 impl FragmentTree {
-	/// Create a new [`FragmentTree`] with given scope and populated from the
-	/// storage.
+	/// Create a new [`FragmentTree`] with given scope and populated from the storage.
+	///
+	/// Can be populated recursively (i.e. `populate` will pick up candidates that build on other
+	/// candidates).
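+	///
+	/// Here, "building on" means that the parent head-data of one candidate matches the
+	/// output head-data of another; these links are resolved through the storage's
+	/// `by_parent_head` index.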
pub fn populate(scope: Scope, storage: &CandidateStorage) -> Self { gum::trace!( target: LOG_TARGET, @@ -529,6 +566,8 @@ impl FragmentTree { } /// Add a candidate and recursively populate from storage. + /// + /// Candidates can be added either as children of the root or children of other candidates. pub(crate) fn add_and_populate(&mut self, hash: CandidateHash, storage: &CandidateStorage) { let candidate_entry = match storage.get(&hash) { None => return, diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index 5ef35bcaa628..91b15e96136c 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -24,8 +24,7 @@ //! This is primarily an implementation of "Fragment Trees", as described in //! [`polkadot_node_subsystem_util::inclusion_emulator::staging`]. //! -//! This also handles concerns such as the relay-chain being forkful, -//! session changes, predicting validator group assignments. +//! This subsystem also handles concerns such as the relay-chain being forkful and session changes. use std::{ borrow::Cow, diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index 62a0078ba9e7..fcf9ccad78a0 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -13,6 +13,8 @@ //! The implementation of the inclusion emulator for the 'staging' runtime version. //! +//! # Overview +//! //! A set of utilities for node-side code to emulate the logic the runtime uses for checking //! parachain blocks in order to build prospective parachains that are produced ahead of the //! relay chain. These utilities allow the node-side to predict, with high accuracy, what @@ -23,6 +25,8 @@ //! a parachain block, anchored to the relay-chain at a particular relay-chain block, known as the //! relay-parent. //! +//! ## Fragment Validity +//! //! Every relay-parent is implicitly associated with a unique set of [`Constraints`] that describe //! the properties that must be true for a block to be included in a direct child of that block, //! assuming there is no intermediate parachain block pending availability. diff --git a/roadmap/implementers-guide/src/SUMMARY.md b/roadmap/implementers-guide/src/SUMMARY.md index 56f72f3039a4..ef94ef3659c6 100644 --- a/roadmap/implementers-guide/src/SUMMARY.md +++ b/roadmap/implementers-guide/src/SUMMARY.md @@ -45,6 +45,7 @@ - [Collator Protocol](node/collators/collator-protocol.md) - [Backing Subsystems](node/backing/README.md) - [Candidate Backing](node/backing/candidate-backing.md) + - [Prospective Parachains](node/backing/prospective-parachains.md) - [Statement Distribution](node/backing/statement-distribution.md) - [Statement Distribution (Legacy)](node/backing/statement-distribution-legacy.md) - [Availability Subsystems](node/availability/README.md) diff --git a/roadmap/implementers-guide/src/node/backing/prospective-parachains.md b/roadmap/implementers-guide/src/node/backing/prospective-parachains.md new file mode 100644 index 000000000000..c25635fe2a17 --- /dev/null +++ b/roadmap/implementers-guide/src/node/backing/prospective-parachains.md @@ -0,0 +1,158 @@ +# Prospective Parachains + +## Overview + +**Purpose:** Tracks and handles prospective parachain fragments and informs +other backing-stage subsystems of work to be done. + +"prospective": +- [*prə'spɛktɪv*] adj. 
+- future, likely, potential + +Asynchronous backing changes the runtime to accept parachain candidates from a +certain allowed range of historic relay-parents. This means we can now build +*prospective parachains* – that is, trees of potential (but likely) future +parachain blocks. This is the subsystem responsible for doing so. + +Other subsystems such as Backing rely on Prospective Parachains, e.g. for +determining if a candidate can be seconded. This subsystem is the main +coordinator of work within the node for the collation and backing phases of +parachain consensus. + +Prospective Parachains is primarily an implementation of fragment trees. It also +handles concerns such as: + +- the relay-chain being forkful +- session changes + +See the following sections for more details. + +### Fragment Trees + +This subsystem builds up fragment trees, which are trees of prospective para +candidates. Each path through the tree represents a possible state transition +path for the para. Each potential candidate is a fragment, or a node, in the +tree. Candidates are validated against constraints as they are added. + +This subsystem builds up trees for each relay-chain block in the view, for each +para. These fragment trees are used for: + +- providing backable candidates to other subsystems +- sanity-checking that candidates can be seconded +- getting seconded candidates under active leaves +- etc. + +For example, here is a tree with several possible paths: + +``` +Para Head registered by the relay chain: included_head + ↲ ↳ +depth 0: head_0_a head_0_b + ↲ ↳ +depth 1: head_1_a head_1_b + ↲ | ↳ +depth 2: head_2_a1 head_2_a2 head_2_a3 +``` + +### The Relay-Chain Being Forkful + +We account for the same candidate possibly appearing in different forks. While +we still build fragment trees for each head in each fork, we are efficient with +how we reference candidates to save space. + +### Session Changes + +Allowed ancestry doesn't cross session boundary. That is, you can only build on +top of the freshest relay parent when the session starts. This is a current +limitation that may be lifted in the future. + +Also, runtime configuration values needed for constraints (such as +`max_pov_size`) are constant within a session. This is important when building +prospective validation data. This is unlikely to change. + +## Messages + +### Incoming + +- `ActiveLeaves` + - Notification of a change in the set of active leaves. + - Constructs fragment trees for each para for each new leaf. +- `ProspectiveParachainsMessage::IntroduceCandidate` + - Informs the subsystem of a new candidate. + - Sent by the Backing Subsystem when it is importing a statement for a + new candidate. +- `ProspectiveParachainsMessage::CandidateSeconded` + - Informs the subsystem that a previously introduced candidate has + been seconded. + - Sent by the Backing Subsystem when it is importing a statement for a + new candidate after it sends `IntroduceCandidate`, if that wasn't + rejected by Prospective Parachains. +- `ProspectiveParachainsMessage::CandidateBacked` + - Informs the subsystem that a previously introduced candidate has + been backed. + - Sent by the Backing Subsystem after it successfully imports a + statement giving a candidate the necessary quorum of backing votes. +- `ProspectiveParachainsMessage::GetBackableCandidate` + - Get a backable candidate hash for a given parachain, under a given + relay-parent hash, which is a descendant of given candidate hashes. 
+ - Sent by the Provisioner when requesting backable candidates, when + selecting candidates for a given relay-parent. +- `ProspectiveParachainsMessage::GetHypotheticalFrontier` + - Gets the hypothetical frontier membership of candidates with the + given properties under the specified active leaves' fragment trees. + - Sent by the Backing Subsystem when sanity-checking whether a candidate can + be seconded based on its hypothetical frontiers. +- `ProspectiveParachainsMessage::GetTreeMembership` + - Gets the membership of the candidate in all fragment trees. + - Sent by the Backing Subsystem when it needs to update the candidates + seconded at various depths under new active leaves. +- `ProspectiveParachainsMessage::GetMinimumRelayParents` + - Gets the minimum accepted relay-parent number for each para in the + fragment tree for the given relay-chain block hash. + - That is, this returns the minimum relay-parent block number in the + same branch of the relay-chain which is accepted in the fragment + tree for each para-id. + - Sent by the Backing, Statement Distribution, and Collator Protocol + subsystems when activating leaves in the implicit view. +- `ProspectiveParachainsMessage::GetProspectiveValidationData` + - Gets the validation data of some prospective candidate. The + candidate doesn't need to be part of any fragment tree. + - Sent by the Collator Protocol subsystem (validator side) when + handling a fetched collation result. + +### Outgoing + +- `RuntimeApiRequest::StagingParaBackingState` + - Gets the backing state of the given para (the constraints of the para and + candidates pending availability). +- `RuntimeApiRequest::AvailabilityCores` + - Gets information on all availability cores. +- `ChainApiMessage::Ancestors` + - Requests the `k` ancestor block hashes of a block with the given + hash. +- `ChainApiMessage::BlockHeader` + - Requests the block header by hash. + +## Glossary + +- **Candidate storage:** Stores candidates and information about them + such as their relay-parents and their backing states. Is indexed in + various ways. +- **Constraints:** + - Constraints on the actions that can be taken by a new parachain + block. + - Exhaustively define the set of valid inputs and outputs to parachain + execution. +- **Fragment:** A prospective para block (that is, a block not yet referenced by + the relay-chain). Fragments are anchored to the relay-chain at a particular + relay-parent. +- **Fragment tree:** + - A tree of fragments. Together, these fragments define one or more + prospective paths a parachain's state may transition through. + - See the "Fragment Tree" section. +- **Inclusion emulation:** Emulation of the logic that the runtime uses + for checking parachain blocks. +- **Relay-parent:** A particular relay-chain block that a fragment is + anchored to. +- **Scope:** The scope of a fragment tree, defining limits on nodes + within the tree. diff --git a/roadmap/implementers-guide/src/node/backing/statement-distribution.md b/roadmap/implementers-guide/src/node/backing/statement-distribution.md index 5f779baed41d..2e0142848210 100644 --- a/roadmap/implementers-guide/src/node/backing/statement-distribution.md +++ b/roadmap/implementers-guide/src/node/backing/statement-distribution.md @@ -163,30 +163,35 @@ backing subsystem itself. - `StatementDistributionMessage::Share` - Notification of a locally-originating statement. That is, this statement comes from our node and should be distributed to other nodes. 
-  - Handled by `share_local_statement`
+  - Sent by the Backing Subsystem after it successfully imports a
+    locally-originating statement.
 - `StatementDistributionMessage::Backed`
   - Notification of a candidate being backed (received enough validity votes
     from the backing group).
-  - Handled by `handle_backed_candidate_message`
+  - Sent by the Backing Subsystem after it successfully imports a statement for
+    the first time and after sending `Share`.
 - `StatementDistributionMessage::NetworkBridgeUpdate`
-  - Handled by `handle_network_update`
+  - See next section.
 
 #### Network bridge events
 
 - v1 compatibility
+  - Messages for the v1 protocol are routed to the legacy statement
+    distribution.
 - `Statement`
   - Notification of a signed statement.
-  - Handled by `handle_incoming_statement`
+  - Sent by a peer's Statement Distribution subsystem when circulating
+    statements.
 - `BackedCandidateManifest`
   - Notification of a backed candidate being known by the sending node.
   - For the candidate being requested by the receiving node if needed.
-  - Announcement
-  - Handled by `handle_incoming_manifest`
+  - Announcement.
+  - Sent by a peer's Statement Distribution subsystem.
 - `BackedCandidateKnown`
   - Notification of a backed candidate being known by the sending node.
   - For informing a receiving node which already has the candidate.
   - Acknowledgement.
-  - Handled by `handle_incoming_acknowledgement`
+  - Sent by a peer's Statement Distribution subsystem.
 
 ### Outgoing

From aee4ba91222a17726c13111d43925a70d21f5c4f Mon Sep 17 00:00:00 2001
From: Bradley Olson <34992650+BradleyOlson64@users.noreply.github.com>
Date: Mon, 1 May 2023 14:13:41 -0700
Subject: [PATCH 52/76] Updates to Provisioner Guide for Async Backing (#7106)

* Initial corrections and clarifications
* Partial first draft
* Finished first draft
* Adding back wrongly removed test bit
* fmt
* Update roadmap/implementers-guide/src/node/utility/provisioner.md

Co-authored-by: Marcin S.

* Addressing comments
* Reorganization
* fmt

---------

Co-authored-by: Marcin S.
--- node/core/provisioner/src/tests.rs | 35 ++--- .../src/node/utility/provisioner.md | 144 +++++++++++++----- 2 files changed, 115 insertions(+), 64 deletions(-) diff --git a/node/core/provisioner/src/tests.rs b/node/core/provisioner/src/tests.rs index 25b48cca4537..27975e44c035 100644 --- a/node/core/provisioner/src/tests.rs +++ b/node/core/provisioner/src/tests.rs @@ -19,6 +19,8 @@ use ::test_helpers::{dummy_candidate_descriptor, dummy_hash}; use bitvec::bitvec; use polkadot_primitives::{OccupiedCore, ScheduledCore}; +const MOCK_GROUP_SIZE: usize = 5; + pub fn occupied_core(para_id: u32) -> CoreState { CoreState::Occupied(OccupiedCore { group_responsible: para_id.into(), @@ -46,8 +48,8 @@ where CoreState::Occupied(core) } -pub fn default_bitvec(n_cores: usize) -> CoreAvailability { - bitvec![u8, bitvec::order::Lsb0; 0; n_cores] +pub fn default_bitvec(size: usize) -> CoreAvailability { + bitvec![u8, bitvec::order::Lsb0; 0; size] } pub fn scheduled_core(id: u32) -> ScheduledCore { @@ -236,7 +238,7 @@ pub(crate) mod common { mod select_candidates { use super::{ super::*, build_occupied_core, common::test_harness, default_bitvec, occupied_core, - scheduled_core, + scheduled_core, MOCK_GROUP_SIZE, }; use ::test_helpers::{dummy_candidate_descriptor, dummy_hash}; use futures::channel::mpsc; @@ -400,7 +402,6 @@ mod select_candidates { #[test] fn selects_correct_candidates() { let mock_cores = mock_availability_cores(); - let n_cores = mock_cores.len(); let empty_hash = PersistedValidationData::::default().hash(); @@ -450,12 +451,12 @@ mod select_candidates { commitments: Default::default(), }, validity_votes: Vec::new(), - validator_indices: default_bitvec(n_cores), + validator_indices: default_bitvec(MOCK_GROUP_SIZE), }) .collect(); test_harness( - |r| mock_overseer(r, expected_backed, ProspectiveParachainsMode::Disabled), + |r| mock_overseer(r, expected_backed, prospective_parachains_mode), |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, @@ -482,7 +483,6 @@ mod select_candidates { #[test] fn selects_max_one_code_upgrade() { let mock_cores = mock_availability_cores(); - let n_cores = mock_cores.len(); let empty_hash = PersistedValidationData::::default().hash(); @@ -512,13 +512,15 @@ mod select_candidates { }) .collect(); + // Input to select_candidates let candidates: Vec<_> = committed_receipts.iter().map(|r| r.to_plain()).collect(); + // Build possible outputs from select_candidates let backed_candidates: Vec<_> = committed_receipts .iter() .map(|committed_receipt| BackedCandidate { candidate: committed_receipt.clone(), validity_votes: Vec::new(), - validator_indices: default_bitvec(n_cores), + validator_indices: default_bitvec(MOCK_GROUP_SIZE), }) .collect(); @@ -532,7 +534,7 @@ mod select_candidates { let prospective_parachains_mode = ProspectiveParachainsMode::Disabled; test_harness( - |r| mock_overseer(r, expected_backed, ProspectiveParachainsMode::Disabled), + |r| mock_overseer(r, expected_backed, prospective_parachains_mode), |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, @@ -561,8 +563,6 @@ mod select_candidates { #[test] fn request_from_prospective_parachains() { let mock_cores = mock_availability_cores(); - let n_cores = mock_cores.len(); - let empty_hash = PersistedValidationData::::default().hash(); let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); @@ -596,21 +596,12 @@ mod select_candidates { commitments: Default::default(), }, validity_votes: Vec::new(), - validator_indices: 
default_bitvec(n_cores), + validator_indices: default_bitvec(MOCK_GROUP_SIZE), }) .collect(); test_harness( - |r| { - mock_overseer( - r, - expected_backed, - ProspectiveParachainsMode::Enabled { - max_candidate_depth: 0, - allowed_ancestry_len: 0, - }, - ) - }, + |r| mock_overseer(r, expected_backed, prospective_parachains_mode), |mut tx: TestSubsystemSender| async move { let result = select_candidates( &mock_cores, diff --git a/roadmap/implementers-guide/src/node/utility/provisioner.md b/roadmap/implementers-guide/src/node/utility/provisioner.md index 36747678106e..4cb930b1a159 100644 --- a/roadmap/implementers-guide/src/node/utility/provisioner.md +++ b/roadmap/implementers-guide/src/node/utility/provisioner.md @@ -1,8 +1,6 @@ # Provisioner -Relay chain block authorship authority is governed by BABE and is beyond the scope of the Overseer and the rest of the subsystems. That said, ultimately the block author needs to select a set of backable parachain candidates and other consensus data, and assemble a block from them. This subsystem is responsible for providing the necessary data to all potential block authors. - -A major feature of the provisioner: this subsystem is responsible for ensuring that parachain block candidates are sufficiently available before sending them to potential block authors. +Relay chain block authorship authority is governed by BABE and is beyond the scope of the Overseer and the rest of the subsystems. That said, ultimately the block author needs to select a set of backable parachain candidates and other consensus data, and assemble a block from them. This subsystem is responsible for providing the necessary data to all potential block authors. ## Provisionable Data @@ -10,7 +8,7 @@ There are several distinct types of provisionable data, but they share this prop ### Backed Candidates -The block author can choose 0 or 1 backed parachain candidates per parachain; the only constraint is that each backed candidate has the appropriate relay parent. However, the choice of a backed candidate must be the block author's; the provisioner must ensure that block authors are aware of all available [`BackedCandidate`s](../../types/backing.md#backed-candidate). +The block author can choose 0 or 1 backed parachain candidates per parachain; the only constraint is that each backable candidate has the appropriate relay parent. However, the choice of a backed candidate must be the block author's. The provisioner subsystem is how those block authors make this choice in practice. ### Signed Bitfields @@ -30,6 +28,23 @@ Dispute resolution is complex and is explained in substantially more detail [her ## Protocol +The subsystem should maintain a set of handles to Block Authorship Provisioning iterations that are currently live. + +### On Overseer Signal + +- `ActiveLeavesUpdate`: + - For each `activated` head: + - spawn a Block Authorship Provisioning iteration with the given relay parent, storing a bidirectional channel with that iteration. + - For each `deactivated` head: + - terminate the Block Authorship Provisioning iteration for the given relay parent, if any. +- `Conclude`: Forward `Conclude` to all iterations, waiting a small amount of time for them to join, and then hard-exiting. + +### On `ProvisionerMessage` + +Forward the message to the appropriate Block Authorship Provisioning iteration, or discard if no appropriate iteration is currently active. + +### Per Provisioning Iteration + Input: [`ProvisionerMessage`](../../types/overseer-protocol.md#provisioner-message). 
Backed candidates come from the [Candidate Backing subsystem](../backing/candidate-backing.md), signed bitfields come from the [Bitfield Distribution subsystem](../availability/bitfield-distribution.md), and disputes come from the [Disputes Subsystem](../disputes/dispute-coordinator.md). Misbehavior reports are currently sent from the [Candidate Backing subsystem](../backing/candidate-backing.md) and contain the following misbehaviors: 1. `Misbehavior::ValidityDoubleVote` @@ -45,37 +60,17 @@ Block authors request the inherent data they should use for constructing the inh ## Block Production -When a validator is selected by BABE to author a block, it becomes a block producer. The provisioner is the subsystem best suited to choosing which specific backed candidates and availability bitfields should be assembled into the block. To engage this functionality, a `ProvisionerMessage::RequestInherentData` is sent; the response is a [`ParaInherentData`](../../types/runtime.md#parainherentdata). There are never two distinct parachain candidates included for the same parachain and that new parachain candidates cannot be backed until the previous one either gets declared available or expired. Appropriate bitfields, as outlined in the section on [bitfield selection](#bitfield-selection), and any dispute statements should be attached as well. +When a validator is selected by BABE to author a block, it becomes a block producer. The provisioner is the subsystem best suited to choosing which specific backed candidates and availability bitfields should be assembled into the block. To engage this functionality, a `ProvisionerMessage::RequestInherentData` is sent; the response is a [`ParaInherentData`](../../types/runtime.md#parainherentdata). Each relay chain block backs at most one backable parachain block candidate per parachain. Additionally no further block candidate can be backed until the previous one either gets declared available or expired. If bitfields indicate that candidate A, predecessor of B, should be declared available, then B can be backed in the same relay block. Appropriate bitfields, as outlined in the section on [bitfield selection](#bitfield-selection), and any dispute statements should be attached as well. ### Bitfield Selection Our goal with respect to bitfields is simple: maximize availability. However, it's not quite as simple as always including all bitfields; there are constraints which still need to be met: -- We cannot choose more than one bitfield per validator. -- Each bitfield must correspond to an occupied core. +- not more than one bitfield per validator +- each 1 bit must correspond to an occupied core Beyond that, a semi-arbitrary selection policy is fine. In order to meet the goal of maximizing availability, a heuristic of picking the bitfield with the greatest number of 1 bits set in the event of conflict is useful. -### Candidate Selection - -The goal of candidate selection is to determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. - -To determine availability: - -- Get the list of core states from the runtime API -- For each core state: - - On `CoreState::Scheduled`, then we can make an `OccupiedCoreAssumption::Free`. - - On `CoreState::Occupied`, then we may be able to make an assumption: - - If the bitfields indicate availability and there is a scheduled `next_up_on_available`, then we can make an `OccupiedCoreAssumption::Included`. 
-  - If the bitfields do not indicate availability, and there is a scheduled `next_up_on_time_out`, and `occupied_core.time_out_at == block_number_under_production`, then we can make an `OccupiedCoreAssumption::TimedOut`.
-  - If we did not make an `OccupiedCoreAssumption`, then continue on to the next core.
-  - Now compute the core's `validation_data_hash`: get the `PersistedValidationData` from the runtime, given the known `ParaId` and `OccupiedCoreAssumption`;
-  - Find an appropriate candidate for the core.
-    - There are two constraints: `backed_candidate.candidate.descriptor.para_id == scheduled_core.para_id && candidate.candidate.descriptor.validation_data_hash == computed_validation_data_hash`.
-    - In the event that more than one candidate meets the constraints, selection between the candidates is arbitrary. However, not more than one candidate can be selected per core.
-
-The end result of this process is a vector of `BackedCandidate`s, sorted in order of their core index. Furthermore, this process should select at maximum one candidate which upgrades the runtime validation code.
-
 ### Dispute Statement Selection
 
 This is the point at which the block author provides further votes to active disputes or initiates new disputes in the runtime state.
@@ -100,27 +95,92 @@ To compute bitfield availability, then:
 - Update the availability. Conceptually, assuming bit vectors: `availability[validator_index] |= bitfield[core_idx]`
 - Availability has a 2/3 threshold. Therefore: `3 * availability.count_ones() >= 2 * availability.len()`
 
-### Notes
+### Candidate Selection: Prospective Parachains Mode
 
-See also: [Scheduler Module: Availability Cores](../../runtime/scheduler.md#availability-cores).
+The state of the provisioner `PerRelayParent` tracks an important setting, `ProspectiveParachainsMode`. This setting determines which backable candidate selection method the provisioner uses.
 
-## Functionality
+- `ProspectiveParachainsMode::Disabled` - The provisioner uses its own internal legacy candidate selection.
+- `ProspectiveParachainsMode::Enabled` - The provisioner requests that [prospective parachains](../backing/prospective-parachains.md) provide selected candidates.
 
-The subsystem should maintain a set of handles to Block Authorship Provisioning Jobs that are currently live.
+Candidates selected with `ProspectiveParachainsMode::Enabled` are able to benefit from the increased block production time asynchronous backing allows. For this reason, all Polkadot protocol networks will eventually use prospective parachains candidate selection; legacy candidate selection will then be removed as obsolete.
 
-### On Overseer Signal
+### Prospective Parachains Candidate Selection
 
-- `ActiveLeavesUpdate`:
-  - For each `activated` head:
-    - spawn a Block Authorship Provisioning Job with the given relay parent, storing a bidirectional channel with that job.
-  - For each `deactivated` head:
-    - terminate the Block Authorship Provisioning Job for the given relay parent, if any.
-- `Conclude`: Forward `Conclude` to all jobs, waiting a small amount of time for them to join, and then hard-exiting.
+The goal of candidate selection is to determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. In prospective parachains candidate selection, the provisioner handles the former process while [prospective parachains](../backing/prospective-parachains.md) handles the latter.
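+
+For concreteness, here is a minimal sketch of the 2/3 availability rule quoted above
+(plain `bool` slices stand in for the real bitfield types; this is an illustration, not
+the subsystem's actual code). The same arithmetic is what the
+`bitfields_indicate_availability` check in the selection steps below relies on:
+
+```rust
+/// Returns true once at least two thirds of validators have voted a core's bit,
+/// i.e. `3 * count_ones >= 2 * len`.
+fn bitfields_indicate_availability(votes: &[bool]) -> bool {
+    let ones = votes.iter().filter(|&&v| v).count();
+    3 * ones >= 2 * votes.len()
+}
+
+fn main() {
+    // 4 of 6 validators is exactly the two-thirds threshold, so this passes...
+    assert!(bitfields_indicate_availability(&[true, true, true, true, false, false]));
+    // ...while 3 of 6 falls short.
+    assert!(!bitfields_indicate_availability(&[true, true, true, false, false, false]));
+}
+```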
-### On `ProvisionerMessage` +To select backable candidates: + +- Get the list of core states from the runtime API +- For each core state: + - On `CoreState::Free` + - The core is unscheduled and doesn’t need to be provisioned with a candidate + - On `CoreState::Scheduled` + - The core is unoccupied and scheduled to accept a backed block for a particular `para_id`. + - The provisioner requests a backable candidate from [prospective parachains](../backing/prospective-parachains.md) with the desired relay parent, the core’s scheduled `para_id`, and an empty required path. + - On `CoreState::Occupied` + - The availability core is occupied by a parachain block candidate pending availability. A further candidate need not be provided by the provisioner unless the core will be vacated this block. This is the case when either bitfields indicate the current core occupant has been made available or a timeout is reached. + - If `bitfields_indicate_availability` + - If `Some(scheduled_core) = occupied_core.next_up_on_available`, the core will be vacated and in need of a provisioned candidate. The provisioner requests a backable candidate from [prospective parachains](../backing/prospective-parachains.md) with the core’s scheduled `para_id` and a required path with one entry. This entry corresponds to the parablock candidate previously occupying this core, which was made available and can be built upon even though it hasn’t been seen as included in a relay chain block yet. See the Required Path section below for more detail. + - If `occupied_core.next_up_on_available` is `None`, then the core being vacated is unscheduled and doesn’t need to be provisioned with a candidate. + - Else-if `occupied_core.time_out_at == block_number` + - If `Some(scheduled_core) = occupied_core.next_up_on_timeout`, the core will be vacated and in need of a provisioned candidate. A candidate is requested in exactly the same way as with `CoreState::Scheduled`. + - Else the core being vacated is unscheduled and doesn’t need to be provisioned with a candidate +The end result of this process is a vector of `CandidateHash`s, sorted in order of their core index. -Forward the message to the appropriate Block Authorship Provisioning Job, or discard if no appropriate job is currently active. +#### Required Path -## Block Authorship Provisioning Job +Required path is a parameter for `ProspectiveParachainsMessage::GetBackableCandidate`, which the provisioner sends in candidate selection. + +An empty required path indicates that the requested candidate should be a direct child of the most recently included parablock for the given `para_id` as of the given relay parent. + +In contrast, a required path with one or more entries prompts [prospective parachains](../backing/prospective-parachains.md) to step forward through its fragment tree for the given `para_id` and relay parent until the desired parablock is reached. We then select a direct child of that parablock to pass to the provisioner. + +The parablocks making up a required path do not need to have been previously seen as included in relay chain blocks. Thus the ability to provision backable candidates based on a required path effectively decouples backing from inclusion. + +### Legacy Candidate Selection + +Legacy candidate selection takes place in the provisioner. Thus the provisioner needs to keep an up to date record of all [backed_candidates](../../types/backing.md#backed-candidate) `PerRelayParent` to pick from. 
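+Before walking through the legacy algorithm, the prospective flow from the two preceding
+sections can be summarized in a small, self-contained sketch: the mapping from a core's
+state to the required path sent with `GetBackableCandidate`. The types below are
+simplified stand-ins for the real primitives, and the boolean flags compress the
+availability/timeout and `next_up` checks described above:
+
+```rust
+#[derive(Clone, Copy, Debug, PartialEq)]
+struct CandidateHash(u64);
+
+enum CoreState {
+	Free,
+	Scheduled,
+	Occupied {
+		pending: CandidateHash,
+		made_available: bool, // bitfields indicate the occupant was made available
+		timed_out: bool,      // the availability timeout is reached this block
+		has_next_up: bool,    // a para is scheduled to take the vacated core
+	},
+}
+
+/// Returns `Some(required_path)` when a backable candidate should be requested from
+/// prospective parachains for this core, or `None` when nothing needs provisioning.
+fn required_path(core: &CoreState) -> Option<Vec<CandidateHash>> {
+	match core {
+		// Unscheduled: nothing to provision.
+		CoreState::Free => None,
+		// Free and scheduled: build directly on the most recently included parablock,
+		// hence an empty required path.
+		CoreState::Scheduled => Some(Vec::new()),
+		// Vacated by availability: build on the pending candidate, even though it has
+		// not yet been seen as included in a relay-chain block.
+		CoreState::Occupied { pending, made_available: true, has_next_up: true, .. } =>
+			Some(vec![*pending]),
+		// Vacated by timeout: the pending candidate is dropped, so request exactly as
+		// in the `Scheduled` case.
+		CoreState::Occupied { timed_out: true, has_next_up: true, .. } => Some(Vec::new()),
+		// Occupied and staying put, or vacated with nothing scheduled next.
+		CoreState::Occupied { .. } => None,
+	}
+}
+
+fn main() {
+	assert_eq!(required_path(&CoreState::Free), None);
+	assert_eq!(required_path(&CoreState::Scheduled), Some(vec![]));
+	let vacated = CoreState::Occupied {
+		pending: CandidateHash(7),
+		made_available: true,
+		timed_out: false,
+		has_next_up: true,
+	};
+	assert_eq!(required_path(&vacated), Some(vec![CandidateHash(7)]));
+}
+```
+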
+ +The goal of candidate selection is to determine which cores are free, and then to the degree possible, pick a candidate appropriate to each free core. + +To determine availability: + +- Get the list of core states from the runtime API +- For each core state: + - On `CoreState::Scheduled`, then we can make an `OccupiedCoreAssumption::Free`. + - On `CoreState::Occupied`, then we may be able to make an assumption: + - If the bitfields indicate availability and there is a scheduled `next_up_on_available`, then we can make an `OccupiedCoreAssumption::Included`. + - If the bitfields do not indicate availability, and there is a scheduled `next_up_on_time_out`, and `occupied_core.time_out_at == block_number_under_production`, then we can make an `OccupiedCoreAssumption::TimedOut`. + - If we did not make an `OccupiedCoreAssumption`, then continue on to the next core. + - Now compute the core's `validation_data_hash`: get the `PersistedValidationData` from the runtime, given the known `ParaId` and `OccupiedCoreAssumption`; + - Find an appropriate candidate for the core. + - There are two constraints: `backed_candidate.candidate.descriptor.para_id == scheduled_core.para_id && candidate.candidate.descriptor.validation_data_hash == computed_validation_data_hash`. + - In the event that more than one candidate meets the constraints, selection between the candidates is arbitrary. However, not more than one candidate can be selected per core. -Maintain the set of channels to block authors. On receiving provisionable data, send a copy over each channel. +The end result of this process is a vector of `CandidateHash`s, sorted in order of their core index. + +### Retrieving Full `BackedCandidate`s for Selected Hashes + +Legacy candidate selection and prospective parachains candidate selection both leave us with a vector of `CandidateHash`s. These are passed to the backing subsystem with `CandidateBackingMessage::GetBackedCandidates`. + +The response is a vector of `BackedCandidate`s, sorted in order of their core index and ready to be provisioned to block authoring. The candidate selection and retrieval process should select at maximum one candidate which upgrades the runtime validation code. + +## Glossary + +- **Relay-parent:** + - A particular relay-chain block which serves as an anchor and reference point for processes and data which depend on relay-chain state. +- **Active Leaf:** + - A relay chain block which is the head of an active fork of the relay chain. + - Block authorship provisioning jobs are spawned per active leaf and concluded for any leaves which become inactive. +- **Candidate Selection:** + - The process by which the provisioner selects backable parachain block candidates to pass to block authoring. + - Two versions, prospective parachains candidate selection and legacy candidate selection. See their respective protocol sections for details. +- **Availability Core:** + - Often referred to simply as "cores", availability cores are an abstraction used for resource management. For the provisioner, availability cores are most relevant in that core states determine which `para_id`s to provision backable candidates for. + - For more on availability cores see [Scheduler Module: Availability Cores](../../runtime/scheduler.md#availability-cores) +- **Availability Bitfield:** + - Often referred to simply as a "bitfield", an availability bitfield represents the view of parablock candidate availability from a particular validator's perspective. 
Each bit in the bitfield corresponds to a single [availability core](../runtime-api/availability-cores.md). + - For more on availability bitfields see [availability](../../types/availability.md) +- **Backable vs. Backed:** + - Note that we sometimes use "backed" to refer to candidates that are "backable", but not yet backed on chain. + - Backable means that a quorum of the candidate's assigned backing group have provided signed affirming statements. \ No newline at end of file From baf1365a0d6af4a4e71291dab31240efb5e3b9b0 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 24 May 2023 22:40:57 +0400 Subject: [PATCH 53/76] fmt --- .../dispute-coordinator/src/initialized.rs | 101 +++++++++--------- node/service/src/lib.rs | 6 +- runtime/parachains/src/inclusion/mod.rs | 16 ++- .../src/runtime_api_impl/vstaging.rs | 5 +- 4 files changed, 62 insertions(+), 66 deletions(-) diff --git a/node/core/dispute-coordinator/src/initialized.rs b/node/core/dispute-coordinator/src/initialized.rs index b5c548bf1ffd..81134a43a3a0 100644 --- a/node/core/dispute-coordinator/src/initialized.rs +++ b/node/core/dispute-coordinator/src/initialized.rs @@ -214,62 +214,61 @@ impl Initialized { gum::trace!(target: LOG_TARGET, "Waiting for message"); let mut overlay_db = OverlayedBackend::new(backend); let default_confirm = Box::new(|| Ok(())); - let confirm_write = match MuxedMessage::receive(ctx, &mut self.participation_receiver) - .await? - { - MuxedMessage::Participation(msg) => { - gum::trace!(target: LOG_TARGET, "MuxedMessage::Participation"); - let ParticipationStatement { - session, - candidate_hash, - candidate_receipt, - outcome, - } = self.participation.get_participation_result(ctx, msg).await?; - if let Some(valid) = outcome.validity() { - gum::trace!( - target: LOG_TARGET, - ?session, - ?candidate_hash, - ?valid, - "Issuing local statement based on participation outcome." - ); - self.issue_local_statement( - ctx, - &mut overlay_db, + let confirm_write = + match MuxedMessage::receive(ctx, &mut self.participation_receiver).await? { + MuxedMessage::Participation(msg) => { + gum::trace!(target: LOG_TARGET, "MuxedMessage::Participation"); + let ParticipationStatement { + session, candidate_hash, candidate_receipt, - session, - valid, - clock.now(), - ) - .await?; - } else { - gum::warn!(target: LOG_TARGET, ?outcome, "Dispute participation failed"); - } - default_confirm - }, - MuxedMessage::Subsystem(msg) => match msg { - FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), - FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { - gum::trace!(target: LOG_TARGET, "OverseerSignal::ActiveLeaves"); - self.process_active_leaves_update( - ctx, - &mut overlay_db, - update, - clock.now(), - ) - .await?; + outcome, + } = self.participation.get_participation_result(ctx, msg).await?; + if let Some(valid) = outcome.validity() { + gum::trace!( + target: LOG_TARGET, + ?session, + ?candidate_hash, + ?valid, + "Issuing local statement based on participation outcome." 
+ ); + self.issue_local_statement( + ctx, + &mut overlay_db, + candidate_hash, + candidate_receipt, + session, + valid, + clock.now(), + ) + .await?; + } else { + gum::warn!(target: LOG_TARGET, ?outcome, "Dispute participation failed"); + } default_confirm }, - FromOrchestra::Signal(OverseerSignal::BlockFinalized(_, n)) => { - gum::trace!(target: LOG_TARGET, "OverseerSignal::BlockFinalized"); - self.scraper.process_finalized_block(&n); - default_confirm + MuxedMessage::Subsystem(msg) => match msg { + FromOrchestra::Signal(OverseerSignal::Conclude) => return Ok(()), + FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update)) => { + gum::trace!(target: LOG_TARGET, "OverseerSignal::ActiveLeaves"); + self.process_active_leaves_update( + ctx, + &mut overlay_db, + update, + clock.now(), + ) + .await?; + default_confirm + }, + FromOrchestra::Signal(OverseerSignal::BlockFinalized(_, n)) => { + gum::trace!(target: LOG_TARGET, "OverseerSignal::BlockFinalized"); + self.scraper.process_finalized_block(&n); + default_confirm + }, + FromOrchestra::Communication { msg } => + self.handle_incoming(ctx, &mut overlay_db, msg, clock.now()).await?, }, - FromOrchestra::Communication { msg } => - self.handle_incoming(ctx, &mut overlay_db, msg, clock.now()).await?, - }, - }; + }; if !overlay_db.is_empty() { let ops = overlay_db.into_write_ops(); diff --git a/node/service/src/lib.rs b/node/service/src/lib.rs index 44aaeb09e0f8..c19fd8b2576b 100644 --- a/node/service/src/lib.rs +++ b/node/service/src/lib.rs @@ -840,10 +840,10 @@ where net_config.add_request_response_protocol(cfg); let (collation_req_v1_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); - net_config.add_request_response_protocol(cfg); + net_config.add_request_response_protocol(cfg); let (collation_req_vstaging_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); - net_config.add_request_response_protocol(cfg); + net_config.add_request_response_protocol(cfg); let (available_data_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); net_config.add_request_response_protocol(cfg); @@ -851,7 +851,7 @@ where net_config.add_request_response_protocol(cfg); let (candidate_req_vstaging_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); - net_config.add_request_response_protocol(cfg); + net_config.add_request_response_protocol(cfg); let (dispute_req_receiver, cfg) = IncomingRequest::get_config_receiver(&req_protocol_names); net_config.add_request_response_protocol(cfg); diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs index fe7cb0251e15..c77b51f6b6f9 100644 --- a/runtime/parachains/src/inclusion/mod.rs +++ b/runtime/parachains/src/inclusion/mod.rs @@ -21,7 +21,8 @@ //! to included. 
use crate::{ - configuration::{self, HostConfiguration}, disputes, dmp, hrmp, paras, + configuration::{self, HostConfiguration}, + disputes, dmp, hrmp, paras, paras_inherent::DisputedBitfield, scheduler::{self, CoreAssignment}, shared::{self, AllowedRelayParentsTracker}, @@ -995,9 +996,7 @@ impl Pallet { )) } - pub(crate) fn relay_dispatch_queue_size( - para_id: ParaId, - ) -> (u32, u32) { + pub(crate) fn relay_dispatch_queue_size(para_id: ParaId) -> (u32, u32) { let fp = T::MessageQueue::footprint(AggregateMessageOrigin::Ump(UmpQueueId::Para(para_id))); (fp.count as u32, fp.size as u32) } @@ -1023,9 +1022,7 @@ impl Pallet { let (para_queue_count, mut para_queue_size) = Self::relay_dispatch_queue_size(para); - if para_queue_count.saturating_add(additional_msgs) > - config.max_upward_queue_count - { + if para_queue_count.saturating_add(additional_msgs) > config.max_upward_queue_count { return Err(UmpAcceptanceCheckErr::CapacityExceeded { count: para_queue_count.saturating_add(additional_msgs).into(), limit: config.max_upward_queue_count.into(), @@ -1037,14 +1034,13 @@ impl Pallet { if msg_size > config.max_upward_message_size { return Err(UmpAcceptanceCheckErr::MessageSize { idx: idx as u32, - msg_size: msg_size, + msg_size, max_size: config.max_upward_message_size, }) } // make sure that the queue is not overfilled. // we do it here only once since returning false invalidates the whole relay-chain block. - if para_queue_size.saturating_add(msg_size) > config.max_upward_queue_size - { + if para_queue_size.saturating_add(msg_size) > config.max_upward_queue_size { return Err(UmpAcceptanceCheckErr::TotalSizeExceeded { total_size: para_queue_size.saturating_add(msg_size).into(), limit: config.max_upward_queue_size.into(), diff --git a/runtime/parachains/src/runtime_api_impl/vstaging.rs b/runtime/parachains/src/runtime_api_impl/vstaging.rs index af806da1d974..8548f4dff5c2 100644 --- a/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -16,7 +16,7 @@ //! Put implementations of functions from staging APIs here. -use crate::{configuration, dmp, hrmp, initializer, paras, shared, inclusion}; +use crate::{configuration, dmp, hrmp, inclusion, initializer, paras, shared}; use primitives::{ vstaging::{ AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, @@ -50,7 +50,8 @@ pub fn backing_state( Some(block_num).zip(>::future_code_hash(para_id)) }); - let (ump_msg_count, ump_total_bytes) = >::relay_dispatch_queue_size(para_id); + let (ump_msg_count, ump_total_bytes) = + >::relay_dispatch_queue_size(para_id); let ump_remaining = config.max_upward_queue_count - ump_msg_count; let ump_remaining_bytes = config.max_upward_queue_size - ump_total_bytes; From 137668d1720dafecb603ec679f91d01def6e6135 Mon Sep 17 00:00:00 2001 From: Bradley Olson <34992650+BradleyOlson64@users.noreply.github.com> Date: Fri, 2 Jun 2023 11:27:50 -0700 Subject: [PATCH 54/76] Renaming Parathread Mentions (#7287) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Renaming parathreads * Renaming module to pallet * More updates * PVF: Refactor workers into separate crates, remove host dependency (#7253) * PVF: Refactor workers into separate crates, remove host dependency * Fix compile error * Remove some leftover code * Fix compile errors * Update Cargo.lock * Remove worker main.rs files I accidentally copied these from the other PR. This PR isn't intended to introduce standalone workers yet. 
* Address review comments * cargo fmt * Update a couple of comments * Update log targets * Update quote to 1.0.27 (#7280) Signed-off-by: Oliver Tale-Yazdi Co-authored-by: parity-processbot <> * pallets: implement `Default` for `GenesisConfig` in `no_std` (#7271) * pallets: implement Default for GenesisConfig in no_std This change is follow-up of: https://github.com/paritytech/substrate/pull/14108 It is a step towards: https://github.com/paritytech/substrate/issues/13334 * Cargo.lock updated * update lockfile for {"substrate"} --------- Co-authored-by: parity-processbot <> * cli: enable BEEFY by default on test networks (#7293) We consider BEEFY mature enough to run by default on all nodes for test networks (Rococo/Wococo/Versi). Right now, most nodes are not running it since it's opt-in using --beefy flag. Switch to an opt-out model for test networks. Replace --beefy flag from CLI with --no-beefy and have BEEFY client start by default on test networks. Signed-off-by: acatangiu * runtime: past session slashing runtime API (#6667) * runtime/vstaging: unapplied_slashes runtime API * runtime/vstaging: key_ownership_proof runtime API * runtime/ParachainHost: submit_report_dispute_lost * fix key_ownership_proof API * runtime: submit_report_dispute_lost runtime API * nits * Update node/subsystem-types/src/messages.rs Co-authored-by: Marcin S. * revert unrelated fmt changes * post merge fixes * fix compilation --------- Co-authored-by: Marcin S. * Correcting git mishap * Document usage of `gum` crate (#7294) * Document usage of gum crate * Small fix * Add some more basic info * Update node/gum/src/lib.rs Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> * Update target docs --------- Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> * XCM: Fix issue with RequestUnlock (#7278) * XCM: Fix issue with RequestUnlock * Leave API changes for v4 * Fix clippy errors * Fix tests --------- Co-authored-by: parity-processbot <> * Companion for Substrate#14228 (#7295) * Companion for Substrate#14228 https://github.com/paritytech/substrate/pull/14228 * update lockfile for {"substrate"} --------- Co-authored-by: parity-processbot <> * Companion for #14237: Use latest sp-crates (#7300) * To revert: Update substrate branch to "lexnv/bump_sp_crates" Signed-off-by: Alexandru Vasile * Revert "To revert: Update substrate branch to "lexnv/bump_sp_crates"" This reverts commit 5f1db84eac4a226c37b7f6ce6ee19b49dc7e2008. * Update cargo lock Signed-off-by: Alexandru Vasile * Update cargo.lock Signed-off-by: Alexandru Vasile * Update cargo.lock Signed-off-by: Alexandru Vasile --------- Signed-off-by: Alexandru Vasile * bounded-collections bump to 0.1.7 (#7305) * bounded-collections bump to 0.1.7 Companion for: paritytech/substrate#14225 * update lockfile for {"substrate"} --------- Co-authored-by: parity-processbot <> * bump to quote 1.0.28 (#7306) * `RollingSessionWindow` cleanup (#7204) * Replace `RollingSessionWindow` with `RuntimeInfo` - initial commit * Fix tests in import * Fix the rest of the tests * Remove dead code * Fix todos * Simplify session caching * Comments for `SessionInfoProvider` * Separate `SessionInfoProvider` from `State` * `cache_session_info_for_head` becomes freestanding function * Remove unneeded `mut` usage * fn session_info -> fn get_session_info() to avoid name clashes. 
The function also tries to initialize `SessionInfoProvider` * Fix SessionInfo retrieval * Code cleanup * Don't wrap `SessionInfoProvider` in an `Option` * Remove `earliest_session()` * Remove pre-caching -> wip * Fix some tests and code cleanup * Fix all tests * Fixes in tests * Fix comments, variable names and small style changes * Fix a warning * impl From for NonZeroUsize * Fix logging for `get_session_info` - remove redundant logs and decrease log level to DEBUG * Code review feedback * Storage migration removing `COL_SESSION_WINDOW_DATA` from parachains db * Remove `col_session_data` usages * Storage migration clearing columns w/o removing them * Remove session data column usages from `approval-voting` and `dispute-coordinator` tests * Add some test cases from `RollingSessionWindow` to `dispute-coordinator` tests * Fix formatting in initialized.rs * Fix a corner case in `SessionInfo` caching for `dispute-coordinator` * Remove `RollingSessionWindow` ;( * Revert "Fix formatting in initialized.rs" This reverts commit 0f94664ec9f3a7e3737a30291195990e1e7065fc. * v2 to v3 migration drops `COL_DISPUTE_COORDINATOR_DATA` instead of clearing it * Fix `NUM_COLUMNS` in `approval-voting` * Use `columns::v3::NUM_COLUMNS` when opening db * Update node/service/src/parachains_db/upgrade.rs Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> * Don't write in `COL_DISPUTE_COORDINATOR_DATA` for `test_rocksdb_migrate_2_to_3` * Fix `NUM+COLUMNS` in approval_voting * Fix formatting * Fix columns usage * Clarification comments about the different db versions --------- Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> * pallet-para-config: Remove remnant WeightInfo functions (#7308) * pallet-para-config: Remove remnant WeightInfo functions Signed-off-by: Oliver Tale-Yazdi * set_config_with_weight begone Signed-off-by: Oliver Tale-Yazdi * ".git/.scripts/commands/bench/bench.sh" runtime kusama-dev runtime_parachains::configuration --------- Signed-off-by: Oliver Tale-Yazdi Co-authored-by: command-bot <> * XCM: PayOverXcm config (#6900) * Move XCM query functionality to trait * Fix tests * Add PayOverXcm implementation * fix the PayOverXcm trait to compile * moved doc comment out of trait implmeentation and to the trait * PayOverXCM documentation * Change documentation a bit * Added empty benchmark methods implementation and changed docs * update PayOverXCM to convert AccountIds to MultiLocations * Implement benchmarking method * Change v3 to latest * Descend origin to an asset sender (#6970) * descend origin to an asset sender * sender as tuple of dest and sender * Add more variants to the QueryResponseStatus enum * Change Beneficiary to Into<[u8; 32]> * update PayOverXcm to return concrete errors and use AccountId as sender * use polkadot-primitives for AccountId * fix dependency to use polkadot-core-primitives * force Unpaid instruction to the top of the instructions list * modify report_outcome to accept interior argument * use new_query directly for building final xcm query, instead of report_outcome * fix usage of new_query to use the XcmQueryHandler * fix usage of new_query to use the XcmQueryHandler * tiny method calling fix * xcm query handler (#7198) * drop redundant query status * rename ReportQueryStatus to OuterQueryStatus * revert rename of QueryResponseStatus * update mapping * Update xcm/xcm-builder/src/pay.rs Co-authored-by: Gavin Wood * Updates * Docs * Fix benchmarking stuff * Destination can be determined based on asset_kind * Tweaking API to 
minimise clones * Some repotting and docs --------- Co-authored-by: Anthony Alaribe Co-authored-by: Muharem Ismailov Co-authored-by: Anthony Alaribe Co-authored-by: Gavin Wood * Companion for #14265 (#7307) * Update Cargo.lock Signed-off-by: Alexandru Vasile * Update Cargo.lock Signed-off-by: Alexandru Vasile --------- Signed-off-by: Alexandru Vasile Co-authored-by: parity-processbot <> * bump serde to 1.0.163 (#7315) * bump serde to 1.0.163 * bump ci * update lockfile for {"substrate"} --------- Co-authored-by: parity-processbot <> * fmt * Updated fmt * Removing changes accidentally pulled from master * fix another master pull issue * Another master pull fix * fmt * Fixing implementers guide build * Revert "Merge branch 'rh-async-backing-feature-while-frozen' of https://github.com/paritytech/polkadot into brad-rename-parathread" This reverts commit bebc24af52ab61155e3fe02cb3ce66a592bce49c, reversing changes made to 1b2de662dfb11173679d6da5bd0da9d149c85547. --------- Signed-off-by: Oliver Tale-Yazdi Signed-off-by: acatangiu Signed-off-by: Alexandru Vasile Co-authored-by: Marcin S Co-authored-by: Oliver Tale-Yazdi Co-authored-by: Michal Kucharczyk <1728078+michalkucharczyk@users.noreply.github.com> Co-authored-by: Adrian Catangiu Co-authored-by: ordian Co-authored-by: Marcin S. Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Co-authored-by: Francisco Aguirre Co-authored-by: Bastian Köcher Co-authored-by: Alexandru Vasile <60601340+lexnv@users.noreply.github.com> Co-authored-by: Sam Johnson Co-authored-by: Tsvetomir Dimitrov Co-authored-by: Anthony Alaribe Co-authored-by: Muharem Ismailov Co-authored-by: Anthony Alaribe Co-authored-by: Gavin Wood --- node/core/provisioner/src/lib.rs | 2 +- .../src/validator_side/mod.rs | 6 +- .../src/vstaging/mod.rs | 4 +- primitives/src/v4/mod.rs | 16 ++-- roadmap/implementers-guide/src/SUMMARY.md | 23 +++--- roadmap/implementers-guide/src/glossary.md | 5 +- .../src/node/utility/provisioner.md | 2 +- .../implementers-guide/src/pvf-prechecking.md | 8 +- .../src/runtime-api/availability-cores.md | 4 +- .../implementers-guide/src/runtime/README.md | 8 +- .../src/runtime/configuration.md | 2 +- .../src/runtime/disputes.md | 2 +- roadmap/implementers-guide/src/runtime/dmp.md | 2 +- .../implementers-guide/src/runtime/hrmp.md | 2 +- .../src/runtime/inclusion.md | 10 +-- .../src/runtime/initializer.md | 2 +- .../implementers-guide/src/runtime/paras.md | 44 +++++------ .../src/runtime/scheduler.md | 75 ++++++++++--------- .../implementers-guide/src/runtime/shared.md | 2 +- .../implementers-guide/src/types/candidate.md | 6 +- .../implementers-guide/src/types/runtime.md | 12 +-- roadmap/parachains.md | 14 ++-- runtime/common/src/assigned_slots.rs | 22 +++--- runtime/common/src/integration_tests.rs | 27 ++++--- runtime/common/src/mock.rs | 11 ++- runtime/common/src/paras_registrar.rs | 59 ++++++++------- runtime/common/src/paras_sudo_wrapper.rs | 18 ++--- runtime/common/src/slots/mod.rs | 19 ++--- runtime/common/src/traits.rs | 14 ++-- runtime/parachains/src/configuration.rs | 28 +++---- runtime/parachains/src/hrmp.rs | 2 +- runtime/parachains/src/inclusion/mod.rs | 3 +- runtime/parachains/src/lib.rs | 4 +- runtime/parachains/src/paras/mod.rs | 61 +++++++-------- runtime/parachains/src/scheduler.rs | 72 +++++++++--------- runtime/parachains/src/scheduler/tests.rs | 28 +++---- runtime/polkadot/src/lib.rs | 1 + 37 files changed, 323 insertions(+), 297 deletions(-) diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs 
index c989366ffa05..e1d450112ff8 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -663,7 +663,7 @@ async fn request_backable_candidates( // The candidate occupying the core is available, choose its // child in the fragment tree. // - // TODO: doesn't work for parathreads. We lean hard on the assumption + // TODO: doesn't work for on-demand parachains. We lean hard on the assumption // that cores are fixed to specific parachains within a session. // https://github.com/paritytech/polkadot/issues/5492 (scheduled_core.para_id, vec![occupied_core.candidate_hash]) diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs index aad945defcdb..aae5e5cb8ee5 100644 --- a/node/network/collator-protocol/src/validator_side/mod.rs +++ b/node/network/collator-protocol/src/validator_side/mod.rs @@ -520,9 +520,9 @@ where }, }; - // This code won't work well, if at all for parathreads. For parathreads we'll - // have to be aware of which core the parathread claim is going to be multiplexed - // onto. The parathread claim will also have a known collator, and we should always + // This code won't work well, if at all for on-demand parachains. For on-demand we'll + // have to be aware of which core the on-demand claim is going to be multiplexed + // onto. The on-demand claim will also have a known collator, and we should always // allow an incoming connection from that collator. If not even connecting to them // directly. // diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index 711ad0df33b2..f12c64957f0c 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -549,7 +549,7 @@ fn find_local_validator_state( let our_group = groups.by_validator_index(validator_index)?; - // note: this won't work well for parathreads because it only works + // note: this won't work well for on-demand parachains because it only works // when core assignments to paras are static throughout the session. let core = group_rotation_info.core_for_group(our_group, availability_cores.len()); @@ -1708,7 +1708,7 @@ fn group_for_para( group_rotation_info: &GroupRotationInfo, para_id: ParaId, ) -> Option { - // Note: this won't work well for parathreads as it assumes that core assignments are fixed + // Note: this won't work well for on-demand parachains as it assumes that core assignments are fixed // across blocks. let core_index = availability_cores.iter().position(|c| c.para_id() == Some(para_id)); diff --git a/primitives/src/v4/mod.rs b/primitives/src/v4/mod.rs index 3bb561ce46ba..75acf39e0576 100644 --- a/primitives/src/v4/mod.rs +++ b/primitives/src/v4/mod.rs @@ -802,12 +802,13 @@ impl TypeIndex for GroupIndex { } } -/// A claim on authoring the next block for a given parathread. +/// A claim on authoring the next block for a given parathread (on-demand parachain). #[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] #[cfg_attr(feature = "std", derive(PartialEq))] pub struct ParathreadClaim(pub Id, pub CollatorId); -/// An entry tracking a claim to ensure it does not pass the maximum number of retries. +/// An entry tracking a parathread (on-demand parachain) claim to ensure it does not +/// pass the maximum number of retries. 
#[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] #[cfg_attr(feature = "std", derive(PartialEq))] pub struct ParathreadEntry { @@ -821,7 +822,7 @@ pub struct ParathreadEntry { #[derive(Clone, Encode, Decode, TypeInfo, RuntimeDebug)] #[cfg_attr(feature = "std", derive(PartialEq))] pub enum CoreOccupied { - /// A parathread. + /// A parathread (on-demand parachain). Parathread(ParathreadEntry), /// A parachain. Parachain, @@ -974,8 +975,9 @@ pub enum CoreState { /// variant. #[codec(index = 1)] Scheduled(ScheduledCore), - /// The core is currently free and there is nothing scheduled. This can be the case for parathread - /// cores when there are no parathread blocks queued. Parachain cores will never be left idle. + /// The core is currently free and there is nothing scheduled. This can be the case for + /// on-demand parachain cores when there are no on-demand blocks queued. Leased parachain + /// cores will never be left idle. #[codec(index = 2)] Free, } @@ -1176,10 +1178,10 @@ pub const POLKADOT_ENGINE_ID: runtime_primitives::ConsensusEngineId = *b"POL1"; /// A consensus log item for polkadot validation. To be used with [`POLKADOT_ENGINE_ID`]. #[derive(Decode, Encode, Clone, PartialEq, Eq)] pub enum ConsensusLog { - /// A parachain or parathread upgraded its code. + /// A parachain upgraded its code. #[codec(index = 1)] ParaUpgradeCode(Id, ValidationCodeHash), - /// A parachain or parathread scheduled a code upgrade. + /// A parachain scheduled a code upgrade. #[codec(index = 2)] ParaScheduleUpgradeCode(Id, ValidationCodeHash, BlockNumber), /// Governance requests to auto-approve every candidate included up to the given block diff --git a/roadmap/implementers-guide/src/SUMMARY.md b/roadmap/implementers-guide/src/SUMMARY.md index ef94ef3659c6..3095013409d9 100644 --- a/roadmap/implementers-guide/src/SUMMARY.md +++ b/roadmap/implementers-guide/src/SUMMARY.md @@ -12,18 +12,17 @@ - [Messaging Overview](messaging.md) - [PVF Pre-checking](pvf-prechecking.md) - [Runtime Architecture](runtime/README.md) - - [`Initializer` Module](runtime/initializer.md) - - [`Configuration` Module](runtime/configuration.md) - - [`Shared`](runtime/shared.md) - - [`Disputes` Module](runtime/disputes.md) - - [`Paras` Module](runtime/paras.md) - - [`Scheduler` Module](runtime/scheduler.md) - - [`Inclusion` Module](runtime/inclusion.md) - - [`ParaInherent` Module](runtime/parainherent.md) - - [`DMP` Module](runtime/dmp.md) - - [`UMP` Module](runtime/ump.md) - - [`HRMP` Module](runtime/hrmp.md) - - [`Session Info` Module](runtime/session_info.md) + - [`Initializer` Pallet](runtime/initializer.md) + - [`Configuration` Pallet](runtime/configuration.md) + - [`Shared` Pallet](runtime/shared.md) + - [`Disputes` Pallet](runtime/disputes.md) + - [`Paras` Pallet](runtime/paras.md) + - [`Scheduler` Pallet](runtime/scheduler.md) + - [`Inclusion` Pallet](runtime/inclusion.md) + - [`ParaInherent` Pallet](runtime/parainherent.md) + - [`DMP` Pallet](runtime/dmp.md) + - [`HRMP` Pallet](runtime/hrmp.md) + - [`Session Info` Pallet](runtime/session_info.md) - [Runtime APIs](runtime-api/README.md) - [Validators](runtime-api/validators.md) - [Validator Groups](runtime-api/validator-groups.md) diff --git a/roadmap/implementers-guide/src/glossary.md b/roadmap/implementers-guide/src/glossary.md index d379c2813b59..e3c34458184b 100644 --- a/roadmap/implementers-guide/src/glossary.md +++ b/roadmap/implementers-guide/src/glossary.md @@ -24,11 +24,12 @@ exactly one downward message queue. - **Parablock:** A block in a parachain. 
- **Parachain:** A constituent chain secured by the Relay Chain's validators. - **Parachain Validators:** A subset of validators assigned during a period of time to back candidates for a specific parachain -- **Parathread:** A parachain which is scheduled on a pay-as-you-go basis. +- **On-demand parachain:** A parachain which is scheduled on a pay-as-you-go basis. +- **Lease holding parachain:** A parachain possessing an active slot lease. The lease holder is assigned a single availability core for the duration of the lease, granting consistent blockspace scheduling at the rate of 1 parablock per relay block. - **PDK (Parachain Development Kit):** A toolset that allows one to develop a parachain. Cumulus is a PDK. - **Preimage:** In our context, if `H(X) = Y` where `H` is a hash function and `Y` is the hash, then `X` is the hash preimage. - **Proof-of-Validity (PoV):** A stateless-client proof that a parachain candidate is valid, with respect to some validation function. -- **PVF:** Parachain Validation Function. The validation code that is run by validators on parachains or parathreads. +- **PVF:** Parachain Validation Function. The validation code that is run by validators on parachains. - **PVF Prechecking:** This is the process of initially checking the PVF when it is first added. We attempt preparation of the PVF and make sure it succeeds within a given timeout. - **PVF Preparation:** This is the process of preparing the WASM blob and includes both prevalidation and compilation. As prevalidation is pretty minimal right now, preparation mostly consists of compilation. - **Relay Parent:** A block in the relay chain, referred to in a context where work is being done in the context of the state at this block. diff --git a/roadmap/implementers-guide/src/node/utility/provisioner.md b/roadmap/implementers-guide/src/node/utility/provisioner.md index 4cb930b1a159..a3998cabba6c 100644 --- a/roadmap/implementers-guide/src/node/utility/provisioner.md +++ b/roadmap/implementers-guide/src/node/utility/provisioner.md @@ -179,7 +179,7 @@ The response is a vector of `BackedCandidate`s, sorted in order of their core in - Often referred to simply as "cores", availability cores are an abstraction used for resource management. For the provisioner, availability cores are most relevant in that core states determine which `para_id`s to provision backable candidates for. - For more on availability cores see [Scheduler Module: Availability Cores](../../runtime/scheduler.md#availability-cores) - **Availability Bitfield:** - - Often referred to simply as a "bitfield", an availability bitfield represents the view of parablock candidate availability from a particular validator's perspective. Each bit in the bitfield corresponds to a single [availability core](../runtime-api/availability-cores.md). + - Often referred to simply as a "bitfield", an availability bitfield represents the view of parablock candidate availability from a particular validator's perspective. Each bit in the bitfield corresponds to a single [availability core](../../runtime-api/availability-cores.md). - For more on availability bitfields see [availability](../../types/availability.md) - **Backable vs. Backed:** - Note that we sometimes use "backed" to refer to candidates that are "backable", but not yet backed on chain.
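To make the bitfield semantics above concrete: each validator signs one bitfield per relay-chain block, with bit `i` referring to the candidate occupying availability core `i`, and a candidate counts as available once more than 2/3 of validators have set its bit (see the `process_bitfields` steps in the inclusion diff further below). Here is a minimal, self-contained Rust sketch of that tally; the types are simplified stand-ins, not the real `polkadot-primitives` definitions:

```rust
/// Simplified stand-in for a core index; not the real primitives type.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct CoreIndex(usize);

/// One validator's view: bit `i` reports the candidate on core `i` as available.
struct AvailabilityBitfield(Vec<bool>);

/// Tally bitfields from distinct validators and return the cores whose
/// pending candidates crossed the >2/3 supermajority threshold.
fn newly_available(
    bitfields: &[AvailabilityBitfield],
    n_cores: usize,
    n_validators: usize,
) -> Vec<CoreIndex> {
    let mut votes = vec![0usize; n_cores];
    for field in bitfields {
        for (core, bit) in field.0.iter().enumerate().take(n_cores) {
            if *bit {
                votes[core] += 1;
            }
        }
    }
    votes
        .iter()
        .enumerate()
        // Strict supermajority: 3 * votes > 2 * validators  <=>  votes > 2/3.
        .filter(|&(_, &v)| 3 * v > 2 * n_validators)
        .map(|(core, _)| CoreIndex(core))
        .collect()
}
```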
diff --git a/roadmap/implementers-guide/src/pvf-prechecking.md b/roadmap/implementers-guide/src/pvf-prechecking.md index 155d32d52898..961f9323308f 100644 --- a/roadmap/implementers-guide/src/pvf-prechecking.md +++ b/roadmap/implementers-guide/src/pvf-prechecking.md @@ -4,7 +4,7 @@ ## Motivation -Parachains' and parathreads' validation function is described by a wasm module that we refer to as a PVF. Since a PVF is a wasm module the typical way of executing it is to compile it to machine code. +A parachain's validation function is described by a wasm module that we refer to as a PVF. Since a PVF is a wasm module, the typical way of executing it is to compile it to machine code. Typically an optimizing compiler consists of algorithms that are able to optimize the resulting machine code heavily. However, while those algorithms perform quite well for a typical wasm code produced by standard toolchains (e.g. rustc/LLVM), those algorithms can be abused to consume a lot of resources. Moreover, since those algorithms are rather complex there is a lot of room for a bug that can crash the compiler. @@ -23,8 +23,8 @@ As a result of this issue we need a fairly hard guarantee that the PVFs of regis The problem is solved by having a pre-checking process which is run when a new validation code is included in the chain. A new PVF can be added in two cases: -- A new parachain or parathread is registered. -- An existing parachain or parathread signalled an upgrade of its validation code. +- A new parachain is registered. +- An existing parachain signals an upgrade of its validation code. Before any of those operations finish, the PVF pre-checking vote is initiated. The PVF pre-checking vote is identified by the PVF code hash that is being voted on. If there is already a PVF pre-checking process running, then no new PVF pre-checking vote will be started. Instead, the operation just subscribes to the existing vote. @@ -48,7 +48,7 @@ On the node-side, there is a PVF pre-checking [subsystem][pvf-prechecker-subsyst ## Summary -Parachains' and parathreads' validation function is described by a wasm module that we refer to as a PVF. +A parachain's validation function is described by a wasm module that we refer to as a PVF. In order to make the PVF usable for candidate validation it has to be registered on-chain. diff --git a/roadmap/implementers-guide/src/runtime-api/availability-cores.md b/roadmap/implementers-guide/src/runtime-api/availability-cores.md index b95af2343b36..9402924f0013 100644 --- a/roadmap/implementers-guide/src/runtime-api/availability-cores.md +++ b/roadmap/implementers-guide/src/runtime-api/availability-cores.md @@ -52,8 +52,8 @@ enum CoreState { /// If a particular Collator is required to author this block, that is also present in this /// variant. Scheduled(ScheduledCore), - /// The core is currently free and there is nothing scheduled. This can be the case for parathread - /// cores when there are no parathread blocks queued. Parachain cores will never be left idle. + /// The core is currently free and there is nothing scheduled. This can be the case for on-demand + /// cores when there are no on-demand parachain blocks queued. Leased cores will never be left idle.
Free, } ``` diff --git a/roadmap/implementers-guide/src/runtime/README.md b/roadmap/implementers-guide/src/runtime/README.md index f1f9d6c950e2..995b684b1f06 100644 --- a/roadmap/implementers-guide/src/runtime/README.md +++ b/roadmap/implementers-guide/src/runtime/README.md @@ -6,7 +6,7 @@ Due to the (lack of) guarantees provided by a particular blockchain-runtime fram We also expect, although it's beyond the scope of this guide, that these runtime modules will exist alongside various other modules. This has two facets to consider. First, even if the modules that we describe here don't invoke each other's entry points or routines during initialization, we still have to protect against those other modules doing that. Second, some of those modules are expected to provide governance capabilities for the chain. Configuration exposed by parachain-host modules is mostly for the benefit of these governance modules, to allow the operators or community of the chain to tweak parameters. -The runtime's primary roles to manage scheduling and updating of parachains and parathreads, as well as handling misbehavior reports and slashing. This guide doesn't focus on how parachains or parathreads are registered, only that they are. Also, this runtime description assumes that validator sets are selected somehow, but doesn't assume any other details than a periodic _session change_ event. Session changes give information about the incoming validator set and the validator set of the following session. +The runtime's primary role is to manage scheduling and updating of parachains, as well as handling misbehavior reports and slashing. This guide doesn't focus on how parachains are registered, only that they are. Also, this runtime description assumes that validator sets are selected somehow, but doesn't assume any other details than a periodic _session change_ event. Session changes give information about the incoming validator set and the validator set of the following session. The runtime also serves another role, which is to make data available to the Node-side logic via Runtime APIs. These Runtime APIs should be sufficient for the Node-side code to author blocks correctly. @@ -17,9 +17,9 @@ We will split the logic of the runtime up into these modules: * Initializer: manages initialization order of the other modules. * Shared: manages shared storage and configurations for other modules. * Configuration: manages configuration and configuration updates in a non-racy manner. -* Paras: manages chain-head and validation code for parachains and parathreads. -* Scheduler: manages parachain and parathread scheduling as well as validator assignments. -* Inclusion: handles the inclusion and availability of scheduled parachains and parathreads. +* Paras: manages chain-head and validation code for parachains. +* Scheduler: manages parachain scheduling as well as validator assignments. +* Inclusion: handles the inclusion and availability of scheduled parachains. * SessionInfo: manages various session keys of validators and other params stored per session. * Disputes: handles dispute resolution for included, available parablocks. * Slashing: handles slashing logic for concluded disputes.
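Since the rest of the guide leans on the Initializer's ordering guarantee (the README above notes that Configuration must not change mid-session), here is an illustrative sketch of that pattern. Trait and type names are hypothetical, not the actual `runtime/parachains` code:

```rust
/// Hypothetical interface each parachains module exposes to the Initializer.
trait ParachainsModule {
    /// Forwarded session-change notification.
    fn on_new_session(&mut self, session_index: u32);
    /// Per-block initialization.
    fn initialize(&mut self, now: u32);
}

/// The Initializer drives the other modules in one fixed, deterministic
/// order (Configuration first, so its value is stable for the whole session).
struct Initializer {
    modules: Vec<Box<dyn ParachainsModule>>,
}

impl Initializer {
    fn on_initialize(&mut self, now: u32) {
        for module in self.modules.iter_mut() {
            module.initialize(now);
        }
    }

    fn on_new_session(&mut self, session_index: u32) {
        for module in self.modules.iter_mut() {
            module.on_new_session(session_index);
        }
    }
}
```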
diff --git a/roadmap/implementers-guide/src/runtime/configuration.md b/roadmap/implementers-guide/src/runtime/configuration.md index 96d63faccedd..e3463846b3cb 100644 --- a/roadmap/implementers-guide/src/runtime/configuration.md +++ b/roadmap/implementers-guide/src/runtime/configuration.md @@ -1,4 +1,4 @@ -# Configuration Module +# Configuration Pallet This module is responsible for managing all configuration of the parachain host in-flight. It provides a central point for configuration updates to prevent races between configuration changes and parachain-processing logic. Configuration can only change during the session change routine, and as this module handles the session change notification first it provides an invariant that the configuration does not change throughout the entire session. Both the [scheduler](scheduler.md) and [inclusion](inclusion.md) modules rely on this invariant to ensure proper behavior of the scheduler. diff --git a/roadmap/implementers-guide/src/runtime/disputes.md b/roadmap/implementers-guide/src/runtime/disputes.md index 1d3e3f62dc01..a2558b74f562 100644 --- a/roadmap/implementers-guide/src/runtime/disputes.md +++ b/roadmap/implementers-guide/src/runtime/disputes.md @@ -1,4 +1,4 @@ -# Disputes Module +# Disputes Pallet After a backed candidate is made available, it is included and proceeds into an acceptance period during which validators are randomly selected to do (secondary) approval checks of the parablock. Any reports disputing the validity of the candidate will cause escalation, where even more validators are requested to check the block, and so on, until either the parablock is determined to be invalid or valid. Those on the wrong side of the dispute are slashed and, if the parablock is deemed invalid, the relay chain is rolled back to a point before that block was included. diff --git a/roadmap/implementers-guide/src/runtime/dmp.md b/roadmap/implementers-guide/src/runtime/dmp.md index bade5ad4b8c4..f56df31934ef 100644 --- a/roadmap/implementers-guide/src/runtime/dmp.md +++ b/roadmap/implementers-guide/src/runtime/dmp.md @@ -1,4 +1,4 @@ -# DMP Module +# DMP Pallet A module responsible for Downward Message Processing (DMP). See [Messaging Overview](../messaging.md) for more details. diff --git a/roadmap/implementers-guide/src/runtime/hrmp.md b/roadmap/implementers-guide/src/runtime/hrmp.md index 2b0b4751e30a..927c14cd5969 100644 --- a/roadmap/implementers-guide/src/runtime/hrmp.md +++ b/roadmap/implementers-guide/src/runtime/hrmp.md @@ -1,4 +1,4 @@ -# HRMP Module +# HRMP Pallet A module responsible for Horizontally Relay-routed Message Passing (HRMP). See [Messaging Overview](../messaging.md) for more details. diff --git a/roadmap/implementers-guide/src/runtime/inclusion.md b/roadmap/implementers-guide/src/runtime/inclusion.md index 8b8ea944d7fd..3fe7711ae2d0 100644 --- a/roadmap/implementers-guide/src/runtime/inclusion.md +++ b/roadmap/implementers-guide/src/runtime/inclusion.md @@ -1,6 +1,6 @@ -# Inclusion Module +# Inclusion Pallet -The inclusion module is responsible for inclusion and availability of scheduled parachains and parathreads. It also manages the UMP dispatch queue of each parachain/thread. +The inclusion module is responsible for inclusion and availability of scheduled parachains. It also manages the UMP dispatch queue of each parachain. ## Storage @@ -61,9 +61,9 @@ No initialization routine runs for this module. However, the initialization of t All failed checks should lead to an unrecoverable error making the block invalid. 
* `process_bitfields(expected_bits, Bitfields, core_lookup: Fn(CoreIndex) -> Option)`: - 1. call `sanitize_bitfields` and use the sanitized `signed_bitfields` from now on. - 1. call `sanitize_backed_candidates` and use the sanitized `backed_candidates` from now on. - 1. apply each bit of bitfield to the corresponding pending candidate. looking up parathread cores using the `core_lookup`. Disregard bitfields that have a `1` bit for any free cores. + 1. Call `sanitize_bitfields` and use the sanitized `signed_bitfields` from now on. + 1. Call `sanitize_backed_candidates` and use the sanitized `backed_candidates` from now on. + 1. Apply each bit of the bitfield to the corresponding pending candidate, looking up on-demand parachain cores using the `core_lookup`. Disregard bitfields that have a `1` bit for any free cores. 1. For each applied bit of each availability-bitfield, set the bit for the validator in the `CandidatePendingAvailability`'s `availability_votes` bitfield. Track all candidates that now have >2/3 of bits set in their `availability_votes`. These candidates are now available and can be enacted. 1. For all now-available candidates, invoke the `enact_candidate` routine with the candidate and relay-parent number. 1. Return a list of `(CoreIndex, CandidateHash)` from freed cores consisting of the cores where candidates have become available. diff --git a/roadmap/implementers-guide/src/runtime/initializer.md b/roadmap/implementers-guide/src/runtime/initializer.md index ffeacd5cb357..19dfcbde50a9 100644 --- a/roadmap/implementers-guide/src/runtime/initializer.md +++ b/roadmap/implementers-guide/src/runtime/initializer.md @@ -1,4 +1,4 @@ -# Initializer Module +# Initializer Pallet This module is responsible for initializing the other modules in a deterministic order. It also has one other purpose as described in the overview of the runtime: accepting and forwarding session change notifications. diff --git a/roadmap/implementers-guide/src/runtime/paras.md b/roadmap/implementers-guide/src/runtime/paras.md index a89cca6b658e..71e5fcbaea21 100644 --- a/roadmap/implementers-guide/src/runtime/paras.md +++ b/roadmap/implementers-guide/src/runtime/paras.md @@ -1,7 +1,7 @@ -# Paras Module +# Paras Pallet -The Paras module is responsible for storing information on parachains and parathreads. Registered -parachains and parathreads cannot change except at session boundaries and after at least a full +The Paras module is responsible for storing information on parachains. Registered +parachains cannot change except at session boundaries and after at least a full session has passed. This is primarily to ensure that the number and meaning of bits required for the availability bitfields does not change except at session boundaries. @@ -54,15 +54,15 @@ struct ParaGenesisArgs { pub enum ParaLifecycle { /// A Para is new and is onboarding. Onboarding, - /// Para is a Parathread. + /// Para is a Parathread (on-demand parachain). Parathread, - /// Para is a Parachain. + /// Para is a lease holding Parachain. Parachain, - /// Para is a Parathread which is upgrading to a Parachain. + /// Para is a Parathread (on-demand Parachain) which is upgrading to a lease holding Parachain. UpgradingParathread, - /// Para is a Parachain which is downgrading to a Parathread. + /// Para is a lease holding Parachain which is downgrading to an on-demand parachain. DowngradingParachain, - /// Parathread is being offboarded. + /// Parathread (on-demand parachain) is being offboarded.
OutgoingParathread, /// Parachain is being offboarded. OutgoingParachain, @@ -102,11 +102,11 @@ struct PvfCheckActiveVoteState { #### Para Lifecycle -Because the state changes of parachains and parathreads are delayed, we track the specific state of +Because the state changes of parachains are delayed, we track the specific state of the para using the `ParaLifecycle` enum. ``` -None Parathread Parachain +None Parathread (on-demand parachain) Parachain + + + | | | | (≈2 Session Delay) | | @@ -147,7 +147,7 @@ During the transition period, the para object is still considered in its existin PvfActiveVoteMap: map ValidationCodeHash => PvfCheckActiveVoteState; /// The list of all currently active PVF votes. Auxiliary to `PvfActiveVoteMap`. PvfActiveVoteList: Vec; -/// All parachains. Ordered ascending by ParaId. Parathreads are not included. +/// All parachains. Ordered ascending by ParaId. On-demand parachains are not included. Parachains: Vec, /// The current lifecycle state of all known Para Ids. ParaLifecycle: map ParaId => Option, @@ -229,10 +229,10 @@ CodeByHash: map ValidationCodeHash => Option 1. Apply all incoming paras by initializing the `Heads` and `CurrentCode` using the genesis parameters as well as `MostRecentContext` to `0`. 1. Amend the `Parachains` list and `ParaLifecycle` to reflect changes in registered parachains. - 1. Amend the `ParaLifecycle` set to reflect changes in registered parathreads. - 1. Upgrade all parathreads that should become parachains, updating the `Parachains` list and + 1. Amend the `ParaLifecycle` set to reflect changes in registered on-demand parachains. + 1. Upgrade all on-demand parachains that should become lease holding parachains, updating the `Parachains` list and `ParaLifecycle`. - 1. Downgrade all parachains that should become parathreads, updating the `Parachains` list and + 1. Downgrade all lease holding parachains that should become on-demand parachains, updating the `Parachains` list and `ParaLifecycle`. 1. (Deferred) Return list of outgoing paras to the initializer for use by other modules. 1. Go over all active PVF pre-checking votes: @@ -254,8 +254,8 @@ CodeByHash: map ValidationCodeHash => Option * `schedule_para_initialize(ParaId, ParaGenesisArgs)`: Schedule a para to be initialized at the next session. Noop if para is already registered in the system with some `ParaLifecycle`. * `schedule_para_cleanup(ParaId)`: Schedule a para to be cleaned up after the next full session. -* `schedule_parathread_upgrade(ParaId)`: Schedule a parathread to be upgraded to a parachain. -* `schedule_parachain_downgrade(ParaId)`: Schedule a parachain to be downgraded to a parathread. +* `schedule_parathread_upgrade(ParaId)`: Schedule a parathread (on-demand parachain) to be upgraded to a parachain. +* `schedule_parachain_downgrade(ParaId)`: Schedule a parachain to be downgraded from lease holding to on-demand. * `schedule_code_upgrade(ParaId, new_code, relay_parent: BlockNumber, HostConfiguration)`: Schedule a future code upgrade of the given parachain. In case the PVF pre-checking is disabled, or the new code is already present in the storage, the upgrade will be applied after inclusion of a block of the same parachain executed in the context of a relay-chain block with number >= `relay_parent + config.validation_upgrade_delay`. If the upgrade is scheduled `UpgradeRestrictionSignal` is set and it will remain set until `relay_parent + config.validation_upgrade_cooldown`. 
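The upgrade-timing prose above reduces to two thresholds derived from the anchoring relay-parent. A sketch of that arithmetic under simplified `u32` block numbers; the field names echo `HostConfiguration`, but this is not the pallet's actual code:

```rust
/// When a code upgrade scheduled at `relay_parent` takes effect, per the
/// description of `schedule_code_upgrade` above.
struct UpgradeTiming {
    /// A para block executed in a relay-chain context with number >= this
    /// applies the pending upgrade.
    apply_from: u32,
    /// `UpgradeRestrictionSignal` remains set until this relay-chain block,
    /// blocking further upgrade signals in the meantime.
    restricted_until: u32,
}

fn upgrade_timing(
    relay_parent: u32,
    validation_upgrade_delay: u32,
    validation_upgrade_cooldown: u32,
) -> UpgradeTiming {
    UpgradeTiming {
        apply_from: relay_parent + validation_upgrade_delay,
        restricted_until: relay_parent + validation_upgrade_cooldown,
    }
}
```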
@@ -263,12 +263,12 @@ In case the PVF pre-checking is enabled, or the new code is not already present * `note_new_head(ParaId, HeadData, BlockNumber)`: note that a para has progressed to a new head, where the new head was executed in the context of a relay-chain block with given number, the latter value is inserted into the `MostRecentContext` mapping. This will apply pending code upgrades based on the block number provided. If an upgrade took place it will clear the `UpgradeGoAheadSignal`. * `lifecycle(ParaId) -> Option`: Return the `ParaLifecycle` of a para. -* `is_parachain(ParaId) -> bool`: Returns true if the para ID references any live parachain, - including those which may be transitioning to a parathread in the future. -* `is_parathread(ParaId) -> bool`: Returns true if the para ID references any live parathread, - including those which may be transitioning to a parachain in the future. -* `is_valid_para(ParaId) -> bool`: Returns true if the para ID references either a live parathread - or live parachain. +* `is_parachain(ParaId) -> bool`: Returns true if the para ID references any live lease holding parachain, + including those which may be transitioning to an on-demand parachain in the future. +* `is_parathread(ParaId) -> bool`: Returns true if the para ID references any live parathread (on-demand parachain), + including those which may be transitioning to a lease holding parachain in the future. +* `is_valid_para(ParaId) -> bool`: Returns true if the para ID references either a live on-demand parachain + or live lease holding parachain. * `can_upgrade_validation_code(ParaId) -> bool`: Returns true if the given para can signal code upgrade right now. * `pvfs_require_prechecking() -> Vec`: Returns the list of PVF validation code hashes that require PVF pre-checking votes. diff --git a/roadmap/implementers-guide/src/runtime/scheduler.md b/roadmap/implementers-guide/src/runtime/scheduler.md index 7383177aa1cb..312ecedcb50f 100644 --- a/roadmap/implementers-guide/src/runtime/scheduler.md +++ b/roadmap/implementers-guide/src/runtime/scheduler.md @@ -1,24 +1,24 @@ -# Scheduler Module +# Scheduler Pallet > TODO: this section is still heavily under construction. Key questions about availability cores and validator assignment are still open and the flow of the section may be contradictory or inconsistent The Scheduler module is responsible for two main tasks: -- Partitioning validators into groups and assigning groups to parachains and parathreads. -- Scheduling parachains and parathreads +- Partitioning validators into groups and assigning groups to parachains. +- Scheduling parachains for each block It aims to achieve these tasks with these goals in mind: - It should be possible to know at least a block ahead-of-time, ideally more, which validators are going to be assigned to which parachains. - Parachains that have a candidate pending availability in this fork of the chain should not be assigned. - Validator assignments should not be gameable. Malicious cartels should not be able to manipulate the scheduler to assign themselves as desired. -- High or close to optimal throughput of parachains and parathreads. Work among validator groups should be balanced. +- High or close to optimal throughput of parachains. Work among validator groups should be balanced. ## Availability Cores -The Scheduler manages resource allocation using the concept of "Availability Cores". There will be one availability core for each parachain, and a fixed number of cores used for multiplexing parathreads.
Validators will be partitioned into groups, with the same number of groups as availability cores. Validator groups will be assigned to different availability cores over time. +The Scheduler manages resource allocation using the concept of "Availability Cores". There will be one availability core for each lease holding parachain, and a fixed number of cores used for multiplexing on-demand parachains. Validators will be partitioned into groups, with the same number of groups as availability cores. Validator groups will be assigned to different availability cores over time. -An availability core can exist in either one of two states at the beginning or end of a block: free or occupied. A free availability core can have a parachain or parathread assigned to it for the potential to have a backed candidate included. After backing, the core enters the occupied state as the backed candidate is pending availability. There is an important distinction: a core is not considered occupied until it is in charge of a block pending availability, although the implementation may treat scheduled cores the same as occupied ones for brevity. A core exits the occupied state when the candidate is no longer pending availability - either on timeout or on availability. A core starting in the occupied state can move to the free state and back to occupied all within a single block, as availability bitfields are processed before backed candidates. At the end of the block, there is a possible timeout on availability which can move the core back to the free state if occupied. +An availability core can exist in either one of two states at the beginning or end of a block: free or occupied. A free availability core can have a lease holding or on-demand parachain assigned to it for the potential to have a backed candidate included. After backing, the core enters the occupied state as the backed candidate is pending availability. There is an important distinction: a core is not considered occupied until it is in charge of a block pending availability, although the implementation may treat scheduled cores the same as occupied ones for brevity. A core exits the occupied state when the candidate is no longer pending availability - either on timeout or on availability. A core starting in the occupied state can move to the free state and back to occupied all within a single block, as availability bitfields are processed before backed candidates. At the end of the block, there is a possible timeout on availability which can move the core back to the free state if occupied. Cores are treated as an ordered list and are typically referred to by their index in that list. @@ -82,54 +82,57 @@ digraph { ## Validator Groups -Validator group assignments do not need to change very quickly. The security benefits of fast rotation are redundant with the challenge mechanism in the [Approval process](../protocol-approval.md). Because of this, we only divide validators into groups at the beginning of the session and do not shuffle membership during the session. However, we do take steps to ensure that no particular validator group has dominance over a single parachain or parathread-multiplexer for an entire session to provide better guarantees of live-ness. +Validator group assignments do not need to change very quickly. The security benefits of fast rotation are redundant with the challenge mechanism in the [Approval process](../protocol-approval.md). 
Because of this, we only divide validators into groups at the beginning of the session and do not shuffle membership during the session. However, we do take steps to ensure that no particular validator group has dominance over a single lease holding parachain or on-demand parachain-multiplexer for an entire session to provide better guarantees of live-ness. Validator groups rotate across availability cores in a round-robin fashion, with rotation occurring at fixed intervals. The i'th group will be assigned to the `(i+k)%n`'th core at any point in time, where `k` is the number of rotations that have occurred in the session, and `n` is the number of cores. This makes upcoming rotations within the same session predictable. -When a rotation occurs, validator groups are still responsible for distributing availability chunks for any previous cores that are still occupied and pending availability. In practice, rotation and availability-timeout frequencies should be set so this will only be the core they have just been rotated from. It is possible that a validator group is rotated onto a core which is currently occupied. In this case, the validator group will have nothing to do until the previously-assigned group finishes their availability work and frees the core or the availability process times out. Depending on if the core is for a parachain or parathread, a different timeout `t` from the [`HostConfiguration`](../types/runtime.md#host-configuration) will apply. Availability timeouts should only be triggered in the first `t-1` blocks after the beginning of a rotation. +When a rotation occurs, validator groups are still responsible for distributing availability chunks for any previous cores that are still occupied and pending availability. In practice, rotation and availability-timeout frequencies should be set so this will only be the core they have just been rotated from. It is possible that a validator group is rotated onto a core which is currently occupied. In this case, the validator group will have nothing to do until the previously-assigned group finishes their availability work and frees the core or the availability process times out. Depending on if the core is for a lease holding parachain or on-demand parachain, a different timeout `t` from the [`HostConfiguration`](../types/runtime.md#host-configuration) will apply. Availability timeouts should only be triggered in the first `t-1` blocks after the beginning of a rotation. ## Claims -Parathreads operate on a system of claims. Collators participate in auctions to stake a claim on authoring the next block of a parathread, although the auction mechanism is beyond the scope of the scheduler. The scheduler guarantees that they'll be given at least a certain number of attempts to author a candidate that is backed. Attempts that fail during the availability phase are not counted, since ensuring availability at that stage is the responsibility of the backing validators, not of the collator. When a claim is accepted, it is placed into a queue of claims, and each claim is assigned to a particular parathread-multiplexing core in advance. Given that the current assignments of validator groups to cores are known, and the upcoming assignments are predictable, it is possible for parathread collators to know who they should be talking to now and how they should begin establishing connections with as a fallback. +On-demand parachains operate on a system of claims. 
Collators purchase claims on authoring the next block of an on-demand parachain, although the purchase mechanism is beyond the scope of the scheduler. The scheduler guarantees that they'll be given at least a certain number of attempts to author a candidate that is backed. Attempts that fail during the availability phase are not counted, since ensuring availability at that stage is the responsibility of the backing validators, not of the collator. When a claim is accepted, it is placed into a queue of claims, and each claim is assigned to a particular on-demand parachain-multiplexing core in advance. Given that the current assignments of validator groups to cores are known, and the upcoming assignments are predictable, it is possible for on-demand parachain collators to know who they should be talking to now and who they should begin establishing connections with as a fallback. -With this information, the Node-side can be aware of which parathreads have a good chance of being includable within the relay-chain block and can focus any additional resources on backing candidates from those parathreads. Furthermore, Node-side code is aware of which validator group will be responsible for that thread. If the necessary conditions are reached for core reassignment, those candidates can be backed within the same block as the core being freed. +With this information, the Node-side can be aware of which on-demand parachains have a good chance of being includable within the relay-chain block and can focus any additional resources on backing candidates from those on-demand parachains. Furthermore, Node-side code is aware of which validator group will be responsible for that parachain. If the necessary conditions are reached for core reassignment, those candidates can be backed within the same block as the core being freed. -Parathread claims, when scheduled onto a free core, may not result in a block pending availability. This may be due to collator error, networking timeout, or censorship by the validator group. In this case, the claims should be retried a certain number of times to give the collator a fair shot. +On-demand claims, when scheduled onto a free core, may not result in a block pending availability. This may be due to collator error, networking timeout, or censorship by the validator group. In this case, the claims should be retried a certain number of times to give the collator a fair shot. ## Storage Utility structs: ```rust -// A claim on authoring the next block for a given parathread. +// A claim on authoring the next block for a given parathread (on-demand parachain). struct ParathreadClaim(ParaId, CollatorId); -// An entry tracking a claim to ensure it does not pass the maximum number of retries. +// An entry tracking a parathread (on-demand parachain) claim to ensure it does not +// pass the maximum number of retries. struct ParathreadEntry { claim: ParathreadClaim, retries: u32, } -// A queued parathread entry, pre-assigned to a core. +// A queued parathread (on-demand parachain) entry, pre-assigned to a core. struct QueuedParathread { claim: ParathreadEntry, - /// offset within the set of para-threads ranged `0..config.parathread_cores`. + /// offset within the set of parathreads (on-demand parachains) ranged `0..config.parathread_cores`. core_offset: u32, } struct ParathreadQueue { queue: Vec, - /// offset within the set of para-threads ranged `0..config.parathread_cores`. + /// offset within the set of parathreads (on-demand parachains) ranged `0..config.parathread_cores`.
next_core_offset: u32, } enum CoreOccupied { + // On-demand parachain Parathread(ParathreadEntry), // claim & retries Parachain, } enum AssignmentKind { Parachain, + // On-demand parachain Parathread(CollatorId, u32), } @@ -150,13 +153,13 @@ Storage layout: ```rust /// All the validator groups. One for each core. Indices are into the `ActiveValidators` storage. ValidatorGroups: Vec>; -/// A queue of upcoming claims and which core they should be mapped onto. +/// A queue of upcoming parathread (on-demand parachain) claims and which core they should be mapped onto. ParathreadQueue: ParathreadQueue; /// One entry for each availability core. Entries are `None` if the core is not currently occupied. -/// The i'th parachain belongs to the i'th core, with the remaining cores all being -/// parathread-multiplexers. +/// The i'th parachain lease belongs to the i'th core, with the remaining cores all being +/// on-demand parachain-multiplexers. AvailabilityCores: Vec>; -/// An index used to ensure that only one claim on a parathread exists in the queue or is +/// An index used to ensure that only one claim on a parathread (on-demand parachain) exists in the queue or is /// currently being handled by an occupied core. ParathreadClaimIndex: Vec; /// The block number where the session start occurred. Used to track how many group rotations have occurred. @@ -186,11 +189,11 @@ Actions: - Note that the total number of validators `V` in AV may not be evenly divided by `n_cores`. - The groups are selected by partitioning AV. The first `V % N` groups will have `(V / n_cores) + 1` members, while the remaining groups will have `(V / N)` members each. - Instead of using the indices within AV, which point to the broader set, indices _into_ AV should be used. This implies that groups should have simply ascending validator indices. -1. Prune the parathread queue to remove all retries beyond `configuration.parathread_retries`. - - Also prune all parathread claims corresponding to de-registered parathreads. - - all pruned claims should have their entry removed from the parathread index. - - assign all non-pruned claims to new cores if the number of parathread cores has changed between the `new_config` and `old_config` of the `SessionChangeNotification`. - - Assign claims in equal balance across all cores if rebalancing, and set the `next_core` of the `ParathreadQueue` by incrementing the relative index of the last assigned core and taking it modulo the number of parathread cores. +1. Prune the parathread (on-demand parachain) queue to remove all retries beyond `configuration.parathread_retries`. + - Also prune all on-demand claims corresponding to de-registered parachains. + - all pruned claims should have their entry removed from the parathread (on-demand parachain) index. + - assign all non-pruned claims to new cores if the number of on-demand parachain cores has changed between the `new_config` and `old_config` of the `SessionChangeNotification`. + - Assign claims in equal balance across all cores if rebalancing, and set the `next_core` of the `ParathreadQueue` (on-demand queue) by incrementing the relative index of the last assigned core and taking it modulo the number of on-demand cores. ## Initialization @@ -202,17 +205,17 @@ No finalization routine runs for this module. ## Routines -- `add_parathread_claim(ParathreadClaim)`: Add a parathread claim to the queue. - - Fails if any parathread claim on the same parathread is currently indexed. 
+- `add_parathread_claim(ParathreadClaim)`: Add a parathread (on-demand parachain) claim to the queue. + - Fails if any on-demand claim on the same parachain is currently indexed. - Fails if the queue length is >= `config.scheduling_lookahead * config.parathread_cores`. - - The core used for the parathread claim is the `next_core` field of the `ParathreadQueue` and adding `Paras::parachains().len()` to it. + - The core used for the on-demand claim is the `next_core` field of the `ParathreadQueue` (on-demand queue) and adding `Paras::parachains().len()` to it. - `next_core` is then updated by adding 1 and taking it modulo `config.parathread_cores`. - The claim is then added to the claim index. - `free_cores(Vec<(CoreIndex, FreedReason)>)`: indicate previously-occupied cores which are to be considered returned and why they are being returned. - - All freed parachain cores should be assigned to their respective parachain - - All freed parathread cores whose reason for freeing was `FreedReason::Concluded` should have the claim removed from the claim index. - - All freed parathread cores whose reason for freeing was `FreedReason::TimedOut` should have the claim added to the parathread queue again without retries incremented - - All freed parathread cores should take the next parathread entry from the queue. + - All freed lease holding parachain cores should be assigned to their respective parachain + - All freed on-demand parachain cores whose reason for freeing was `FreedReason::Concluded` should have the claim removed from the claim index. + - All freed on-demand cores whose reason for freeing was `FreedReason::TimedOut` should have the claim added to the parathread queue (on-demand queue) again without retries incremented + - All freed on-demand cores should take the next on-demand parachain entry from the queue. - `schedule(Vec<(CoreIndex, FreedReason)>, now: BlockNumber)`: schedule new core assignments, with a parameter indicating previously-occupied cores which are to be considered returned and why they are being returned. - Invoke `free_cores(freed_cores)` - The i'th validator group will be assigned to the `(i+k)%n`'th core at any point in time, where `k` is the number of rotations that have occurred in the session, and `n` is the total number of cores. This makes upcoming rotations within the same session predictable. Rotations are based off of `now`. @@ -224,9 +224,9 @@ No finalization routine runs for this module. - Since both the availability cores and the newly-occupied cores lists are sorted ascending, this method can be implemented efficiently. - `core_para(CoreIndex) -> ParaId`: return the currently-scheduled or occupied ParaId for the given core. - `group_validators(GroupIndex) -> Option>`: return all validators in a given group, if the group index is valid for this session. -- `availability_timeout_predicate() -> Option bool>`: returns an optional predicate that should be used for timing out occupied cores.
if `None`, no timing-out should be done. The predicate accepts the index of the core, and the block number since which it has been occupied. The predicate should be implemented based on the time since the last validator group rotation, and the respective parachain timeouts, i.e. only within `max(config.chain_availability_period, config.thread_availability_period)` of the last rotation would this return `Some`. - `group_rotation_info(now: BlockNumber) -> GroupRotationInfo`: Returns a helper for determining group rotation. -- `next_up_on_available(CoreIndex) -> Option`: Return the next thing that will be scheduled on this core assuming it is currently occupied and the candidate occupying it became available. Returns in `ScheduledCore` format (todo: link to Runtime APIs page; linkcheck doesn't allow this right now). For parachains, this is always the ID of the parachain and no specified collator. For parathreads, this is based on the next item in the `ParathreadQueue` assigned to that core, and is `None` if there isn't one. -- `next_up_on_time_out(CoreIndex) -> Option`: Return the next thing that will be scheduled on this core assuming it is currently occupied and the candidate occupying it timed out. Returns in `ScheduledCore` format (todo: link to Runtime APIs page; linkcheck doesn't allow this right now). For parachains, this is always the ID of the parachain and no specified collator. For parathreads, this is based on the next item in the `ParathreadQueue` assigned to that core, or if there isn't one, the claim that is currently occupying the core. Otherwise `None`. +- `next_up_on_available(CoreIndex) -> Option`: Return the next thing that will be scheduled on this core assuming it is currently occupied and the candidate occupying it became available. Returns in `ScheduledCore` format (todo: link to Runtime APIs page; linkcheck doesn't allow this right now). For lease holding parachains, this is always the ID of the parachain and no specified collator. For on-demand parachains, this is based on the next item in the `ParathreadQueue` (on-demand queue) assigned to that core, and is `None` if there isn't one. +- `next_up_on_time_out(CoreIndex) -> Option`: Return the next thing that will be scheduled on this core assuming it is currently occupied and the candidate occupying it timed out. Returns in `ScheduledCore` format (todo: link to Runtime APIs page; linkcheck doesn't allow this right now). For parachains, this is always the ID of the parachain and no specified collator. For on-demand parachains, this is based on the next item in the `ParathreadQueue` (on-demand queue) assigned to that core, or if there isn't one, the claim that is currently occupying the core. Otherwise `None`. - `clear()`: - - Free all scheduled cores and return parathread claims to queue, with retries incremented. Skip parathreads which no longer exist under paras. + - Free all scheduled cores and return on-demand claims to queue, with retries incremented. Skip on-demand parachains which no longer exist under paras. diff --git a/roadmap/implementers-guide/src/runtime/shared.md b/roadmap/implementers-guide/src/runtime/shared.md index 58845e19a0dc..0f173134e2a2 100644 --- a/roadmap/implementers-guide/src/runtime/shared.md +++ b/roadmap/implementers-guide/src/runtime/shared.md @@ -1,4 +1,4 @@ -# Shared Module +# Shared Pallet This module is responsible for managing shared storage and configuration for other modules. 
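Tying together the scheduler rules quoted above, the rotation arithmetic is small enough to state directly. A standalone sketch mirroring the `(i+k)%n` rule and the `group_rotation_frequency` configuration; this is illustrative, not the pallet's actual `GroupRotationInfo` implementation:

```rust
/// Number of rotations `k` since the session began, given the fixed
/// rotation interval from the host configuration.
fn rotations_since_session_start(
    now: u32,
    session_start_block: u32,
    group_rotation_frequency: u32,
) -> u32 {
    now.saturating_sub(session_start_block) / group_rotation_frequency
}

/// The core that validator group `i` is assigned to: `(i + k) % n`,
/// where `n` is the total number of availability cores.
fn core_for_group(group_index: u32, rotations: u32, n_cores: u32) -> u32 {
    (group_index + rotations) % n_cores
}
```

Because `rotations_since_session_start` depends only on on-chain block numbers, collators and validators can precompute upcoming assignments within the session, which is exactly the predictability the scheduler text above relies on.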
diff --git a/roadmap/implementers-guide/src/types/candidate.md b/roadmap/implementers-guide/src/types/candidate.md index 729c72180ee5..a37f98054c5e 100644 --- a/roadmap/implementers-guide/src/types/candidate.md +++ b/roadmap/implementers-guide/src/types/candidate.md @@ -1,7 +1,7 @@ # Candidate Types Para candidates are some of the most common types, both within the runtime and on the Node-side. -Candidates are the fundamental datatype for advancing parachains and parathreads, encapsulating the collator's signature, the context of the parablock, the commitments to the output, and a commitment to the data which proves it valid. +Candidates are the fundamental datatype for advancing parachains, encapsulating the collator's signature, the context of the parablock, the commitments to the output, and a commitment to the data which proves it valid. In a way, this entire guide is about these candidates: how they are scheduled, constructed, backed, included, and challenged. @@ -142,7 +142,7 @@ struct PersistedValidationData { ## `HeadData` -Head data is a type-safe abstraction around bytes (`Vec`) for the purposes of representing heads of parachains or parathreads. +Head data is a type-safe abstraction around bytes (`Vec`) for the purposes of representing heads of parachains. ```rust struct HeadData(Vec); @@ -150,7 +150,7 @@ struct HeadData(Vec); ## Candidate Commitments -The execution and validation of parachain or parathread candidates produces a number of values which either must be committed to on the relay chain or committed to the state of the relay chain. +The execution and validation of parachain candidates produces a number of values which either must be committed to blocks on the relay chain or committed to the state of the relay chain. ```rust /// Commitments made in a `CandidateReceipt`. Many of these are outputs of validation. diff --git a/roadmap/implementers-guide/src/types/runtime.md b/roadmap/implementers-guide/src/types/runtime.md index 55c0a571b6c8..79da899bd35e 100644 --- a/roadmap/implementers-guide/src/types/runtime.md +++ b/roadmap/implementers-guide/src/types/runtime.md @@ -19,9 +19,9 @@ struct HostConfiguration { pub max_code_size: u32, /// The maximum head-data size, in bytes. pub max_head_data_size: u32, - /// The amount of availability cores to dedicate to parathreads. + /// The amount of availability cores to dedicate to parathreads (on-demand parachains). pub parathread_cores: u32, - /// The number of retries that a parathread author has to submit their block. + /// The number of retries that a parathread (on-demand parachain) author has to submit their block. pub parathread_retries: u32, /// How often parachain groups should be rotated across parachains. pub group_rotation_frequency: BlockNumber, @@ -29,10 +29,10 @@ struct HostConfiguration { /// after inclusion that validators have to make the block available and signal its availability to /// the chain. Must be at least 1. pub chain_availability_period: BlockNumber, - /// The availability period, in blocks, for parathreads. Same as the `chain_availability_period`, + /// The availability period, in blocks, for parathreads (on-demand parachains). Same as the `chain_availability_period`, /// but a differing timeout due to differing requirements. Must be at least 1. pub thread_availability_period: BlockNumber, - /// The amount of blocks ahead to schedule parathreads. + /// The amount of blocks ahead to schedule on-demand parachains. pub scheduling_lookahead: u32, /// The maximum number of validators to have per core. 
`None` means no maximum. pub max_validators_per_core: Option, @@ -88,7 +88,7 @@ struct HostConfiguration { pub hrmp_channel_max_total_size: u32, /// The maximum number of inbound HRMP channels a parachain is allowed to accept. pub hrmp_max_parachain_inbound_channels: u32, - /// The maximum number of inbound HRMP channels a parathread is allowed to accept. + /// The maximum number of inbound HRMP channels a parathread (on-demand parachain) is allowed to accept. pub hrmp_max_parathread_inbound_channels: u32, /// The maximum size of a message that could ever be put into an HRMP channel. /// @@ -96,7 +96,7 @@ struct HostConfiguration { pub hrmp_channel_max_message_size: u32, /// The maximum number of outbound HRMP channels a parachain is allowed to open. pub hrmp_max_parachain_outbound_channels: u32, - /// The maximum number of outbound HRMP channels a parathread is allowed to open. + /// The maximum number of outbound HRMP channels a parathread (on-demand parachain) is allowed to open. pub hrmp_max_parathread_outbound_channels: u32, /// The maximum number of outbound HRMP messages can be sent by a candidate. /// diff --git a/roadmap/parachains.md b/roadmap/parachains.md index 89e8fdaf3892..9d6c014a1c7c 100644 --- a/roadmap/parachains.md +++ b/roadmap/parachains.md @@ -41,13 +41,13 @@ Category: Runtime Auctioning and registration of parachains. This is already implemented and follows the [Parachain Allocation — Research at W3F](https://research.web3.foundation/en/latest/polkadot/Parachain-Allocation.html) document. -#### *Parathread Auctions* +#### *On-demand Blockspace Purchase* Category: Runtime -Parathreads are pay-as-you-go parachains. This consists of an on-chain mechanism for resolving an auction by collators and ensuring that they author a block. +The blockspace purchasing system for on-demand parachains consists of an on-chain mechanism for resolving block space purchases by collators and ensuring that they author a block. -The node-side portion of parathreads is for collators to actually cast bids and to be configured for which conditions to cast bids under. +The node-side portion of on-demand parachains is for collators to actually purchase blockspace and to configure the conditions in which purchases are made. #### *Validator Assignment* @@ -76,11 +76,11 @@ Category: Networking A black-box networking component for validators or fishermen on a parachain to obtain the PoV block referenced by hash in an attestation, for the purpose of validating. When fetching "current" PoV blocks (close to the head of the chain, or relating to the block currently being built), this should be fast. When fetching "old" PoV blocks, it should be possible and fall back on recovering from the availability erasure-coding. -#### *Parathread Auction Voting* +#### *On-demand Blockspace Purchase* Category: Node, Networking -How and when collators are configured to cast votes in parathread auctions. +How and when collators are configured to purchase on-demand blockspace. #### *Collation Loop* @@ -146,7 +146,7 @@ We will need a network where collators of paras can discover and fetch the relev Category: Runtime -Runtime logic for paras to open and close channels by putting down a deposit. The amount of channels a parathread can open will be limited. Channels that are pending close should remain open until the watermark of the recipient has reached the block height of the close request. +Runtime logic for paras to open and close channels by putting down a deposit. 
The amount of channels an on-demand parachain can open will be limited. Channels that are pending close should remain open until the watermark of the recipient has reached the block height of the close request. --- ### Fishing/Slashing @@ -197,7 +197,7 @@ The very first phase - this is parachains without slashing (full security) or cr ### Assignment: - Auctions - - Parathread Auctions + - On-demand Blockspace purchase - Validator Assignment ### Agreement: diff --git a/runtime/common/src/assigned_slots.rs b/runtime/common/src/assigned_slots.rs index f2da950d0b16..053f22e5415a 100644 --- a/runtime/common/src/assigned_slots.rs +++ b/runtime/common/src/assigned_slots.rs @@ -160,13 +160,15 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// The specified parachain or parathread is not registered. + /// The specified parachain is not registered. ParaDoesntExist, - /// Not a parathread. + /// Not a parathread (on-demand parachain). NotParathread, - /// Cannot upgrade parathread. + /// Cannot upgrade on-demand parachain to lease holding + /// parachain. CannotUpgrade, - /// Cannot downgrade parachain. + /// Cannot downgrade lease holding parachain to + /// on-demand. CannotDowngrade, /// Permanent or Temporary slot already assigned. SlotAlreadyAssigned, @@ -371,7 +373,7 @@ pub mod pallet { } } - // Force downgrade to parathread (if needed) before end of lease period + // Force downgrade to on-demand parachain (if needed) before end of lease period if is_parachain { if let Err(err) = runtime_parachains::schedule_parachain_downgrade::(id) { // Treat failed downgrade as warning .. slot lease has been cleared, @@ -507,7 +509,7 @@ impl Pallet { TemporarySlots::::contains_key(id) } - /// Returns whether a para is currently a parachain. + /// Returns whether a para is currently a lease holding parachain. 
fn is_parachain(id: ParaId) -> bool { T::Registrar::is_parachain(id) } @@ -897,7 +899,7 @@ mod tests { ParaId::from(1_u32), )); - // Para is a parachain for PermanentSlotLeasePeriodLength * LeasePeriod blocks + // Para is a lease holding parachain for PermanentSlotLeasePeriodLength * LeasePeriod blocks while block < 9 { println!("block #{}", block); @@ -913,7 +915,7 @@ mod tests { run_to_block(block); } - // Para lease ended, downgraded back to parathread + // Para lease ended, downgraded back to parathread (on-demand parachain) assert_eq!(TestRegistrar::::is_parathread(ParaId::from(1_u32)), true); assert_eq!(Slots::already_leased(ParaId::from(1_u32), 0, 5), false); }); @@ -1080,7 +1082,7 @@ mod tests { assert_eq!(AssignedSlots::active_temporary_slot_count(), 1); // Block 1-5 - // Para is a parachain for TemporarySlotLeasePeriodLength * LeasePeriod blocks + // Para is a lease holding parachain for TemporarySlotLeasePeriodLength * LeasePeriod blocks while block < 6 { println!("block #{}", block); println!("lease period #{}", AssignedSlots::current_lease_period_index()); @@ -1112,7 +1114,7 @@ mod tests { println!("lease period #{}", AssignedSlots::current_lease_period_index()); println!("lease {:?}", Slots::lease(ParaId::from(1_u32))); - // Para lease ended, downgraded back to parathread + // Para lease ended, downgraded back to on-demand parachain assert_eq!(TestRegistrar::::is_parathread(ParaId::from(1_u32)), true); assert_eq!(Slots::already_leased(ParaId::from(1_u32), 0, 3), false); assert_eq!(AssignedSlots::active_temporary_slot_count(), 0); diff --git a/runtime/common/src/integration_tests.rs b/runtime/common/src/integration_tests.rs index cf879e6bb182..6707be94bd6e 100644 --- a/runtime/common/src/integration_tests.rs +++ b/runtime/common/src/integration_tests.rs @@ -382,7 +382,7 @@ fn basic_end_to_end_works() { // User 1 and 2 will own parachains Balances::make_free_balance_be(&account_id(1), 1_000_000_000); Balances::make_free_balance_be(&account_id(2), 1_000_000_000); - // First register 2 parathreads + // First register 2 on-demand parachains let genesis_head = Registrar::worst_head_data(); let validation_code = Registrar::worst_validation_code(); assert_ok!(Registrar::reserve(signed(1))); @@ -414,7 +414,7 @@ fn basic_end_to_end_works() { lease_period_index_start )); - // 2 sessions later they are parathreads + // 2 sessions later they are parathreads (on-demand parachains) run_to_session(START_SESSION_INDEX + 2); assert_eq!(Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::Parathread)); assert_eq!(Paras::lifecycle(ParaId::from(para_2)), Some(ParaLifecycle::Parathread)); @@ -499,7 +499,7 @@ fn basic_end_to_end_works() { let lease_start_block = start_block + 400 + offset; run_to_block(lease_start_block); - // First slot, Para 1 should be transitioning to Parachain + // First slot, Para 1 should be transitioning to lease holding Parachain assert_eq!( Paras::lifecycle(ParaId::from(para_1)), Some(ParaLifecycle::UpgradingParathread) @@ -815,7 +815,7 @@ fn competing_bids() { #[test] fn basic_swap_works() { - // This test will test a swap between a parachain and parathread works successfully. + // This test will test a swap between a lease holding parachain and on-demand parachain works successfully. 
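The swap exercised by this test follows the invariant spelled out later in `paras_registrar`: the two `ParaId`s keep their head data and code, while scheduling info and the deposit switch sides. A minimal standalone sketch of that invariant (the `ParaRecord` type and `swap_records` helper are hypothetical illustrations, not the actual pallet storage):

```rust
/// Illustrative stand-in for a para's registration state; the real data
/// lives across several storage items in `paras` and `paras_registrar`.
#[derive(Debug, Clone, Copy, PartialEq)]
enum ParaKind {
    Parathread, // on-demand parachain
    Parachain,  // lease holding parachain
}

#[derive(Debug, Clone, PartialEq)]
struct ParaRecord {
    head_data: Vec<u8>,  // stays with the ParaId across a swap
    code_hash: [u8; 32], // stays with the ParaId across a swap
    kind: ParaKind,      // scheduling info: switched by a swap
    deposit: u128,       // deposit: switched by a swap
}

/// Swap scheduling info and deposit; head data and code stay put, so a
/// `ParaId` remains a long-term identifier of a notional "parachain".
fn swap_records(a: &mut ParaRecord, b: &mut ParaRecord) {
    std::mem::swap(&mut a.kind, &mut b.kind);
    std::mem::swap(&mut a.deposit, &mut b.deposit);
}
```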
new_test_ext().execute_with(|| { assert!(System::block_number().is_one()); /* So events are emitted */ @@ -825,7 +825,7 @@ fn basic_swap_works() { // User 1 and 2 will own paras Balances::make_free_balance_be(&account_id(1), 1_000_000_000); Balances::make_free_balance_be(&account_id(2), 1_000_000_000); - // First register 2 parathreads with different data + // First register 2 on-demand parachains with different data let validation_code = test_validation_code(10); assert_ok!(Registrar::reserve(signed(1))); assert_ok!(Registrar::register( @@ -859,7 +859,7 @@ fn basic_swap_works() { lease_period_index_start )); - // 2 sessions later they are parathreads + // 2 sessions later they are on-demand parachains run_to_session(START_SESSION_INDEX + 2); assert_eq!(Paras::lifecycle(ParaId::from(2000)), Some(ParaLifecycle::Parathread)); assert_eq!(Paras::lifecycle(ParaId::from(2001)), Some(ParaLifecycle::Parathread)); @@ -932,7 +932,7 @@ fn basic_swap_works() { assert_eq!(Paras::lifecycle(ParaId::from(2000)), Some(ParaLifecycle::Parathread)); assert_eq!(Paras::lifecycle(ParaId::from(2001)), Some(ParaLifecycle::Parachain)); - // Deregister parathread + // Deregister on-demand parachain assert_ok!(Registrar::deregister(para_origin(2000).into(), ParaId::from(2000))); // Correct deposit is unreserved assert_eq!(Balances::reserved_balance(&account_id(1)), 100); // crowdloan deposit left over @@ -987,7 +987,7 @@ fn parachain_swap_works() { // User 1 and 2 will own paras Balances::make_free_balance_be(&account_id(1), 1_000_000_000); Balances::make_free_balance_be(&account_id(2), 1_000_000_000); - // First register 2 parathreads with different data + // First register 2 on-demand parachains with different data let validation_code = test_validation_code(10); assert_ok!(Registrar::reserve(signed(1))); assert_ok!(Registrar::register( @@ -1028,7 +1028,7 @@ fn parachain_swap_works() { lease_period_index_start )); - // 2 sessions later they are parathreads + // 2 sessions later they are on-demand parachains run_to_block(starting_block + 20); assert_eq!(Paras::lifecycle(ParaId::from(winner)), Some(ParaLifecycle::Parathread)); @@ -1165,8 +1165,7 @@ fn crowdloan_ending_period_bid() { // User 1 and 2 will own paras Balances::make_free_balance_be(&account_id(1), 1_000_000_000); Balances::make_free_balance_be(&account_id(2), 1_000_000_000); - - // First register 2 parathreads + // First register 2 on-demand parachains let validation_code = test_validation_code(10); assert_ok!(Registrar::reserve(signed(1))); assert_ok!(Registrar::register( @@ -1201,7 +1200,7 @@ fn crowdloan_ending_period_bid() { lease_period_index_start )); - // 2 sessions later they are parathreads + // 2 sessions later they are on-demand parachains run_to_session(START_SESSION_INDEX + 2); assert_eq!(Paras::lifecycle(ParaId::from(2000)), Some(ParaLifecycle::Parathread)); assert_eq!(Paras::lifecycle(ParaId::from(2001)), Some(ParaLifecycle::Parathread)); @@ -1534,7 +1533,7 @@ fn cant_bid_on_existing_lease_periods() { run_to_session(START_SESSION_INDEX); Balances::make_free_balance_be(&account_id(1), 1_000_000_000); - // First register a parathread + // First register an on-demand parachain let validation_code = test_validation_code(10); assert_ok!(Registrar::reserve(signed(1))); assert_ok!(Registrar::register( @@ -1555,7 +1554,7 @@ fn cant_bid_on_existing_lease_periods() { lease_period_index_start )); - // 2 sessions later they are parathreads + // 2 sessions later they are on-demand parachains run_to_session(START_SESSION_INDEX + 2); // Open a crowdloan 
for Para 1 for slots 0-3 diff --git a/runtime/common/src/mock.rs b/runtime/common/src/mock.rs index 06cc7771dede..f5e4f1f48daa 100644 --- a/runtime/common/src/mock.rs +++ b/runtime/common/src/mock.rs @@ -31,6 +31,7 @@ use std::{cell::RefCell, collections::HashMap}; thread_local! { static OPERATIONS: RefCell> = RefCell::new(Vec::new()); static PARACHAINS: RefCell> = RefCell::new(Vec::new()); + // On-demand parachains static PARATHREADS: RefCell> = RefCell::new(Vec::new()); static LOCKS: RefCell> = RefCell::new(HashMap::new()); static MANAGERS: RefCell>> = RefCell::new(HashMap::new()); @@ -49,6 +50,7 @@ impl Registrar for TestRegistrar { PARACHAINS.with(|x| x.borrow().clone()) } + // Is on-demand parachain fn is_parathread(id: ParaId) -> bool { PARATHREADS.with(|x| x.borrow().binary_search(&id).is_ok()) } @@ -75,7 +77,7 @@ impl Registrar for TestRegistrar { Err(_) => Ok(()), } })?; - // Should not be parathread, then make it. + // Should not be parathread (on-demand parachain), then make it. PARATHREADS.with(|x| { let mut parathreads = x.borrow_mut(); match parathreads.binary_search(&id) { @@ -99,7 +101,7 @@ impl Registrar for TestRegistrar { Err(_) => Ok(()), } })?; - // Remove from parathread. + // Remove from parathreads (on-demand parachains). PARATHREADS.with(|x| { let mut parathreads = x.borrow_mut(); match parathreads.binary_search(&id) { @@ -114,6 +116,8 @@ impl Registrar for TestRegistrar { Ok(()) } + /// If the ParaId corresponds to a parathread (on-demand parachain), + /// then upgrade it to a lease holding parachain fn make_parachain(id: ParaId) -> DispatchResult { PARATHREADS.with(|x| { let mut parathreads = x.borrow_mut(); @@ -144,6 +148,9 @@ impl Registrar for TestRegistrar { }); Ok(()) } + + /// If the ParaId corresponds to a lease holding parachain, then downgrade it to a + /// parathread (on-demand parachain) fn make_parathread(id: ParaId) -> DispatchResult { PARACHAINS.with(|x| { let mut parachains = x.borrow_mut(); diff --git a/runtime/common/src/paras_registrar.rs b/runtime/common/src/paras_registrar.rs index 5ecef73fae1c..8653e6f19123 100644 --- a/runtime/common/src/paras_registrar.rs +++ b/runtime/common/src/paras_registrar.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Pallet to handle parathread/parachain registration and related fund management. +//! Pallet to handle parachain registration and related fund management. //! In essence this is a simple wrapper around `paras`. use frame_support::{ @@ -113,13 +113,13 @@ pub mod pallet { type RuntimeOrigin: From<::RuntimeOrigin> + Into::RuntimeOrigin>>; - /// The system's currency for parathread payment. + /// The system's currency for on-demand parachain payment. type Currency: ReservableCurrency; - /// Runtime hook for when a parachain and parathread swap. + /// Runtime hook for when a lease holding parachain and on-demand parachain swap. type OnSwap: crate::traits::OnSwap; - /// The deposit to be paid to run a parathread. + /// The deposit to be paid to run an on-demand parachain. /// This should include the cost for storing the genesis head and validation code. #[pallet::constant] type ParaDeposit: Get>; @@ -155,13 +155,13 @@ pub mod pallet { HeadDataTooLarge, /// Para is not a Parachain. NotParachain, - /// Para is not a Parathread. + /// Para is not a Parathread (on-demand parachain).
NotParathread, /// Cannot deregister para CannotDeregister, - /// Cannot schedule downgrade of parachain to parathread + /// Cannot schedule downgrade of lease holding parachain to on-demand parachain CannotDowngrade, - /// Cannot schedule upgrade of parathread to parachain + /// Cannot schedule upgrade of on-demand parachain to lease holding parachain CannotUpgrade, /// Para is locked from manipulation by the manager. Must use parachain or relay chain governance. ParaLocked, @@ -263,7 +263,7 @@ pub mod pallet { /// Deregister a Para Id, freeing all data and returning any deposit. /// - /// The caller must be Root, the `para` owner, or the `para` itself. The para must be a parathread. + /// The caller must be Root, the `para` owner, or the `para` itself. The para must be an on-demand parachain. #[pallet::call_index(2)] #[pallet::weight(::WeightInfo::deregister())] pub fn deregister(origin: OriginFor, id: ParaId) -> DispatchResult { @@ -271,7 +271,7 @@ pub mod pallet { Self::do_deregister(id) } - /// Swap a parachain with another parachain or parathread. + /// Swap a lease holding parachain with another parachain, either on-demand or lease holding. /// /// The origin must be Root, the `para` owner, or the `para` itself. /// @@ -280,8 +280,8 @@ pub mod pallet { /// /// The `ParaId`s remain mapped to the same head data and code so external code can rely on /// `ParaId` to be a long-term identifier of a notional "parachain". However, their - /// scheduling info (i.e. whether they're a parathread or parachain), auction information - /// and the auction deposit are switched. + /// scheduling info (i.e. whether they're an on-demand parachain or lease holding parachain), + /// auction information and the auction deposit are switched. #[pallet::call_index(3)] #[pallet::weight(::WeightInfo::swap())] pub fn swap(origin: OriginFor, id: ParaId, other: ParaId) -> DispatchResult { @@ -301,7 +301,7 @@ pub mod pallet { if PendingSwap::::get(other) == Some(id) { let other_lifecycle = paras::Pallet::::lifecycle(other).ok_or(Error::::NotRegistered)?; - // identify which is a parachain and which is a parathread + // identify which is a lease holding parachain and which is a parathread (on-demand parachain) if id_lifecycle == ParaLifecycle::Parachain && other_lifecycle == ParaLifecycle::Parathread { @@ -345,7 +345,8 @@ pub mod pallet { /// /// This function will reserve a new Para Id to be owned/managed by the origin account. /// The origin account is able to register head data and validation code using `register` to create - /// a parathread. Using the Slots pallet, a parathread can then be upgraded to get a parachain slot. + /// an on-demand parachain. Using the Slots pallet, an on-demand parachain can then be upgraded to + /// a lease holding parachain. /// /// ## Arguments /// - `origin`: Must be called by a `Signed` origin. Becomes the manager/owner of the new para ID. @@ -417,17 +418,17 @@ impl Registrar for Pallet { Some(Paras::::get(id)?.manager) } - // All parachains. Ordered ascending by ParaId. Parathreads are not included. + // All lease holding parachains. Ordered ascending by ParaId. On-demand parachains are not included. 
fn parachains() -> Vec { paras::Pallet::::parachains() } - // Return if a para is a parathread + // Return if a para is a parathread (on-demand parachain) fn is_parathread(id: ParaId) -> bool { paras::Pallet::::is_parathread(id) } - // Return if a para is a parachain + // Return if a para is a lease holding parachain fn is_parachain(id: ParaId) -> bool { paras::Pallet::::is_parachain(id) } @@ -460,9 +461,9 @@ impl Registrar for Pallet { Self::do_deregister(id) } - // Upgrade a registered parathread into a parachain. + // Upgrade a registered on-demand parachain into a lease holding parachain. fn make_parachain(id: ParaId) -> DispatchResult { - // Para backend should think this is a parathread... + // Para backend should think this is an on-demand parachain... ensure!( paras::Pallet::::lifecycle(id) == Some(ParaLifecycle::Parathread), Error::::NotParathread @@ -475,7 +476,7 @@ impl Registrar for Pallet { Ok(()) } - // Downgrade a registered para into a parathread. + // Downgrade a registered para into a parathread (on-demand parachain). fn make_parathread(id: ParaId) -> DispatchResult { // Para backend should think this is a parachain... ensure!( @@ -602,7 +603,7 @@ impl Pallet { /// Deregister a Para Id, freeing all data returning any deposit. fn do_deregister(id: ParaId) -> DispatchResult { match paras::Pallet::::lifecycle(id) { - // Para must be a parathread, or not exist at all. + // Para must be a parathread (on-demand parachain), or not exist at all. Some(ParaLifecycle::Parathread) | None => {}, _ => return Err(Error::::NotParathread.into()), } @@ -642,7 +643,7 @@ impl Pallet { Ok((ParaGenesisArgs { genesis_head, validation_code, para_kind }, deposit)) } - /// Swap a parachain and parathread, which involves scheduling an appropriate lifecycle update. + /// Swap a lease holding parachain and parathread (on-demand parachain), which involves scheduling an appropriate lifecycle update. fn do_thread_and_chain_swap(to_downgrade: ParaId, to_upgrade: ParaId) { let res1 = runtime_parachains::schedule_parachain_downgrade::(to_downgrade); debug_assert!(res1.is_ok()); @@ -928,16 +929,16 @@ mod tests { conclude_pvf_checking::(&validation_code, VALIDATORS, START_SESSION_INDEX); run_to_session(START_SESSION_INDEX + 2); - // It is now a parathread. + // It is now a parathread (on-demand parachain). assert!(Parachains::is_parathread(para_id)); assert!(!Parachains::is_parachain(para_id)); - // Some other external process will elevate parathread to parachain + // Some other external process will elevate on-demand to lease holding parachain assert_ok!(Registrar::make_parachain(para_id)); run_to_session(START_SESSION_INDEX + 4); - // It is now a parachain. + // It is now a lease holding parachain. assert!(!Parachains::is_parathread(para_id)); assert!(Parachains::is_parachain(para_id)); - // Turn it back into a parathread + // Turn it back into a parathread (on-demand parachain) assert_ok!(Registrar::make_parathread(para_id)); run_to_session(START_SESSION_INDEX + 6); assert!(Parachains::is_parathread(para_id)); @@ -1325,7 +1326,7 @@ mod tests { run_to_session(START_SESSION_INDEX + 2); - // They are now a parathread. + // They are now parathreads (on-demand parachains). 
assert!(Parachains::is_parathread(para_1)); assert!(Parachains::is_parathread(para_2)); @@ -1336,7 +1337,8 @@ mod tests { Error::::CannotSwap ); - // Some other external process will elevate one parathread to parachain + // Some other external process will elevate one on-demand + // parachain to a lease holding parachain assert_ok!(Registrar::make_parachain(para_1)); // Cannot swap @@ -1357,7 +1359,7 @@ mod tests { run_to_session(START_SESSION_INDEX + 4); - // It is now a parachain. + // It is now a lease holding parachain. assert!(Parachains::is_parachain(para_1)); assert!(Parachains::is_parathread(para_2)); @@ -1518,6 +1520,7 @@ mod benchmarking { } swap { + // On demand parachain let parathread = register_para::(LOWEST_PUBLIC_ID.into()); let parachain = register_para::((LOWEST_PUBLIC_ID + 1).into()); diff --git a/runtime/common/src/paras_sudo_wrapper.rs b/runtime/common/src/paras_sudo_wrapper.rs index 8944e932e9ef..c1c2973568fa 100644 --- a/runtime/common/src/paras_sudo_wrapper.rs +++ b/runtime/common/src/paras_sudo_wrapper.rs @@ -41,22 +41,22 @@ pub mod pallet { #[pallet::error] pub enum Error { - /// The specified parachain or parathread is not registered. + /// The specified parachain is not registered. ParaDoesntExist, - /// The specified parachain or parathread is already registered. + /// The specified parachain is already registered. ParaAlreadyExists, /// A DMP message couldn't be sent because it exceeds the maximum size allowed for a downward /// message. ExceedsMaxMessageSize, /// Could not schedule para cleanup. CouldntCleanup, - /// Not a parathread. + /// Not a parathread (on-demand parachain). NotParathread, - /// Not a parachain. + /// Not a lease holding parachain. NotParachain, - /// Cannot upgrade parathread. + /// Cannot upgrade on-demand parachain to lease holding parachain. CannotUpgrade, - /// Cannot downgrade parachain. + /// Cannot downgrade lease holding parachain to on-demand. CannotDowngrade, } @@ -89,7 +89,7 @@ pub mod pallet { Ok(()) } - /// Upgrade a parathread to a parachain + /// Upgrade a parathread (on-demand parachain) to a lease holding parachain #[pallet::call_index(2)] #[pallet::weight((1_000, DispatchClass::Operational))] pub fn sudo_schedule_parathread_upgrade( @@ -97,7 +97,7 @@ pub mod pallet { id: ParaId, ) -> DispatchResult { ensure_root(origin)?; - // Para backend should think this is a parathread... + // Para backend should think this is a parathread (on-demand parachain)... ensure!( paras::Pallet::::lifecycle(id) == Some(ParaLifecycle::Parathread), Error::::NotParathread, @@ -107,7 +107,7 @@ pub mod pallet { Ok(()) } - /// Downgrade a parachain to a parathread + /// Downgrade a lease holding parachain to an on-demand parachain #[pallet::call_index(3)] #[pallet::weight((1_000, DispatchClass::Operational))] pub fn sudo_schedule_parachain_downgrade( diff --git a/runtime/common/src/slots/mod.rs b/runtime/common/src/slots/mod.rs index 202062fd7ce9..3eaa4bfec870 100644 --- a/runtime/common/src/slots/mod.rs +++ b/runtime/common/src/slots/mod.rs @@ -14,9 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! Parathread and parachains leasing system. Allows para IDs to be claimed, the code and data to be initialized and -//! parachain slots (i.e. continuous scheduling) to be leased. Also allows for parachains and parathreads to be -//! swapped. +//! Parachains leasing system. Allows para IDs to be claimed, the code and data to be initialized and +//! parachain slots (i.e. 
continuous scheduling) to be leased. Also allows for lease holding parachains and +//! on-demand parachains to be swapped. //! //! This doesn't handle the mechanics of determining which para ID actually ends up with a parachain lease. This //! must be handled separately, through the trait interface that this pallet provides or the root dispatchables. @@ -244,7 +244,7 @@ impl Pallet { if lease_periods.len() == 1 { // Just one entry, which corresponds to the now-ended lease period. // - // `para` is now just a parathread. + // `para` is now just an on-demand parachain. // // Unreserve whatever is left. if let Some((who, value)) = &lease_periods[0] { @@ -944,7 +944,7 @@ mod tests { Error::::ParaNotOnboarding ); - // Trying Para 2 again should fail cause they are not currently a parathread + // Trying Para 2 again should fail because they are not currently an on-demand parachain assert!(Slots::trigger_onboard(RuntimeOrigin::signed(1), 2.into()).is_err()); assert_eq!(TestRegistrar::::operations(), vec![(2.into(), 1, true),]); @@ -1003,6 +1003,7 @@ mod benchmarking { assert_eq!(event, &system_event); } + // Registers a parathread (on-demand parachain) fn register_a_parathread(i: u32) -> (ParaId, T::AccountId) { let para = ParaId::from(i); let leaser: T::AccountId = account("leaser", i, 0); @@ -1051,7 +1052,7 @@ mod benchmarking { }.into()); } - // Worst case scenario, T parathreads onboard, and C parachains offboard. + // Worst case scenario, T on-demand parachains onboard, and C lease holding parachains offboard. manage_lease_period_start { // Assume reasonable maximum of 100 paras at any time let c in 0 .. 100; @@ -1063,14 +1064,14 @@ mod benchmarking { // If there is an offset, we need to be on that block to be able to do lease things. frame_system::Pallet::::set_block_number(T::LeaseOffset::get() + One::one()); - // Make T parathreads + // Make T parathreads (on-demand parachains) let paras_info = (0..t).map(|i| { register_a_parathread::(i) }).collect::>(); T::Registrar::execute_pending_transitions(); - // T parathread are upgrading to parachains + // T on-demand parachains are upgrading to lease holding parachains for (para, leaser) in paras_info { let amount = T::Currency::minimum_balance(); let origin = T::ForceOrigin::try_successful_origin() @@ -1080,7 +1081,7 @@ T::Registrar::execute_pending_transitions(); - // C parachains are downgrading to parathreads + // C lease holding parachains are downgrading to on-demand parachains for i in 200 .. 200 + c { let (para, leaser) = register_a_parathread::(i); T::Registrar::make_parachain(para)?; diff --git a/runtime/common/src/traits.rs b/runtime/common/src/traits.rs index f24a5b977968..3fd97c7cb8c7 100644 --- a/runtime/common/src/traits.rs +++ b/runtime/common/src/traits.rs @@ -31,15 +31,16 @@ pub trait Registrar { /// Report the manager (permissioned owner) of a parachain, if there is one. fn manager_of(id: ParaId) -> Option; - /// All parachains. Ordered ascending by `ParaId`. Parathreads are not included. + /// All lease holding parachains. Ordered ascending by `ParaId`. On-demand + /// parachains are not included. fn parachains() -> Vec; - /// Return if a `ParaId` is a Parachain. + /// Return if a `ParaId` is a lease holding Parachain. fn is_parachain(id: ParaId) -> bool { Self::parachains().binary_search(&id).is_ok() } - /// Return if a `ParaId` is a Parathread. + /// Return if a `ParaId` is a Parathread (on-demand parachain). fn is_parathread(id: ParaId) -> bool; /// Return if a `ParaId` is registered in the system.
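The default `is_parachain` above leans on the ordering guarantee of `parachains()`: membership is checked with a binary search, which is only sound because the list is kept ascending by `ParaId`. A small self-contained sketch of that check, using a plain `u32` as a stand-in for `ParaId`:

```rust
/// Membership test in the style of `Registrar::is_parachain`: correct only
/// because `sorted` is maintained in ascending order, mirroring the
/// "ordered ascending by `ParaId`" invariant of `parachains()`.
fn is_lease_holding(sorted: &[u32], id: u32) -> bool {
    sorted.binary_search(&id).is_ok()
}

fn main() {
    let parachains = [1000, 2000, 3000]; // lease holding paras, ascending
    assert!(is_lease_holding(&parachains, 2000));
    // An on-demand parachain is simply absent from this list.
    assert!(!is_lease_holding(&parachains, 2001));
}
```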
@@ -70,7 +71,7 @@ pub trait Registrar { /// Elevate a para to parachain status. fn make_parachain(id: ParaId) -> DispatchResult; - /// Lower a para back to normal from parachain status. + /// Downgrade lease holding parachain into parathread (on-demand parachain) fn make_parathread(id: ParaId) -> DispatchResult; #[cfg(any(feature = "runtime-benchmarks", test))] @@ -80,7 +81,8 @@ pub trait Registrar { fn worst_validation_code() -> ValidationCode; /// Execute any pending state transitions for paras. - /// For example onboarding to parathread, or parathread to parachain. + /// For example onboarding to on-demand parachain, or upgrading on-demand to + /// lease holding parachain. #[cfg(any(feature = "runtime-benchmarks", test))] fn execute_pending_transitions(); } @@ -250,7 +252,7 @@ pub trait Auctioneer { fn has_won_an_auction(para: ParaId, bidder: &Self::AccountId) -> bool; } -/// Runtime hook for when we swap a parachain and parathread. +/// Runtime hook for when we swap a lease holding parachain and an on-demand parachain. #[impl_trait_for_tuples::impl_for_tuples(30)] pub trait OnSwap { /// Updates any needed state/references to enact a logical swap of two parachains. Identity, diff --git a/runtime/parachains/src/configuration.rs b/runtime/parachains/src/configuration.rs index 05b0ca1fc538..62efc095334d 100644 --- a/runtime/parachains/src/configuration.rs +++ b/runtime/parachains/src/configuration.rs @@ -43,7 +43,7 @@ pub use pallet::*; const LOG_TARGET: &str = "runtime::configuration"; -/// All configuration of the runtime with respect to parachains and parathreads. +/// All configuration of the runtime with respect to parachains. #[derive(Clone, Encode, Decode, PartialEq, sp_core::RuntimeDebug, scale_info::TypeInfo)] #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] pub struct HostConfiguration { @@ -135,7 +135,8 @@ pub struct HostConfiguration { pub max_downward_message_size: u32, /// The maximum number of outbound HRMP channels a parachain is allowed to open. pub hrmp_max_parachain_outbound_channels: u32, - /// The maximum number of outbound HRMP channels a parathread is allowed to open. + /// The maximum number of outbound HRMP channels a parathread (on-demand parachain) is allowed + /// to open. pub hrmp_max_parathread_outbound_channels: u32, /// The deposit that the sender should provide for opening an HRMP channel. pub hrmp_sender_deposit: Balance, @@ -147,7 +148,8 @@ pub struct HostConfiguration { pub hrmp_channel_max_total_size: u32, /// The maximum number of inbound HRMP channels a parachain is allowed to accept. pub hrmp_max_parachain_inbound_channels: u32, - /// The maximum number of inbound HRMP channels a parathread is allowed to accept. + /// The maximum number of inbound HRMP channels a parathread (on-demand parachain) is allowed + /// to accept. pub hrmp_max_parathread_inbound_channels: u32, /// The maximum size of a message that could ever be put into an HRMP channel. /// @@ -163,9 +165,9 @@ pub struct HostConfiguration { /// How long to keep code on-chain, in blocks. This should be sufficiently long that disputes /// have concluded. pub code_retention_period: BlockNumber, - /// The amount of execution cores to dedicate to parathread execution. + /// The amount of execution cores to dedicate to parathread (on-demand parachain) execution. pub parathread_cores: u32, - /// The number of retries that a parathread author has to submit their block. + /// The number of retries that a parathread (on-demand parachain) author has to submit their block. 
pub parathread_retries: u32, /// How often parachain groups should be rotated across parachains. /// @@ -177,12 +179,12 @@ pub struct HostConfiguration { /// /// Must be at least 1. pub chain_availability_period: BlockNumber, - /// The availability period, in blocks, for parathreads. Same as the `chain_availability_period`, - /// but a differing timeout due to differing requirements. + /// The availability period, in blocks, for parathreads (on-demand parachains). Same as the + /// `chain_availability_period`, but a differing timeout due to differing requirements. /// /// Must be at least 1. pub thread_availability_period: BlockNumber, - /// The amount of blocks ahead to schedule parachains and parathreads. + /// The amount of blocks ahead to schedule parachains. pub scheduling_lookahead: u32, /// The maximum number of validators to have per core. /// @@ -633,7 +635,7 @@ pub mod pallet { }) } - /// Set the number of parathread execution cores. + /// Set the number of parathread (on-demand parachain) execution cores. #[pallet::call_index(6)] #[pallet::weight(( T::WeightInfo::set_config_with_u32(), @@ -646,7 +648,7 @@ pub mod pallet { }) } - /// Set the number of retries for a particular parathread. + /// Set the number of retries for a particular parathread (on-demand parachain). #[pallet::call_index(7)] #[pallet::weight(( T::WeightInfo::set_config_with_u32(), @@ -691,7 +693,7 @@ pub mod pallet { }) } - /// Set the availability period for parathreads. + /// Set the availability period for parathreads (on-demand parachains). #[pallet::call_index(10)] #[pallet::weight(( T::WeightInfo::set_config_with_block_number(), @@ -995,7 +997,7 @@ pub mod pallet { }) } - /// Sets the maximum number of inbound HRMP channels a parathread is allowed to accept. + /// Sets the maximum number of inbound HRMP channels a parathread (on-demand parachain) is allowed to accept. #[pallet::call_index(35)] #[pallet::weight(( T::WeightInfo::set_config_with_u32(), @@ -1040,7 +1042,7 @@ pub mod pallet { }) } - /// Sets the maximum number of outbound HRMP channels a parathread is allowed to open. + /// Sets the maximum number of outbound HRMP channels a parathread (on-demand parachain) is allowed to open. #[pallet::call_index(38)] #[pallet::weight(( T::WeightInfo::set_config_with_u32(), diff --git a/runtime/parachains/src/hrmp.rs b/runtime/parachains/src/hrmp.rs index 993c4a2fcbb2..12ef3a6384e9 100644 --- a/runtime/parachains/src/hrmp.rs +++ b/runtime/parachains/src/hrmp.rs @@ -331,7 +331,7 @@ pub mod pallet { StorageMap<_, Twox64Concat, HrmpChannelId, HrmpOpenChannelRequest>; // NOTE: could become bounded, but we don't have a global maximum for this. - // `HRMP_MAX_INBOUND_CHANNELS_BOUND` are per parachain/parathread, while this storage tracks the + // `HRMP_MAX_INBOUND_CHANNELS_BOUND` are per parachain, while this storage tracks the // global state. #[pallet::storage] pub type HrmpOpenChannelRequestsList = diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs index c77b51f6b6f9..09902dbe39d7 100644 --- a/runtime/parachains/src/inclusion/mod.rs +++ b/runtime/parachains/src/inclusion/mod.rs @@ -14,8 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! The inclusion pallet is responsible for inclusion and availability of scheduled parachains -//! and parathreads. +//! The inclusion pallet is responsible for inclusion and availability of scheduled parachains. //! //! 
It is responsible for carrying candidates from being backable to being backed, and then from backed //! to included. diff --git a/runtime/parachains/src/lib.rs b/runtime/parachains/src/lib.rs index 43c5c6441ad9..a80898b97480 100644 --- a/runtime/parachains/src/lib.rs +++ b/runtime/parachains/src/lib.rs @@ -76,12 +76,12 @@ pub fn schedule_para_cleanup(id: primitives::Id) -> Result<(), >::schedule_para_cleanup(id).map_err(|_| ()) } -/// Schedule a parathread to be upgraded to a parachain. +/// Schedule a parathread (on-demand parachain) to be upgraded to a lease holding parachain. pub fn schedule_parathread_upgrade(id: ParaId) -> Result<(), ()> { paras::Pallet::::schedule_parathread_upgrade(id).map_err(|_| ()) } -/// Schedule a parachain to be downgraded to a parathread. +/// Schedule a lease holding parachain to be downgraded to an on-demand parachain. pub fn schedule_parachain_downgrade(id: ParaId) -> Result<(), ()> { paras::Pallet::::schedule_parachain_downgrade(id).map_err(|_| ()) } diff --git a/runtime/parachains/src/paras/mod.rs b/runtime/parachains/src/paras/mod.rs index 68ebd15aeb6a..26d94f4a908c 100644 --- a/runtime/parachains/src/paras/mod.rs +++ b/runtime/parachains/src/paras/mod.rs @@ -18,15 +18,15 @@ //! //! # Tracking State of Paras //! -//! The most important responsibility of this module is to track which parachains and parathreads +//! The most important responsibility of this module is to track which parachains //! are active and what their current state is. The current state of a para consists of the current //! head data and the current validation code (AKA Parachain Validation Function (PVF)). //! //! A para is not considered live until it is registered and activated in this pallet. //! -//! The set of parachains and parathreads cannot change except at session boundaries. This is -//! primarily to ensure that the number and meaning of bits required for the availability bitfields -//! does not change except at session boundaries. +//! The set of parachains cannot change except at session boundaries. This is primarily to ensure +//! that the number and meaning of bits required for the availability bitfields does not change +//! except at session boundaries. //! //! # Validation Code Upgrades //! @@ -61,7 +61,8 @@ //! //! # Para Lifecycle Management //! -//! A para can be in one of the two stable states: it is either a parachain or a parathread. +//! A para can be in one of the two stable states: it is either a lease holding parachain or an +//! on-demand parachain. //! //! However, in order to get into one of those two states, it must first be onboarded. Onboarding //! can be only enacted at session boundaries. Onboarding must take at least one full session. @@ -181,17 +182,17 @@ pub struct ParaPastCodeMeta { /// state will be used to determine the state transition to apply to the para. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug, TypeInfo)] pub enum ParaLifecycle { - /// Para is new and is onboarding as a Parathread or Parachain. + /// Para is new and is onboarding as an on-demand or lease holding Parachain. Onboarding, - /// Para is a Parathread. + /// Para is a Parathread (on-demand parachain). Parathread, - /// Para is a Parachain. + /// Para is a lease holding Parachain. Parachain, - /// Para is a Parathread which is upgrading to a Parachain. + /// Para is a Parathread (on-demand parachain) which is upgrading to a lease holding Parachain. UpgradingParathread, - /// Para is a Parachain which is downgrading to a Parathread. 
+ /// Para is a lease holding Parachain which is downgrading to an on-demand parachain. DowngradingParachain, - /// Parathread is queued to be offboarded. + /// Parathread (on-demand parachain) is queued to be offboarded. OffboardingParathread, /// Parachain is queued to be offboarded. OffboardingParachain, @@ -199,14 +200,14 @@ pub enum ParaLifecycle { impl ParaLifecycle { /// Returns true if parachain is currently onboarding. To learn if the - /// parachain is onboarding as a parachain or parathread, look at the + /// parachain is onboarding as a lease holding or on-demand parachain, look at the /// `UpcomingGenesis` storage item. pub fn is_onboarding(&self) -> bool { matches!(self, ParaLifecycle::Onboarding) } /// Returns true if para is in a stable state, i.e. it is currently - /// a parachain or parathread, and not in any transition state. + /// a lease holding or on-demand parachain, and not in any transition state. pub fn is_stable(&self) -> bool { matches!(self, ParaLifecycle::Parathread | ParaLifecycle::Parachain) } @@ -223,7 +224,7 @@ impl ParaLifecycle { ) } - /// Returns true if para is currently treated as a parathread. + /// Returns true if para is currently treated as a parathread (on-demand parachain). /// This also includes transitioning states, so you may want to combine /// this check with `is_stable` if you specifically want `ParaLifecycle::Parathread`. pub fn is_parathread(&self) -> bool { @@ -297,12 +298,12 @@ pub struct ParaGenesisArgs { pub genesis_head: HeadData, /// The initial validation code to use. pub validation_code: ValidationCode, - /// Parachain or Parathread. + /// Lease holding or on-demand parachain. #[cfg_attr(feature = "std", serde(rename = "parachain"))] pub para_kind: ParaKind, } -/// Distinguishes between Parachain and Parathread +/// Distinguishes between lease holding Parachain and Parathread (on-demand parachain) #[derive(PartialEq, Eq, Clone, RuntimeDebug)] pub enum ParaKind { Parathread, Parachain, @@ -602,9 +603,9 @@ pub mod pallet { CannotOnboard, /// Para cannot be offboarded at this time. CannotOffboard, - /// Para cannot be upgraded to a parachain. + /// Para cannot be upgraded to a lease holding parachain. CannotUpgrade, - /// Para cannot be downgraded to a parathread. + /// Para cannot be downgraded to an on-demand parachain. CannotDowngrade, /// The statement for PVF pre-checking is stale. PvfCheckStatementStale, @@ -640,7 +641,7 @@ pub mod pallet { pub(super) type PvfActiveVoteList = StorageValue<_, Vec, ValueQuery>; - /// All parachains. Ordered ascending by `ParaId`. Parathreads are not included. + /// All lease holding parachains. Ordered ascending by `ParaId`. On-demand parachains are not included. /// /// Consider using the [`ParachainsCache`] type when modifying. #[pallet::storage] @@ -1219,7 +1220,7 @@ impl Pallet { // The actions to take are based on the lifecycle of the paras. // // The final state of any para after the actions queue should be as a - // parachain, parathread, or not registered. (stable states) + // lease holding parachain, on-demand parachain, or not registered. (stable states) // // Returns the list of outgoing paras from the actions queue.
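As the doc comment on `is_parathread` above notes, the predicate also matches transitioning states. A sketch of combining it with `is_stable` to single out paras that are on-demand and not mid-transition; the enum mirrors `ParaLifecycle` from this diff, while `is_settled_on_demand` is a hypothetical helper:

```rust
// Mirror of `ParaLifecycle` as shown in this diff.
enum ParaLifecycle {
    Onboarding,
    Parathread, // on-demand parachain
    Parachain,  // lease holding parachain
    UpgradingParathread,
    DowngradingParachain,
    OffboardingParathread,
    OffboardingParachain,
}

impl ParaLifecycle {
    fn is_stable(&self) -> bool {
        matches!(self, Self::Parathread | Self::Parachain)
    }
    // Includes transitioning states, per the doc comment above.
    fn is_parathread(&self) -> bool {
        matches!(
            self,
            Self::Parathread | Self::UpgradingParathread | Self::OffboardingParathread
        )
    }
    /// Hypothetical helper: true only for `ParaLifecycle::Parathread` itself.
    fn is_settled_on_demand(&self) -> bool {
        self.is_parathread() && self.is_stable()
    }
}
```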
fn apply_actions_queue(session: SessionIndex) -> Vec { @@ -1238,17 +1239,17 @@ impl Pallet { Self::initialize_para_now(&mut parachains, para, &genesis_data); } }, - // Upgrade a parathread to a parachain + // Upgrade an on-demand parachain to a lease holding parachain Some(ParaLifecycle::UpgradingParathread) => { parachains.add(para); ParaLifecycles::::insert(¶, ParaLifecycle::Parachain); }, - // Downgrade a parachain to a parathread + // Downgrade a lease holding parachain to an on-demand parachain Some(ParaLifecycle::DowngradingParachain) => { parachains.remove(para); ParaLifecycles::::insert(¶, ParaLifecycle::Parathread); }, - // Offboard a parathread or parachain from the system + // Offboard a lease holding or on-demand parachain from the system Some(ParaLifecycle::OffboardingParachain) | Some(ParaLifecycle::OffboardingParathread) => { parachains.remove(para); @@ -1688,7 +1689,7 @@ impl Pallet { /// /// Will return error if either is true: /// - /// - para is not a stable parachain or parathread (i.e. [`ParaLifecycle::is_stable`] is `false`) + /// - para is not a stable parachain (i.e. [`ParaLifecycle::is_stable`] is `false`) /// - para has a pending upgrade. /// - para has unprocessed messages in its UMP queue. /// @@ -1738,7 +1739,7 @@ impl Pallet { Ok(()) } - /// Schedule a parathread to be upgraded to a parachain. + /// Schedule a parathread (on-demand parachain) to be upgraded to a lease holding parachain. /// /// Will return error if `ParaLifecycle` is not `Parathread`. pub(crate) fn schedule_parathread_upgrade(id: ParaId) -> DispatchResult { @@ -1757,7 +1758,7 @@ impl Pallet { Ok(()) } - /// Schedule a parachain to be downgraded to a parathread. + /// Schedule a lease holding parachain to be downgraded to an on-demand parachain. /// /// Noop if `ParaLifecycle` is not `Parachain`. pub(crate) fn schedule_parachain_downgrade(id: ParaId) -> DispatchResult { @@ -2036,9 +2037,9 @@ impl Pallet { ParaLifecycles::::get(&id).map_or(false, |state| state.is_offboarding()) } - /// Whether a para ID corresponds to any live parachain. + /// Whether a para ID corresponds to any live lease holding parachain. /// - /// Includes parachains which will downgrade to a parathread in the future. + /// Includes lease holding parachains which will downgrade to a on-demand parachains in the future. pub fn is_parachain(id: ParaId) -> bool { if let Some(state) = ParaLifecycles::::get(&id) { state.is_parachain() @@ -2047,9 +2048,9 @@ impl Pallet { } } - /// Whether a para ID corresponds to any live parathread. + /// Whether a para ID corresponds to any live parathread (on-demand parachain). /// - /// Includes parathreads which will upgrade to parachains in the future. + /// Includes on-demand parachains which will upgrade to lease holding parachains in the future. pub fn is_parathread(id: ParaId) -> bool { if let Some(state) = ParaLifecycles::::get(&id) { state.is_parathread() diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs index ccbb237f3ba5..44333183c528 100644 --- a/runtime/parachains/src/scheduler.rs +++ b/runtime/parachains/src/scheduler.rs @@ -14,11 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -//! The scheduler module for parachains and parathreads. +//! The scheduler module for parachains. //! //! This module is responsible for two main tasks: -//! - Partitioning validators into groups and assigning groups to parachains and parathreads -//! - Scheduling parachains and parathreads +//! 
- Partitioning validators into groups and assigning groups to parachains +//! - Scheduling parachains //! //! It aims to achieve these tasks with these goals in mind: //! - It should be possible to know at least a block ahead-of-time, ideally more, @@ -27,11 +27,11 @@ //! should not be assigned. //! - Validator assignments should not be gameable. Malicious cartels should not be able to //! manipulate the scheduler to assign themselves as desired. -//! - High or close to optimal throughput of parachains and parathreads. Work among validator groups should be balanced. +//! - High or close to optimal throughput of parachains. Work among validator groups should be balanced. //! //! The Scheduler manages resource allocation using the concept of "Availability Cores". //! There will be one availability core for each parachain, and a fixed number of cores -//! used for multiplexing parathreads. Validators will be partitioned into groups, with the same +//! used for multiplexing on-demand parachains. Validators will be partitioned into groups, with the same //! number of groups as availability cores. Validator groups will be assigned to different availability cores //! over time. @@ -55,7 +55,7 @@ pub mod migration; const LOG_TARGET: &str = "runtime::scheduler"; -/// A queued parathread entry, pre-assigned to a core. +/// A queued parathread (on-demand parachain) entry, pre-assigned to a core. #[derive(Encode, Decode, TypeInfo)] #[cfg_attr(test, derive(PartialEq, Debug))] pub struct QueuedParathread { @@ -63,7 +63,7 @@ pub struct QueuedParathread { core_offset: u32, } -/// The queue of all parathread claims. +/// The queue of all parathread (on-demand parachain) claims. #[derive(Encode, Decode, TypeInfo)] #[cfg_attr(test, derive(PartialEq, Debug))] pub struct ParathreadClaimQueue { @@ -73,9 +73,10 @@ pub struct ParathreadClaimQueue { } impl ParathreadClaimQueue { - /// Queue a parathread entry to be processed. + /// Queue a parathread (on-demand parachain) entry to be processed. /// - /// Provide the entry and the number of parathread cores, which must be greater than 0. + /// Provide the entry and the number of parathread (on-demand parachain) cores, + /// which must be greater than 0. fn enqueue_entry(&mut self, entry: ParathreadEntry, n_parathread_cores: u32) { let core_offset = self.next_core_offset; self.next_core_offset = (self.next_core_offset + 1) % n_parathread_cores; @@ -117,7 +118,7 @@ pub enum FreedReason { pub enum AssignmentKind { /// A parachain. Parachain, - /// A parathread. + /// A parathread (on-demand parachain). Parathread(CollatorId, u32), } @@ -171,8 +172,9 @@ pub mod pallet { /// broader set of Polkadot validators, but instead just the subset used for parachains during /// this session. /// - /// Bound: The number of cores is the sum of the numbers of parachains and parathread multiplexers. - /// Reasonably, 100-1000. The dominant factor is the number of validators: safe upper bound at 10k. + /// Bound: The number of cores is the sum of the numbers of lease holding parachains and on-demand + /// parachain multiplexers. Reasonably, 100-1000. The dominant factor is the number of validators: + /// safe upper bound at 10k. #[pallet::storage] #[pallet::getter(fn validator_groups)] pub(crate) type ValidatorGroups = StorageValue<_, Vec>, ValueQuery>; @@ -180,26 +182,27 @@ pub mod pallet { /// A queue of upcoming claims and which core they should be mapped onto. 
/// /// The number of queued claims is bounded at the `scheduling_lookahead` - /// multiplied by the number of parathread multiplexer cores. Reasonably, 10 * 50 = 500. + /// multiplied by the number of parathread (on-demand parachain) multiplexer cores. Reasonably, + /// 10 * 50 = 500. #[pallet::storage] pub(crate) type ParathreadQueue = StorageValue<_, ParathreadClaimQueue, ValueQuery>; /// One entry for each availability core. Entries are `None` if the core is not currently occupied. Can be /// temporarily `Some` if scheduled but not occupied. /// The i'th parachain belongs to the i'th core, with the remaining cores all being - /// parathread-multiplexers. + /// on-demand parachain-multiplexers. /// /// Bounded by the maximum of either of these two values: - /// * The number of parachains and parathread multiplexers + /// * The number of lease holding parachains and on-demand parachain multiplexers /// * The number of validators divided by `configuration.max_validators_per_core`. #[pallet::storage] #[pallet::getter(fn availability_cores)] pub(crate) type AvailabilityCores = StorageValue<_, Vec>, ValueQuery>; - /// An index used to ensure that only one claim on a parathread exists in the queue or is - /// currently being handled by an occupied core. + /// An index used to ensure that only one claim on a parathread (on-demand parachain) exists in the queue + /// or is currently being handled by an occupied core. /// - /// Bounded by the number of parathread cores and scheduling lookahead. Reasonably, 10 * 50 = 500. + /// Bounded by the number of parathread (on-demand parachain) cores and scheduling lookahead. Reasonably, 10 * 50 = 500. #[pallet::storage] pub(crate) type ParathreadClaimIndex = StorageValue<_, Vec, ValueQuery>; @@ -215,7 +218,7 @@ pub mod pallet { /// Currently scheduled cores - free but up to be occupied. /// - /// Bounded by the number of cores: one for each parachain and parathread multiplexer. + /// Bounded by the number of cores: one for each lease holding parachain and on-demand parachain multiplexer. /// /// The value contained here will not be valid after the end of a block. Runtime APIs should be used to determine scheduled cores/ /// for the upcoming block. @@ -301,17 +304,17 @@ impl Pallet { ValidatorGroups::::set(groups); } - // prune out all parathread claims with too many retries. + // prune out all parathread (on-demand parachain) claims with too many retries. // assign all non-pruned claims to new cores, if they've changed. ParathreadClaimIndex::::mutate(|claim_index| { - // wipe all parathread metadata if no parathread cores are configured. + // wipe all on-demand metadata if no parathread (on-demand parachain) cores are configured. if config.parathread_cores == 0 { thread_queue = ParathreadClaimQueue { queue: Vec::new(), next_core_offset: 0 }; claim_index.clear(); return } - // prune out all entries beyond retry or that no longer correspond to live parathread. + // prune out all entries beyond retry or that no longer correspond to live parathread (on-demand parachain). thread_queue.queue.retain(|queued| { let will_keep = queued.claim.retries <= config.parathread_retries && >::is_parathread(queued.claim.claim.0); @@ -344,10 +347,11 @@ impl Pallet { >::set(now); } - /// Add a parathread claim to the queue. If there is a competing claim in the queue or currently - /// assigned to a core, this call will fail. This call will also fail if the queue is full. + /// Add a parathread (on-demand parachain) claim to the queue. 
If there is a competing claim in the + /// queue or currently assigned to a core, this call will fail. This call will also fail if the queue + /// is full. /// - /// Fails if the claim does not correspond to any live parathread. + /// Fails if the claim does not correspond to any live on-demand parachain. #[allow(unused)] pub fn add_parathread_claim(claim: ParathreadClaim) { if !>::is_parathread(claim.0) { @@ -396,7 +400,7 @@ impl Pallet { Some(CoreOccupied::Parathread(entry)) => { match freed_reason { FreedReason::Concluded => { - // After a parathread candidate has successfully been included, + // After a parathread (on-demand parachain) candidate has successfully been included, // open it up for further claims! ParathreadClaimIndex::::mutate(|index| { if let Ok(i) = index.binary_search(&entry.claim.0) { @@ -405,7 +409,7 @@ impl Pallet { }) }, FreedReason::TimedOut => { - // If a parathread candidate times out, it's not the collator's fault, + // If a parathread (on-demand parachain) candidate times out, it's not the collator's fault, // so we don't increment retries. ParathreadQueue::::mutate(|queue| { queue.enqueue_entry(entry, config.parathread_cores); @@ -485,7 +489,7 @@ impl Pallet { core, }) } else { - // parathread core offset, rel. to beginning. + // parathread (on-demand parachain) core offset, rel. to beginning. let core_offset = (core_index - parachains.len()) as u32; parathread_queue.take_next_on_core(core_offset).map(|entry| CoreAssignment { @@ -613,7 +617,7 @@ impl Pallet { /// Returns an optional predicate that should be used for timing out occupied cores. /// /// If `None`, no timing-out should be done. The predicate accepts the index of the core, and the - /// block number since which it has been occupied, and the respective parachain and parathread + /// block number since which it has been occupied, and the respective lease holding and on-demand parachain /// timeouts, i.e. only within `max(config.chain_availability_period, config.thread_availability_period)` /// of the last rotation would this return `Some`, unless there are no rotations. /// @@ -673,8 +677,8 @@ impl Pallet { /// Return the next thing that will be scheduled on this core assuming it is currently /// occupied and the candidate occupying it became available. /// - /// For parachains, this is always the ID of the parachain and no specified collator. - /// For parathreads, this is based on the next item in the `ParathreadQueue` assigned to that + /// For lease holding parachains, this is always the ID of the parachain and no specified collator. + /// For on-demand parachains, this is based on the next item in the `ParathreadQueue` assigned to that /// core, and is None if there isn't one. pub(crate) fn next_up_on_available(core: CoreIndex) -> Option { let parachains = >::parachains(); @@ -693,8 +697,8 @@ impl Pallet { /// Return the next thing that will be scheduled on this core assuming it is currently /// occupied and the candidate occupying it became available. /// - /// For parachains, this is always the ID of the parachain and no specified collator. - /// For parathreads, this is based on the next item in the `ParathreadQueue` assigned to that + /// For lease holding parachains, this is always the ID of the parachain and no specified collator. + /// For on-demand parachains, this is based on the next item in the `ParathreadQueue` assigned to that /// core, or if there isn't one, the claim that is currently occupying the core, as long /// as the claim's retries would not exceed the limit. 
Otherwise None. pub(crate) fn next_up_on_time_out(core: CoreIndex) -> Option { @@ -729,7 +733,7 @@ impl Pallet { } } - // Free all scheduled cores and return parathread claims to queue, with retries incremented. + // Free all scheduled cores and return parathread (on-demand parachain) claims to queue, with retries incremented. pub(crate) fn clear() { let config = >::config(); ParathreadQueue::::mutate(|queue| { diff --git a/runtime/parachains/src/scheduler/tests.rs b/runtime/parachains/src/scheduler/tests.rs index be77ec03ed84..aa99008d18e6 100644 --- a/runtime/parachains/src/scheduler/tests.rs +++ b/runtime/parachains/src/scheduler/tests.rs @@ -170,7 +170,7 @@ fn add_parathread_claim_works() { ); } - // claims on non-live parathreads have no effect. + // claims on non-live parathreads (on-demand parachains) have no effect. { let thread_id2 = ParaId::from(11); Scheduler::add_parathread_claim(ParathreadClaim(thread_id2, collator.clone())); @@ -276,7 +276,7 @@ fn session_change_prunes_cores_beyond_retries_and_those_from_non_live_parathread 4, ); - // Will be pruned: not a live parathread. + // Will be pruned: not a live parathread (on-demand parachain). queue.enqueue_entry( ParathreadEntry { claim: ParathreadClaim(thread_d, collator.clone()), retries: 0 }, 4, @@ -450,11 +450,11 @@ fn schedule_schedules() { new_test_ext(genesis_config).execute_with(|| { assert_eq!(default_config().parathread_cores, 3); - // register 2 parachains + // register 2 lease holding parachains schedule_blank_para(chain_a, ParaKind::Parachain); schedule_blank_para(chain_b, ParaKind::Parachain); - // and 3 parathreads + // and 3 parathreads (on-demand parachains) schedule_blank_para(thread_a, ParaKind::Parathread); schedule_blank_para(thread_b, ParaKind::Parathread); schedule_blank_para(thread_c, ParaKind::Parathread); @@ -498,7 +498,7 @@ fn schedule_schedules() { ); } - // add a couple of parathread claims. + // add a couple of parathread (on-demand parachain) claims. Scheduler::add_parathread_claim(ParathreadClaim(thread_a, collator.clone())); Scheduler::add_parathread_claim(ParathreadClaim(thread_c, collator.clone())); @@ -571,11 +571,11 @@ fn schedule_schedules_including_just_freed() { new_test_ext(genesis_config).execute_with(|| { assert_eq!(default_config().parathread_cores, 3); - // register 2 parachains + // register 2 lease holding parachains schedule_blank_para(chain_a, ParaKind::Parachain); schedule_blank_para(chain_b, ParaKind::Parachain); - // and 5 parathreads + // and 5 parathreads (on-demand parachains) schedule_blank_para(thread_a, ParaKind::Parathread); schedule_blank_para(thread_b, ParaKind::Parathread); schedule_blank_para(thread_c, ParaKind::Parathread); @@ -598,7 +598,7 @@ fn schedule_schedules_including_just_freed() { _ => None, }); - // add a couple of parathread claims now that the parathreads are live. + // add a couple of parathread (on-demand parachain) claims now that the on-demand parachains are live. Scheduler::add_parathread_claim(ParathreadClaim(thread_a, collator.clone())); Scheduler::add_parathread_claim(ParathreadClaim(thread_c, collator.clone())); @@ -621,9 +621,9 @@ fn schedule_schedules_including_just_freed() { assert!(Scheduler::scheduled().is_empty()); } - // add a couple more parathread claims - the claim on `b` will go to the 3rd parathread core (4) - // and the claim on `d` will go back to the 1st parathread core (2). The claim on `e` then - // will go for core `3`. 
+ // add a couple more parathread (on-demand parachain) claims - the claim on `b` will go to + // the 3rd on-demand core (4) and the claim on `d` will go back to the 1st on-demand + // core (2). The claim on `e` then will go for core `3`. Scheduler::add_parathread_claim(ParathreadClaim(thread_b, collator.clone())); Scheduler::add_parathread_claim(ParathreadClaim(thread_d, collator.clone())); Scheduler::add_parathread_claim(ParathreadClaim(thread_e, collator.clone())); @@ -633,8 +633,8 @@ fn schedule_schedules_including_just_freed() { { let scheduled = Scheduler::scheduled(); - // cores 0 and 1 are occupied by parachains. cores 2 and 3 are occupied by parathread - // claims. core 4 was free. + // cores 0 and 1 are occupied by lease holding parachains. cores 2 and 3 are occupied by + // on-demand parachain claims. core 4 was free. assert_eq!(scheduled.len(), 1); assert_eq!( scheduled[0], @@ -812,7 +812,7 @@ fn schedule_rotates_groups() { let config = { let mut config = default_config(); - // make sure parathread requests don't retry-out + // make sure parathread (on-demand parachain) requests don't retry-out config.parathread_retries = config.group_rotation_frequency * 3; config.parathread_cores = 2; config diff --git a/runtime/polkadot/src/lib.rs b/runtime/polkadot/src/lib.rs index ffe7daab5b39..a14bdfc690c2 100644 --- a/runtime/polkadot/src/lib.rs +++ b/runtime/polkadot/src/lib.rs @@ -836,6 +836,7 @@ where } parameter_types! { + // Deposit for a parathread (on-demand parachain) pub const ParathreadDeposit: Balance = 500 * DOLLARS; pub const MaxRetries: u32 = 3; } From 2b6ea6773d5caa4e9edbd7d80f03addadd0f0329 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 28 Jun 2023 00:59:19 +0400 Subject: [PATCH 55/76] fix bitfield distribution test --- node/network/bitfield-distribution/src/tests.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/node/network/bitfield-distribution/src/tests.rs b/node/network/bitfield-distribution/src/tests.rs index 39f1b8e8de10..1f9217b63071 100644 --- a/node/network/bitfield-distribution/src/tests.rs +++ b/node/network/bitfield-distribution/src/tests.rs @@ -1147,7 +1147,7 @@ fn network_protocol_versioning() { // validator 0 key pair let (mut state, signing_context, keystore, validator) = - state_with_view(our_view![hash_a, hash_b], hash_a); + state_with_view(our_view![hash_a, hash_b], hash_a, ReputationAggregator::new(|_| true)); let pool = sp_core::testing::TaskExecutor::new(); let (mut ctx, mut handle) = make_subsystem_context::(pool); @@ -1245,10 +1245,10 @@ fn network_protocol_versioning() { assert_matches!( handle.recv().await, AllMessages::NetworkBridgeTx( - NetworkBridgeTxMessage::ReportPeer(peer, rep) + NetworkBridgeTxMessage::ReportPeer(ReportPeerMessage::Single(peer, rep)) ) => { assert_eq!(peer, peer_a); - assert_eq!(rep, BENEFIT_VALID_MESSAGE_FIRST) + assert_eq!(rep, BENEFIT_VALID_MESSAGE_FIRST.into()) } ); }); From 789eadf1c12347fcd492d6035e70f67c1b47af7f Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Wed, 28 Jun 2023 01:02:47 +0400 Subject: [PATCH 56/76] approval distribution tests --- node/network/approval-distribution/src/tests.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/node/network/approval-distribution/src/tests.rs b/node/network/approval-distribution/src/tests.rs index 731aec2d53f6..ef18e82e76fd 100644 --- a/node/network/approval-distribution/src/tests.rs +++ b/node/network/approval-distribution/src/tests.rs @@ -415,7 +415,7 @@ fn delay_reputation_change() { let overseer = &mut 
			virtual_overseer;
 
 		// Setup peers
-		setup_peer_with_view(overseer, &peer, view![]).await;
+		setup_peer_with_view(overseer, &peer, ValidationVersion::V1, view![]).await;
 
 		// new block `hash_a` with 1 candidates
 		let meta = BlockApprovalMeta {
@@ -435,7 +435,7 @@
 		let assignments = vec![(cert.clone(), 0u32)];
 		let msg = protocol_v1::ApprovalDistributionMessage::Assignments(assignments.clone());
-		send_message_from_peer(overseer, &peer, msg).await;
+		send_message_from_peer(overseer, &peer, Versioned::V1(msg)).await;
 
 		// send an `Accept` message from the Approval Voting subsystem
 		assert_matches!(
@@ -2389,7 +2389,8 @@ fn import_versioned_approval() {
 	let parent_hash = Hash::repeat_byte(0xFF);
 	let hash = Hash::repeat_byte(0xAA);
 
-	let _ = test_harness(State::default(), |mut virtual_overseer| async move {
+	let state = state_without_reputation_delay();
+	let _ = test_harness(state, |mut virtual_overseer| async move {
 		let overseer = &mut virtual_overseer;
 		// All peers are aware of relay parent.
 		setup_peer_with_view(overseer, &peer_a, ValidationVersion::VStaging, view![hash]).await;

From b7b274860c401e8dfab96042161cd96d6293c2fc Mon Sep 17 00:00:00 2001
From: Chris Sosnin
Date: Wed, 28 Jun 2023 01:30:56 +0400
Subject: [PATCH 57/76] fix bridge tests

---
 node/network/bridge/src/rx/tests.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/node/network/bridge/src/rx/tests.rs b/node/network/bridge/src/rx/tests.rs
index 17372c154816..8c0b6ee479aa 100644
--- a/node/network/bridge/src/rx/tests.rs
+++ b/node/network/bridge/src/rx/tests.rs
@@ -1343,7 +1343,7 @@ fn our_view_updates_decreasing_order_and_limited_to_max() {
 fn network_protocol_versioning_view_update() {
 	let (oracle, handle) = make_sync_oracle(false);
 	test_harness(Box::new(oracle), |test_harness| async move {
-		let TestHarness { mut network_handle, mut virtual_overseer } = test_harness;
+		let TestHarness { mut network_handle, mut virtual_overseer, .. } = test_harness;
 
 		let peer_ids: Vec<_> = (0..4).map(|_| PeerId::random()).collect();
 		let peers = [
@@ -1399,7 +1399,7 @@ fn network_protocol_versioning_view_update() {
 fn network_protocol_versioning_subsystem_msg() {
 	let (oracle, _handle) = make_sync_oracle(false);
 	test_harness(Box::new(oracle), |test_harness| async move {
-		let TestHarness { mut network_handle, mut virtual_overseer } = test_harness;
+		let TestHarness { mut network_handle, mut virtual_overseer, ..
} = test_harness; let peer = PeerId::random(); From c4b741214ea28848bd90fd8ab4709c2f5dd3d80b Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 3 Jul 2023 14:57:29 +0200 Subject: [PATCH 58/76] update Cargo.lock --- Cargo.lock | 492 +++++++++++++++++++++++++++++++---------------------- 1 file changed, 284 insertions(+), 208 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c0991ae3b784..d0271b05f523 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -559,16 +559,28 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitvec" +version = "0.20.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7774144344a4faa177370406a7ff5f1da24303817368584c6206c8303eb07848" +dependencies = [ + "funty 1.1.0", + "radium 0.6.2", + "tap", + "wyz 0.2.0", +] + [[package]] name = "bitvec" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" dependencies = [ - "funty", - "radium", + "funty 2.0.0", + "radium 0.7.0", "tap", - "wyz", + "wyz 0.5.1", ] [[package]] @@ -678,7 +690,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eb5b05133427c07c4776906f673ccf36c21b102c9829c641a5b56bd151d44fd6" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", ] @@ -2368,7 +2380,7 @@ dependencies = [ "futures-timer", "log", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "scale-info", ] @@ -2451,7 +2463,7 @@ name = "fork-tree" version = "3.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", ] [[package]] @@ -2479,7 +2491,7 @@ dependencies = [ "frame-system", "linregress", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "paste", "scale-info", "serde", @@ -2513,7 +2525,7 @@ dependencies = [ "lazy_static", "linked-hash-map", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "rand 0.8.5", "rand_pcg", "sc-block-builder", @@ -2561,7 +2573,7 @@ dependencies = [ "frame-election-provider-solution-type", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-arithmetic", "sp-core", @@ -2578,7 +2590,7 @@ dependencies = [ "frame-support", "frame-system", "frame-try-runtime", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-io", @@ -2594,7 +2606,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f2a893ede8dde2293e94dacf9c8f5db5d0506cd909257a8f0ac2b7d610baf50c" dependencies = [ "cfg-if", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", ] @@ -2609,7 +2621,7 @@ dependencies = [ "indicatif", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "serde", "sp-core", "sp-io", @@ -2632,7 +2644,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "macro_magic", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "paste", "scale-info", "secp256k1", @@ -2704,7 +2716,7 @@ dependencies = [ "frame-support", "frame-support-test-pallet", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "pretty_assertions", "rustversion", "scale-info", @@ -2728,7 +2740,7 @@ source = 
"git+https://github.com/paritytech/substrate?branch=master#444bc4f89740 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", ] @@ -2741,7 +2753,7 @@ dependencies = [ "cfg-if", "frame-support", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-core", @@ -2760,7 +2772,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-runtime", @@ -2772,7 +2784,7 @@ name = "frame-system-rpc-runtime-api" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sp-api", ] @@ -2782,7 +2794,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sp-api", "sp-runtime", "sp-std", @@ -2821,6 +2833,12 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" +[[package]] +name = "funty" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fed34cd105917e91daa4da6b3728c47b068749d6a62c59811f06ed2ac71d9da7" + [[package]] name = "funty" version = "2.0.0" @@ -3461,7 +3479,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", ] [[package]] @@ -3802,7 +3820,7 @@ checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" name = "kusama-runtime" version = "0.9.43" dependencies = [ - "bitvec", + "bitvec 1.0.1", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -3863,7 +3881,7 @@ dependencies = [ "pallet-whitelist", "pallet-xcm", "pallet-xcm-benchmarks", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-parachains", @@ -4874,7 +4892,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f89740 dependencies = [ "futures", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sc-client-api", "sc-offchain", "sp-api", @@ -4893,7 +4911,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f89740 dependencies = [ "anyhow", "jsonrpsee", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "serde", "sp-api", "sp-blockchain", @@ -5476,7 +5494,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-runtime", @@ -5491,7 +5509,7 @@ dependencies = [ "frame-support", "frame-system", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-application-crypto", "sp-authority-discovery", @@ -5507,7 +5525,7 @@ dependencies = [ "frame-support", "frame-system", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-runtime", "sp-std", @@ -5525,7 +5543,7 @@ dependencies = [ "pallet-authorship", "pallet-session", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-application-crypto", 
"sp-consensus-babe", @@ -5548,7 +5566,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-io", @@ -5585,7 +5603,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-runtime", "sp-std", @@ -5600,7 +5618,7 @@ dependencies = [ "frame-system", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-consensus-beefy", @@ -5623,7 +5641,7 @@ dependencies = [ "pallet-beefy", "pallet-mmr", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-api", @@ -5644,7 +5662,7 @@ dependencies = [ "frame-system", "log", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-io", @@ -5663,7 +5681,7 @@ dependencies = [ "log", "pallet-bounties", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-io", @@ -5680,7 +5698,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-io", @@ -5697,7 +5715,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-io", @@ -5714,7 +5732,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-core", @@ -5734,7 +5752,7 @@ dependencies = [ "frame-system", "log", "pallet-election-provider-support-benchmarking", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "rand 0.8.5", "scale-info", "sp-arithmetic", @@ -5754,7 +5772,7 @@ dependencies = [ "frame-benchmarking", "frame-election-provider-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sp-npos-elections", "sp-runtime", ] @@ -5768,7 +5786,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-io", @@ -5788,7 +5806,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-io", "sp-runtime", @@ -5807,7 +5825,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-application-crypto", "sp-consensus-grandpa", @@ -5828,7 +5846,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-io", "sp-runtime", @@ -5845,7 +5863,7 @@ dependencies = [ "frame-system", "log", "pallet-authorship", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-application-crypto", "sp-core", @@ -5863,7 +5881,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-io", @@ -5881,7 +5899,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-io", @@ -5898,7 +5916,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-arithmetic", "sp-core", @@ -5916,7 +5934,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + 
"parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-io", @@ -5934,7 +5952,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-io", "sp-runtime", @@ -5949,7 +5967,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-arithmetic", "sp-core", @@ -5965,7 +5983,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-io", @@ -5986,7 +6004,7 @@ dependencies = [ "pallet-bags-list", "pallet-nomination-pools", "pallet-staking", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-runtime", "sp-runtime-interface", @@ -6000,7 +6018,7 @@ version = "1.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "pallet-nomination-pools", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sp-api", "sp-std", ] @@ -6014,7 +6032,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-runtime", @@ -6039,7 +6057,7 @@ dependencies = [ "pallet-offences", "pallet-session", "pallet-staking", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-runtime", "sp-staking", @@ -6055,7 +6073,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-io", @@ -6071,7 +6089,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-io", "sp-runtime", @@ -6087,7 +6105,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-arithmetic", "sp-core", @@ -6104,7 +6122,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-io", "sp-runtime", @@ -6121,7 +6139,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-arithmetic", @@ -6139,7 +6157,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-io", "sp-runtime", @@ -6157,7 +6175,7 @@ dependencies = [ "impl-trait-for-tuples", "log", "pallet-timestamp", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-io", @@ -6194,7 +6212,7 @@ dependencies = [ "frame-system", "hex-literal 0.3.4", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "rand_chacha 0.2.2", "scale-info", "sp-arithmetic", @@ -6215,7 +6233,7 @@ dependencies = [ "log", "pallet-authorship", "pallet-session", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "rand_chacha 0.2.2", "scale-info", "serde", @@ -6251,7 +6269,7 @@ name = "pallet-staking-runtime-api" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sp-api", ] @@ -6264,7 +6282,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-io", @@ -6280,7 +6298,7 @@ dependencies = [ "frame-benchmarking", 
"frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-io", "sp-runtime", @@ -6296,7 +6314,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-inherents", "sp-io", @@ -6315,7 +6333,7 @@ dependencies = [ "frame-system", "log", "pallet-treasury", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-core", @@ -6331,7 +6349,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f89740 dependencies = [ "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-core", @@ -6347,7 +6365,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f89740 dependencies = [ "jsonrpsee", "pallet-transaction-payment-rpc-runtime-api", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sp-api", "sp-blockchain", "sp-core", @@ -6362,7 +6380,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sp-api", "sp-runtime", "sp-weights", @@ -6378,7 +6396,7 @@ dependencies = [ "frame-system", "impl-trait-for-tuples", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-runtime", @@ -6394,7 +6412,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-runtime", "sp-std", @@ -6408,7 +6426,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-io", @@ -6425,7 +6443,7 @@ dependencies = [ "frame-support", "frame-system", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-runtime", "sp-std", @@ -6439,7 +6457,7 @@ dependencies = [ "frame-benchmarking", "frame-support", "frame-system", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-api", "sp-runtime", @@ -6456,7 +6474,7 @@ dependencies = [ "frame-system", "log", "pallet-balances", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-parachain", "polkadot-runtime-parachains", "scale-info", @@ -6481,7 +6499,7 @@ dependencies = [ "pallet-assets", "pallet-balances", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-primitives", "polkadot-runtime-common", "scale-info", @@ -6515,6 +6533,19 @@ dependencies = [ "snap", ] +[[package]] +name = "parity-scale-codec" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "373b1a4c1338d9cd3d1fa53b3a11bdab5ab6bd80a20f7f7becd76953ae2be909" +dependencies = [ + "arrayvec 0.7.2", + "bitvec 0.20.4", + "byte-slice-cast", + "impl-trait-for-tuples", + "serde", +] + [[package]] name = "parity-scale-codec" version = "3.6.1" @@ -6522,7 +6553,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2287753623c76f953acd29d15d8100bcab84d29db78fb6f352adb3c53e83b967" dependencies = [ "arrayvec 0.7.2", - "bitvec", + "bitvec 1.0.1", "byte-slice-cast", "bytes", "impl-trait-for-tuples", @@ -6829,8 +6860,9 @@ dependencies = [ name = "polkadot-availability-bitfield-distribution" version = "0.9.43" dependencies = [ + "always-assert", "assert_matches", - "bitvec", + "bitvec 1.0.1", "env_logger 0.9.0", "futures", "futures-timer", @@ -6861,7 
+6893,7 @@ dependencies = [ "futures", "futures-timer", "lru 0.9.0", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6891,7 +6923,7 @@ dependencies = [ "futures-timer", "log", "lru 0.9.0", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -6990,13 +7022,13 @@ version = "0.9.43" dependencies = [ "always-assert", "assert_matches", - "bitvec", + "bitvec 1.0.1", "env_logger 0.9.0", "fatality", "futures", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -7004,6 +7036,7 @@ dependencies = [ "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "sc-keystore", "sc-network", "sp-core", "sp-keyring", @@ -7017,7 +7050,7 @@ dependencies = [ name = "polkadot-core-primitives" version = "0.9.43" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-runtime", @@ -7038,7 +7071,7 @@ dependencies = [ "indexmap", "lazy_static", "lru 0.9.0", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-erasure-coding", "polkadot-node-network-protocol", "polkadot-node-primitives", @@ -7062,7 +7095,7 @@ name = "polkadot-erasure-coding" version = "0.9.43" dependencies = [ "criterion", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-node-primitives", "polkadot-primitives", "reed-solomon-novelpoly", @@ -7109,7 +7142,7 @@ dependencies = [ "fatality", "futures", "futures-timer", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "polkadot-node-metrics", "polkadot-node-network-protocol", @@ -7132,7 +7165,7 @@ name = "polkadot-node-collation-generation" version = "0.9.43" dependencies = [ "futures", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-erasure-coding", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -7152,7 +7185,7 @@ version = "0.9.43" dependencies = [ "assert_matches", "async-trait", - "bitvec", + "bitvec 1.0.1", "derive_more", "futures", "futures-timer", @@ -7160,7 +7193,7 @@ dependencies = [ "kvdb-memorydb", "lru 0.9.0", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "polkadot-node-jaeger", "polkadot-node-primitives", @@ -7190,14 +7223,14 @@ name = "polkadot-node-core-av-store" version = "0.9.43" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.1", "env_logger 0.9.0", "futures", "futures-timer", "kvdb", "kvdb-memorydb", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "polkadot-erasure-coding", "polkadot-node-primitives", @@ -7219,7 +7252,7 @@ name = "polkadot-node-core-backing" version = "0.9.43" dependencies = [ "assert_matches", - "bitvec", + "bitvec 1.0.1", "fatality", "futures", "polkadot-erasure-coding", @@ -7264,7 +7297,7 @@ dependencies = [ "async-trait", "futures", "futures-timer", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-node-core-pvf", "polkadot-node-metrics", "polkadot-node-primitives", @@ -7286,7 +7319,7 @@ version = "0.9.43" dependencies = [ "futures", "maplit", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-node-metrics", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -7308,7 +7341,7 @@ dependencies = [ "futures-timer", "kvdb", "kvdb-memorydb", - "parity-scale-codec", + "parity-scale-codec 
3.6.1", "parking_lot 0.12.1", "polkadot-node-primitives", "polkadot-node-subsystem", @@ -7331,7 +7364,7 @@ dependencies = [ "kvdb", "kvdb-memorydb", "lru 0.9.0", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", @@ -7364,11 +7397,36 @@ dependencies = [ "tracing-gum", ] +[[package]] +name = "polkadot-node-core-prospective-parachains" +version = "0.9.16" +dependencies = [ + "assert_matches", + "bitvec 1.0.1", + "fatality", + "futures", + "parity-scale-codec 2.3.1", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-types", + "polkadot-node-subsystem-util", + "polkadot-primitives", + "polkadot-primitives-test-helpers", + "sc-keystore", + "sp-application-crypto", + "sp-core", + "sp-keyring", + "sp-keystore", + "thiserror", + "tracing-gum", +] + [[package]] name = "polkadot-node-core-provisioner" version = "0.9.43" dependencies = [ - "bitvec", + "bitvec 1.0.1", "fatality", "futures", "futures-timer", @@ -7395,7 +7453,7 @@ dependencies = [ "futures-timer", "hex-literal 0.3.4", "libc", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "pin-project", "polkadot-core-primitives", "polkadot-node-core-pvf-common", @@ -7449,7 +7507,7 @@ dependencies = [ "cpu-time", "futures", "libc", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-parachain", "polkadot-primitives", "sc-executor", @@ -7470,7 +7528,7 @@ version = "0.9.43" dependencies = [ "cpu-time", "futures", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-node-core-pvf-common", "polkadot-parachain", "polkadot-primitives", @@ -7489,7 +7547,7 @@ version = "0.9.43" dependencies = [ "futures", "libc", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-node-core-pvf-common", "polkadot-parachain", "polkadot-primitives", @@ -7533,7 +7591,7 @@ dependencies = [ "lazy_static", "log", "mick-jaeger", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "polkadot-node-primitives", "polkadot-primitives", @@ -7553,7 +7611,7 @@ dependencies = [ "futures-timer", "hyper", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-primitives", "polkadot-test-service", "prioritized-metered-channel", @@ -7576,11 +7634,12 @@ version = "0.9.43" dependencies = [ "async-channel", "async-trait", + "bitvec 1.0.1", "derive_more", "fatality", "futures", "hex", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-node-jaeger", "polkadot-node-primitives", "polkadot-primitives", @@ -7599,7 +7658,7 @@ version = "0.9.43" dependencies = [ "bounded-vec", "futures", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-erasure-coding", "polkadot-parachain", "polkadot-primitives", @@ -7683,7 +7742,7 @@ dependencies = [ "log", "lru 0.9.0", "parity-db", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.11.2", "pin-project", "polkadot-node-jaeger", @@ -7738,7 +7797,7 @@ dependencies = [ "bounded-collections", "derive_more", "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-core-primitives", "scale-info", "serde", @@ -7768,9 +7827,9 @@ dependencies = [ name = "polkadot-primitives" version = "0.9.43" dependencies = [ - "bitvec", + "bitvec 1.0.1", "hex-literal 0.4.1", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-core-primitives", "polkadot-parachain", "scale-info", @@ -7836,7 +7895,7 @@ dependencies = [ name = "polkadot-runtime" version = "0.9.43" 
dependencies = [ - "bitvec", + "bitvec 1.0.1", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -7892,7 +7951,7 @@ dependencies = [ "pallet-vesting", "pallet-whitelist", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-primitives", "polkadot-runtime-common", "polkadot-runtime-constants", @@ -7938,7 +7997,7 @@ dependencies = [ name = "polkadot-runtime-common" version = "0.9.43" dependencies = [ - "bitvec", + "bitvec 1.0.1", "frame-benchmarking", "frame-election-provider-support", "frame-support", @@ -7960,7 +8019,7 @@ dependencies = [ "pallet-transaction-payment", "pallet-treasury", "pallet-vesting", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-primitives", "polkadot-primitives-test-helpers", "polkadot-runtime-parachains", @@ -8004,7 +8063,7 @@ version = "0.9.43" dependencies = [ "bs58", "frame-benchmarking", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-primitives", "sp-std", "sp-tracing", @@ -8016,7 +8075,7 @@ version = "0.9.43" dependencies = [ "assert_matches", "bitflags", - "bitvec", + "bitvec 1.0.1", "derive_more", "frame-benchmarking", "frame-support", @@ -8034,7 +8093,7 @@ dependencies = [ "pallet-staking", "pallet-timestamp", "pallet-vesting", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-parachain", "polkadot-primitives", "polkadot-primitives-test-helpers", @@ -8107,6 +8166,7 @@ dependencies = [ "polkadot-node-core-chain-selection", "polkadot-node-core-dispute-coordinator", "polkadot-node-core-parachains-inherent", + "polkadot-node-core-prospective-parachains", "polkadot-node-core-provisioner", "polkadot-node-core-pvf-checker", "polkadot-node-core-runtime-api", @@ -8186,18 +8246,22 @@ version = "0.9.43" dependencies = [ "arrayvec 0.5.2", "assert_matches", + "async-channel", + "bitvec 1.0.1", "fatality", "futures", "futures-timer", "indexmap", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-node-network-protocol", "polkadot-node-primitives", "polkadot-node-subsystem", "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-types", "polkadot-node-subsystem-util", "polkadot-primitives", "polkadot-primitives-test-helpers", + "rand_chacha 0.3.1", "sc-keystore", "sc-network", "sp-application-crypto", @@ -8215,7 +8279,7 @@ dependencies = [ name = "polkadot-statement-table" version = "0.9.43" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-primitives", "sp-core", ] @@ -8225,7 +8289,7 @@ name = "polkadot-test-client" version = "0.9.43" dependencies = [ "futures", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-node-subsystem", "polkadot-primitives", "polkadot-test-runtime", @@ -8279,7 +8343,7 @@ dependencies = [ name = "polkadot-test-runtime" version = "0.9.43" dependencies = [ - "bitvec", + "bitvec 1.0.1", "frame-election-provider-support", "frame-executive", "frame-support", @@ -8303,7 +8367,7 @@ dependencies = [ "pallet-transaction-payment-rpc-runtime-api", "pallet-vesting", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -8855,6 +8919,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "643f8f41a8ebc4c5dc4515c82bb8abd397b527fc20fd681b7c011c2aee5d44fb" + [[package]] name = "radium" version = "0.7.0" @@ -9263,7 +9333,7 @@ dependencies = [ "pallet-vesting", "pallet-xcm", "pallet-xcm-benchmarks", 
- "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -9585,7 +9655,7 @@ dependencies = [ "libp2p", "log", "multihash 0.17.0", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "prost 0.11.0", "prost-build", "rand 0.8.5", @@ -9609,7 +9679,7 @@ dependencies = [ "futures", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sc-block-builder", "sc-client-api", "sc-proposer-metrics", @@ -9629,7 +9699,7 @@ name = "sc-block-builder" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sc-client-api", "sp-api", "sp-block-builder", @@ -9682,7 +9752,7 @@ dependencies = [ "libp2p-identity", "log", "names", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "rand 0.8.5", "regex", "rpassword", @@ -9716,7 +9786,7 @@ dependencies = [ "fnv", "futures", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "sc-executor", "sc-transaction-pool-api", @@ -9747,7 +9817,7 @@ dependencies = [ "linked-hash-map", "log", "parity-db", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "sc-client-api", "sc-state-db", @@ -9798,7 +9868,7 @@ dependencies = [ "num-bigint", "num-rational", "num-traits", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "sc-client-api", "sc-consensus", @@ -9854,7 +9924,7 @@ dependencies = [ "fnv", "futures", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "sc-client-api", "sc-consensus", @@ -9885,7 +9955,7 @@ dependencies = [ "futures", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "sc-consensus-beefy", "sc-rpc", @@ -9902,7 +9972,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "fork-tree", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sc-client-api", "sc-consensus", "sp-blockchain", @@ -9923,7 +9993,7 @@ dependencies = [ "futures", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "rand 0.8.5", "sc-block-builder", @@ -9958,7 +10028,7 @@ dependencies = [ "futures", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sc-client-api", "sc-consensus-grandpa", "sc-rpc", @@ -9978,7 +10048,7 @@ dependencies = [ "futures", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sc-client-api", "sc-consensus", "sc-telemetry", @@ -9998,7 +10068,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "lru 0.10.0", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "sc-executor-common", "sc-executor-wasmtime", @@ -10092,7 +10162,7 @@ dependencies = [ "linked_hash_set", "log", "mockall", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "partial_sort", "pin-project", @@ -10143,7 +10213,7 @@ dependencies = [ "bitflags", "futures", "libp2p-identity", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "prost-build", "sc-consensus", "sp-consensus", @@ -10179,7 +10249,7 @@ dependencies = [ "futures", "libp2p-identity", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "prost 0.11.0", "prost-build", "sc-client-api", @@ 
-10205,7 +10275,7 @@ dependencies = [ "log", "lru 0.10.0", "mockall", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "prost 0.11.0", "prost-build", "sc-client-api", @@ -10233,7 +10303,7 @@ dependencies = [ "futures", "libp2p", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sc-network", "sc-network-common", "sc-utils", @@ -10257,7 +10327,7 @@ dependencies = [ "libp2p", "num_cpus", "once_cell", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "rand 0.8.5", "sc-client-api", @@ -10288,7 +10358,7 @@ dependencies = [ "futures", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "sc-block-builder", "sc-chain-spec", @@ -10317,7 +10387,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "jsonrpsee", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sc-chain-spec", "sc-transaction-pool-api", "scale-info", @@ -10356,7 +10426,7 @@ dependencies = [ "hex", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "sc-chain-spec", "sc-client-api", @@ -10383,7 +10453,7 @@ dependencies = [ "futures-timer", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "pin-project", "rand 0.8.5", @@ -10442,7 +10512,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "sp-core", ] @@ -10467,7 +10537,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "jsonrpsee", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sc-chain-spec", "sc-client-api", "sc-consensus-babe", @@ -10568,7 +10638,7 @@ dependencies = [ "futures-timer", "linked-hash-map", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "sc-client-api", "sc-transaction-pool-api", @@ -10592,7 +10662,7 @@ dependencies = [ "async-trait", "futures", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "serde", "sp-blockchain", "sp-core", @@ -10621,10 +10691,10 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cfdffd972d76b22f3d7f81c8be34b2296afd3a25e0a547bd9abe340a4dbbe97" dependencies = [ - "bitvec", + "bitvec 1.0.1", "cfg-if", "derive_more", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info-derive", "serde", ] @@ -11053,7 +11123,7 @@ name = "slot-range-helper" version = "0.9.43" dependencies = [ "enumn", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "paste", "sp-runtime", "sp-std", @@ -11131,7 +11201,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f89740 dependencies = [ "hash-db", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-api-proc-macro", "sp-core", @@ -11163,7 +11233,7 @@ name = "sp-application-crypto" version = "23.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-core", @@ -11178,7 +11248,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f89740 dependencies = [ "integer-sqrt", "num-traits", - "parity-scale-codec", + 
"parity-scale-codec 3.6.1", "scale-info", "serde", "sp-std", @@ -11190,7 +11260,7 @@ name = "sp-authority-discovery" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-api", "sp-application-crypto", @@ -11217,7 +11287,7 @@ dependencies = [ "futures", "log", "lru 0.10.0", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "sp-api", "sp-consensus", @@ -11248,7 +11318,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "async-trait", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-api", "sp-application-crypto", @@ -11265,7 +11335,7 @@ version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "async-trait", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-api", @@ -11284,7 +11354,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "lazy_static", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-api", @@ -11304,7 +11374,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f89740 dependencies = [ "finality-grandpa", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-api", @@ -11320,7 +11390,7 @@ name = "sp-consensus-slots" version = "0.10.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-std", @@ -11347,7 +11417,7 @@ dependencies = [ "libsecp256k1", "log", "merlin", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "paste", "primitive-types", @@ -11419,7 +11489,7 @@ version = "0.19.0" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "environmental", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sp-std", "sp-storage", ] @@ -11431,7 +11501,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f89740 dependencies = [ "async-trait", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-runtime", "sp-std", @@ -11448,7 +11518,7 @@ dependencies = [ "ed25519-dalek", "libsecp256k1", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "rustversion", "secp256k1", "sp-core", @@ -11479,7 +11549,7 @@ name = "sp-keystore" version = "0.27.0" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "sp-core", "sp-externalities", @@ -11501,7 +11571,7 @@ version = "0.1.0" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "frame-metadata", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-std", ] @@ -11513,7 +11583,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f89740 dependencies = [ "ckb-merkle-mountain-range", "log", - "parity-scale-codec", + "parity-scale-codec 
3.6.1", "scale-info", "serde", "sp-api", @@ -11529,7 +11599,7 @@ name = "sp-npos-elections" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-arithmetic", @@ -11577,7 +11647,7 @@ dependencies = [ "hash256-std-hasher", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "paste", "rand 0.8.5", "scale-info", @@ -11597,7 +11667,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f89740 dependencies = [ "bytes", "impl-trait-for-tuples", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "primitive-types", "sp-externalities", "sp-runtime-interface-proc-macro", @@ -11625,7 +11695,7 @@ name = "sp-session" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-api", "sp-core", @@ -11639,7 +11709,7 @@ name = "sp-staking" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-core", @@ -11654,7 +11724,7 @@ source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f89740 dependencies = [ "hash-db", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "rand 0.8.5", "smallvec", @@ -11673,7 +11743,7 @@ name = "sp-statement-store" version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-api", "sp-application-crypto", @@ -11696,7 +11766,7 @@ version = "13.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "ref-cast", "serde", "sp-debug-derive", @@ -11709,7 +11779,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "async-trait", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sp-inherents", "sp-runtime", "sp-std", @@ -11721,7 +11791,7 @@ name = "sp-tracing" version = "10.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sp-std", "tracing", "tracing-core", @@ -11743,7 +11813,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "async-trait", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "sp-core", "sp-inherents", @@ -11763,7 +11833,7 @@ dependencies = [ "lazy_static", "memory-db", "nohash-hasher", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parking_lot 0.12.1", "scale-info", "schnellru", @@ -11781,7 +11851,7 @@ version = "22.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "impl-serde", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "parity-wasm", "scale-info", "serde", @@ -11797,7 +11867,7 @@ name = 
"sp-version-proc-macro" version = "8.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "proc-macro2", "quote", "syn 2.0.16", @@ -11811,7 +11881,7 @@ dependencies = [ "anyhow", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sp-std", "wasmtime", ] @@ -11821,7 +11891,7 @@ name = "sp-weights" version = "20.0.0" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "smallvec", @@ -11898,7 +11968,7 @@ dependencies = [ "pallet-election-provider-multi-phase", "pallet-staking", "pallet-transaction-payment", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "paste", "polkadot-core-primitives", "polkadot-runtime", @@ -12061,7 +12131,7 @@ dependencies = [ "futures", "jsonrpsee", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sc-rpc-api", "sc-transaction-pool-api", "sp-api", @@ -12102,7 +12172,7 @@ version = "4.0.0-dev" source = "git+https://github.com/paritytech/substrate?branch=master#444bc4f897405fd864f1b8fbe7cdf3f3dbb33d81" dependencies = [ "jsonrpsee", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sc-client-api", "sc-rpc-api", "serde", @@ -12121,7 +12191,7 @@ dependencies = [ "array-bytes", "async-trait", "futures", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sc-client-api", "sc-client-db", "sc-consensus", @@ -12326,7 +12396,7 @@ name = "test-parachain-adder" version = "0.9.43" dependencies = [ "dlmalloc", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-parachain", "sp-io", "sp-std", @@ -12342,7 +12412,7 @@ dependencies = [ "futures", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-cli", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -12374,7 +12444,7 @@ version = "0.9.43" dependencies = [ "dlmalloc", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-parachain", "sp-io", "sp-std", @@ -12390,7 +12460,7 @@ dependencies = [ "futures", "futures-timer", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-cli", "polkadot-node-core-pvf", "polkadot-node-primitives", @@ -12412,7 +12482,7 @@ dependencies = [ name = "test-parachains" version = "0.9.43" dependencies = [ - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sp-core", "test-parachain-adder", "test-parachain-halt", @@ -13026,7 +13096,7 @@ dependencies = [ "frame-try-runtime", "hex", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sc-cli", "sc-executor", "serde", @@ -13942,7 +14012,7 @@ dependencies = [ name = "westend-runtime" version = "0.9.43" dependencies = [ - "bitvec", + "bitvec 1.0.1", "frame-benchmarking", "frame-election-provider-support", "frame-executive", @@ -13997,7 +14067,7 @@ dependencies = [ "pallet-vesting", "pallet-xcm", "pallet-xcm-benchmarks", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-parachain", "polkadot-primitives", "polkadot-runtime-common", @@ -14367,6 +14437,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "wyz" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85e60b0d1b5f99db2556934e21937020776a5d31520bf169e851ac44e6420214" + [[package]] name = "wyz" version = "0.5.1" @@ -14445,7 +14521,7 @@ dependencies = [ "hex-literal 0.4.1", "impl-trait-for-tuples", "log", - 
"parity-scale-codec", + "parity-scale-codec 3.6.1", "scale-info", "serde", "sp-io", @@ -14465,7 +14541,7 @@ dependencies = [ "pallet-balances", "pallet-transaction-payment", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-parachain", "polkadot-runtime-parachains", "polkadot-test-runtime", @@ -14489,7 +14565,7 @@ dependencies = [ "frame-support", "impl-trait-for-tuples", "log", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "sp-arithmetic", "sp-core", "sp-io", @@ -14534,7 +14610,7 @@ name = "xcm-simulator" version = "0.9.43" dependencies = [ "frame-support", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "paste", "polkadot-core-primitives", "polkadot-parachain", @@ -14557,7 +14633,7 @@ dependencies = [ "pallet-message-queue", "pallet-uniques", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-core-primitives", "polkadot-parachain", "polkadot-runtime-parachains", @@ -14584,7 +14660,7 @@ dependencies = [ "pallet-balances", "pallet-message-queue", "pallet-xcm", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "polkadot-core-primitives", "polkadot-parachain", "polkadot-runtime-parachains", @@ -14655,7 +14731,7 @@ version = "0.9.43" dependencies = [ "futures-util", "lazy_static", - "parity-scale-codec", + "parity-scale-codec 3.6.1", "reqwest", "serde", "serde_json", From 5fb7ffbc039cde006262eb640c63f1be03c850a6 Mon Sep 17 00:00:00 2001 From: Alin Dima Date: Mon, 10 Jul 2023 17:15:41 +0300 Subject: [PATCH 59/76] [async-backing-branch] Optimize collator-protocol validator-side request fetching (#7457) * Optimize collator-protocol validator-side request fetching * address feedback: replace tuples with structs * feedback: add doc comments * move collation types to subfolder --------- Signed-off-by: alindima --- Cargo.lock | 2 +- node/network/collator-protocol/Cargo.toml | 2 +- node/network/collator-protocol/src/error.rs | 3 - .../src/validator_side/collation.rs | 110 +++- .../src/validator_side/metrics.rs | 4 +- .../src/validator_side/mod.rs | 491 ++++++++---------- .../src/validator_side/tests/mod.rs | 6 +- 7 files changed, 322 insertions(+), 296 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 85035c37c734..ea6dd3099e1a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7009,7 +7009,6 @@ dependencies = [ name = "polkadot-collator-protocol" version = "0.9.43" dependencies = [ - "always-assert", "assert_matches", "bitvec 1.0.1", "env_logger 0.9.0", @@ -7032,6 +7031,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "thiserror", + "tokio-util", "tracing-gum", ] diff --git a/node/network/collator-protocol/Cargo.toml b/node/network/collator-protocol/Cargo.toml index 027e8f9dbbbe..5381bc3354c1 100644 --- a/node/network/collator-protocol/Cargo.toml +++ b/node/network/collator-protocol/Cargo.toml @@ -5,7 +5,6 @@ authors.workspace = true edition.workspace = true [dependencies] -always-assert = "0.1.2" bitvec = { version = "1.0.1", default-features = false, features = ["alloc"] } futures = "0.3.21" futures-timer = "3" @@ -22,6 +21,7 @@ polkadot-node-subsystem-util = { path = "../../subsystem-util" } polkadot-node-subsystem = {path = "../../subsystem" } fatality = "0.0.6" thiserror = "1.0.31" +tokio-util = "0.7.1" [dev-dependencies] log = "0.4.17" diff --git a/node/network/collator-protocol/src/error.rs b/node/network/collator-protocol/src/error.rs index 6524c0cf04c1..9348198e7085 100644 --- a/node/network/collator-protocol/src/error.rs +++ b/node/network/collator-protocol/src/error.rs @@ -69,9 +69,6 @@ pub enum Error { /// to 
start seconding a candidate.
 #[derive(Debug, thiserror::Error)]
 pub enum SecondingError {
-	#[error("Failed to fetch a collation")]
-	FailedToFetch(#[from] oneshot::Canceled),
-
 	#[error("Error while accessing Runtime API")]
 	RuntimeApi(#[from] RuntimeApiError),
 
diff --git a/node/network/collator-protocol/src/validator_side/collation.rs b/node/network/collator-protocol/src/validator_side/collation.rs
index 03623f61e9ff..196d78053207 100644
--- a/node/network/collator-protocol/src/validator_side/collation.rs
+++ b/node/network/collator-protocol/src/validator_side/collation.rs
@@ -27,15 +27,22 @@
 //! ┌──────────────────────────────────────────┐
 //! └─▶Advertised ─▶ Pending ─▶ Fetched ─▶ Validated
 
-use futures::channel::oneshot;
-use std::collections::VecDeque;
+use std::{collections::VecDeque, future::Future, pin::Pin, task::Poll};
 
-use polkadot_node_network_protocol::PeerId;
+use futures::{future::BoxFuture, FutureExt};
+use polkadot_node_network_protocol::{
+	request_response::{outgoing::RequestError, v1 as request_v1, OutgoingResult},
+	PeerId,
+};
 use polkadot_node_primitives::PoV;
-use polkadot_node_subsystem_util::runtime::ProspectiveParachainsMode;
+use polkadot_node_subsystem::jaeger;
+use polkadot_node_subsystem_util::{
+	metrics::prometheus::prometheus::HistogramTimer, runtime::ProspectiveParachainsMode,
+};
 use polkadot_primitives::{
 	CandidateHash, CandidateReceipt, CollatorId, Hash, Id as ParaId, PersistedValidationData,
 };
+use tokio_util::sync::CancellationToken;
 
 use crate::{error::SecondingError, LOG_TARGET};
 
@@ -148,10 +155,25 @@ pub fn fetched_collation_sanity_check(
 	}
 }
 
-pub type CollationEvent = (CollatorId, PendingCollation);
+/// Identifier for a requested collation and the respective collator that advertised it.
+#[derive(Debug, Clone)]
+pub struct CollationEvent {
+	/// Collator id.
+	pub collator_id: CollatorId,
+	/// The requested collation data.
+	pub pending_collation: PendingCollation,
+}
 
-pub type PendingCollationFetch =
-	(CollationEvent, std::result::Result<(CandidateReceipt, PoV), oneshot::Canceled>);
+/// Fetched collation data.
+#[derive(Debug, Clone)]
+pub struct PendingCollationFetch {
+	/// Collation identifier.
+	pub collation_event: CollationEvent,
+	/// Candidate receipt.
+	pub candidate_receipt: CandidateReceipt,
+	/// Proof of validity.
+	pub pov: PoV,
+}
 
 /// The status of the collations in [`CollationsPerRelayParent`].
 #[derive(Debug, Clone, Copy)]
@@ -268,3 +290,77 @@ impl Collations {
 		self.seconded_count < seconded_limit
 	}
 }
+
+// Any error that can occur when awaiting a collation fetch response.
+#[derive(Debug, thiserror::Error)]
+pub(super) enum CollationFetchError {
+	#[error("Future was cancelled.")]
+	Cancelled,
+	#[error("{0}")]
+	Request(#[from] RequestError),
+}
+
+/// Future that concludes when the collator has responded to our collation fetch request
+/// or the request was cancelled by the validator.
+pub(super) struct CollationFetchRequest {
+	/// Info about the requested collation.
+	pub pending_collation: PendingCollation,
+	/// Collator id.
+	pub collator_id: CollatorId,
+	/// Responses from collator.
+	pub from_collator: BoxFuture<'static, OutgoingResult<request_v1::CollationFetchingResponse>>,
+	/// Handle used for checking if this request was cancelled.
+	pub cancellation_token: CancellationToken,
+	/// A jaeger span corresponding to the lifetime of the request.
+	pub span: Option<jaeger::Span>,
+	/// A metric histogram for the lifetime of the request
+	pub _lifetime_timer: Option<HistogramTimer>,
+}
+
+impl Future for CollationFetchRequest {
+	type Output = (
+		CollationEvent,
+		std::result::Result<request_v1::CollationFetchingResponse, CollationFetchError>,
+	);
+
+	fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>) -> Poll<Self::Output> {
+		// First check if this fetch request was cancelled.
+		let cancelled = match std::pin::pin!(self.cancellation_token.cancelled()).poll(cx) {
+			Poll::Ready(()) => true,
+			Poll::Pending => false,
+		};
+
+		if cancelled {
+			self.span.as_mut().map(|s| s.add_string_tag("success", "false"));
+			return Poll::Ready((
+				CollationEvent {
+					collator_id: self.collator_id.clone(),
+					pending_collation: self.pending_collation,
+				},
+				Err(CollationFetchError::Cancelled),
+			))
+		}
+
+		let res = self.from_collator.poll_unpin(cx).map(|res| {
+			(
+				CollationEvent {
+					collator_id: self.collator_id.clone(),
+					pending_collation: self.pending_collation,
+				},
+				res.map_err(CollationFetchError::Request),
+			)
+		});
+
+		match &res {
+			Poll::Ready((_, Ok(request_v1::CollationFetchingResponse::Collation(..)))) => {
+				self.span.as_mut().map(|s| s.add_string_tag("success", "true"));
+			},
+			Poll::Ready((_, Err(_))) => {
+				self.span.as_mut().map(|s| s.add_string_tag("success", "false"));
+			},
+			_ => {},
+		};
+
+		res
+	}
+}
diff --git a/node/network/collator-protocol/src/validator_side/metrics.rs b/node/network/collator-protocol/src/validator_side/metrics.rs
index 947fe36550f1..d898a5e7cefd 100644
--- a/node/network/collator-protocol/src/validator_side/metrics.rs
+++ b/node/network/collator-protocol/src/validator_side/metrics.rs
@@ -50,7 +50,7 @@ impl Metrics {
 			.map(|metrics| metrics.collator_peer_count.set(collator_peers as u64));
 	}
 
-	/// Provide a timer for `PerRequest` structure which observes on drop.
+	/// Provide a timer for `CollationFetchRequest` structure which observes on drop.
 	pub fn time_collation_request_duration(
 		&self,
 	) -> Option<metrics::prometheus::prometheus::HistogramTimer> {
@@ -121,7 +121,7 @@ impl metrics::Metrics for Metrics {
 				prometheus::Histogram::with_opts(
 					prometheus::HistogramOpts::new(
 						"polkadot_parachain_collator_protocol_validator_collation_request_duration",
-						"Lifetime of the `PerRequest` structure",
+						"Lifetime of the `CollationFetchRequest` structure",
 					).buckets(vec![0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.75, 0.9, 1.0, 1.2, 1.5, 1.75]),
 				)?,
 				registry,
diff --git a/node/network/collator-protocol/src/validator_side/mod.rs b/node/network/collator-protocol/src/validator_side/mod.rs
index 6a20e6dd413c..9ae9e116d0f4 100644
--- a/node/network/collator-protocol/src/validator_side/mod.rs
+++ b/node/network/collator-protocol/src/validator_side/mod.rs
@@ -14,29 +14,24 @@
 // You should have received a copy of the GNU General Public License
 // along with Polkadot. If not, see <http://www.gnu.org/licenses/>.
-use always_assert::never; use futures::{ - channel::oneshot, - future::{BoxFuture, Fuse, FusedFuture}, - select, - stream::FuturesUnordered, - FutureExt, StreamExt, + channel::oneshot, future::BoxFuture, select, stream::FuturesUnordered, FutureExt, StreamExt, }; use futures_timer::Delay; use std::{ collections::{hash_map::Entry, HashMap, HashSet}, convert::TryInto, + future::Future, iter::FromIterator, - task::Poll, time::{Duration, Instant}, }; +use tokio_util::sync::CancellationToken; use sp_keystore::KeystorePtr; use polkadot_node_network_protocol::{ self as net_protocol, peer_set::{CollationVersion, PeerSet}, - request_response as req_res, request_response::{ outgoing::{Recipient, RequestError}, v1 as request_v1, vstaging as request_vstaging, OutgoingRequest, Requests, @@ -44,7 +39,7 @@ use polkadot_node_network_protocol::{ v1 as protocol_v1, vstaging as protocol_vstaging, OurView, PeerId, UnifiedReputationChange as Rep, Versioned, View, }; -use polkadot_node_primitives::{PoV, SignedFullStatement, Statement}; +use polkadot_node_primitives::{SignedFullStatement, Statement}; use polkadot_node_subsystem::{ jaeger, messages::{ @@ -56,13 +51,12 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::{ backing_implicit_view::View as ImplicitView, - metrics::prometheus::prometheus::HistogramTimer, reputation::{ReputationAggregator, REPUTATION_CHANGE_INTERVAL}, runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, }; use polkadot_primitives::{ - CandidateHash, CandidateReceipt, CollatorId, CoreState, Hash, Id as ParaId, - OccupiedCoreAssumption, PersistedValidationData, + CandidateHash, CollatorId, CoreState, Hash, Id as ParaId, OccupiedCoreAssumption, + PersistedValidationData, }; use crate::error::{Error, FetchError, Result, SecondingError}; @@ -73,8 +67,9 @@ mod collation; mod metrics; use collation::{ - fetched_collation_sanity_check, BlockedAdvertisement, CollationEvent, CollationStatus, - Collations, FetchedCollation, PendingCollation, PendingCollationFetch, ProspectiveCandidate, + fetched_collation_sanity_check, BlockedAdvertisement, CollationEvent, CollationFetchError, + CollationFetchRequest, CollationStatus, Collations, FetchedCollation, PendingCollation, + PendingCollationFetch, ProspectiveCandidate, }; #[cfg(test)] @@ -118,26 +113,6 @@ const MAX_UNSHARED_DOWNLOAD_TIME: Duration = Duration::from_millis(100); #[cfg(test)] const ACTIVITY_POLL: Duration = Duration::from_millis(10); -// How often to poll collation responses. -// This is a hack that should be removed in a refactoring. -// See https://github.com/paritytech/polkadot/issues/4182 -const CHECK_COLLATIONS_POLL: Duration = Duration::from_millis(5); - -struct PerRequest { - /// Responses from collator. - /// - /// The response payload is the same for both versions of protocol - /// and doesn't have vstaging alias for simplicity. - from_collator: - Fuse>>, - /// Sender to forward to initial requester. - to_requester: oneshot::Sender<(CandidateReceipt, PoV)>, - /// A jaeger span corresponding to the lifetime of the request. - span: Option, - /// A metric histogram for the lifetime of the request - _lifetime_timer: Option, -} - #[derive(Debug)] struct CollatingPeerState { collator_id: CollatorId, @@ -419,12 +394,11 @@ struct State { /// this includes assignments from the implicit view. current_assignments: HashMap, - /// The collations we have requested by relay parent and para id. 
- /// - /// For each relay parent and para id we may be connected to a number - /// of collators each of those may have advertised a different collation. - /// So we group such cases here. - requested_collations: HashMap, + /// The collations we have requested from collators. + collation_requests: FuturesUnordered, + + /// Cancellation handles for the collation fetch requests. + collation_requests_cancel_handles: HashMap, /// Metrics. metrics: Metrics, @@ -440,9 +414,6 @@ struct State { /// requests to backing on new backed candidates and activations. blocked_advertisements: HashMap<(ParaId, Hash), Vec>, - /// Keep track of all fetch collation requests - collation_fetches: FuturesUnordered>, - /// When a timer in this `FuturesUnordered` triggers, we should dequeue the next request /// attempt in the corresponding `collations_per_relay_parent`. /// @@ -595,15 +566,13 @@ async fn fetch_collation( pc: PendingCollation, id: CollatorId, ) -> std::result::Result<(), FetchError> { - let (tx, rx) = oneshot::channel(); - let PendingCollation { relay_parent, peer_id, prospective_candidate, .. } = pc; let candidate_hash = prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); let peer_data = state.peer_data.get(&peer_id).ok_or(FetchError::UnknownPeer)?; if peer_data.has_advertised(&relay_parent, candidate_hash) { - request_collation(sender, state, pc, id.clone(), peer_data.version, tx).await?; + request_collation(sender, state, pc, id.clone(), peer_data.version).await?; let timeout = |collator_id, candidate_hash, relay_parent| async move { Delay::new(MAX_UNSHARED_DOWNLOAD_TIME).await; (collator_id, candidate_hash, relay_parent) @@ -611,7 +580,6 @@ async fn fetch_collation( state .collation_fetch_timeouts .push(timeout(id.clone(), candidate_hash, relay_parent).boxed()); - state.collation_fetches.push(rx.map(move |r| ((id, pc), r)).boxed()); Ok(()) } else { @@ -684,16 +652,19 @@ fn handle_peer_view_change(state: &mut State, peer_id: PeerId, view: View) { &state.per_relay_parent, view, ); - state - .requested_collations - .retain(|pc, _| pc.peer_id != peer_id || peer_data.has_advertised(&pc.relay_parent, None)); + state.collation_requests_cancel_handles.retain(|pc, handle| { + let keep = pc.peer_id != peer_id || peer_data.has_advertised(&pc.relay_parent, None); + if !keep { + handle.cancel(); + } + keep + }); } /// Request a collation from the network. /// This function will /// - Check for duplicate requests. /// - Check if the requested collation is in our view. -/// - Update `PerRequest` records with the `result` field if necessary. /// And as such invocations of this function may rely on that. 
async fn request_collation( sender: &mut impl overseer::CollatorProtocolSenderTrait, @@ -701,9 +672,8 @@ async fn request_collation( pending_collation: PendingCollation, collator_id: CollatorId, peer_protocol_version: CollationVersion, - result: oneshot::Sender<(CandidateReceipt, PoV)>, ) -> std::result::Result<(), FetchError> { - if state.requested_collations.contains_key(&pending_collation) { + if state.collation_requests_cancel_handles.contains_key(&pending_collation) { return Err(FetchError::AlreadyRequested) } @@ -739,9 +709,12 @@ async fn request_collation( _ => return Err(FetchError::ProtocolMismatch), }; - let per_request = PerRequest { - from_collator: response_recv.fuse(), - to_requester: result, + let cancellation_token = CancellationToken::new(); + let collation_request = CollationFetchRequest { + pending_collation, + collator_id: collator_id.clone(), + from_collator: response_recv.boxed(), + cancellation_token: cancellation_token.clone(), span: state .span_per_relay_parent .get(&relay_parent) @@ -749,7 +722,10 @@ async fn request_collation( _lifetime_timer: state.metrics.time_collation_request_duration(), }; - state.requested_collations.insert(pending_collation, per_request); + state.collation_requests.push(collation_request); + state + .collation_requests_cancel_handles + .insert(pending_collation, cancellation_token); gum::debug!( target: LOG_TARGET, @@ -1351,7 +1327,13 @@ where remove_outgoing(&mut state.current_assignments, per_relay_parent); } - state.requested_collations.retain(|k, _| k.relay_parent != removed); + state.collation_requests_cancel_handles.retain(|pc, handle| { + let keep = pc.relay_parent != removed; + if !keep { + handle.cancel(); + } + keep + }); state.fetched_candidates.retain(|k, _| k.relay_parent != removed); state.span_per_relay_parent.remove(&removed); } @@ -1504,8 +1486,9 @@ async fn process_msg( }, }; let fetched_collation = FetchedCollation::from(&receipt.to_plain()); - if let Some(collation_event) = state.fetched_candidates.remove(&fetched_collation) { - let (collator_id, pending_collation) = collation_event; + if let Some(CollationEvent { collator_id, pending_collation }) = + state.fetched_candidates.remove(&fetched_collation) + { let PendingCollation { relay_parent, peer_id, prospective_candidate, .. } = pending_collation; note_good_collation( @@ -1557,9 +1540,9 @@ async fn process_msg( let candidate_hash = fetched_collation.candidate_hash; let id = match state.fetched_candidates.entry(fetched_collation) { Entry::Occupied(entry) - if entry.get().1.commitments_hash == + if entry.get().pending_collation.commitments_hash == Some(candidate_receipt.commitments_hash) => - entry.remove().0, + entry.remove().collator_id, Entry::Occupied(_) => { gum::error!( target: LOG_TARGET, @@ -1616,9 +1599,6 @@ async fn run_inner( let next_inactivity_stream = tick_stream(ACTIVITY_POLL); futures::pin_mut!(next_inactivity_stream); - let check_collations_stream = tick_stream(CHECK_COLLATIONS_POLL); - futures::pin_mut!(check_collations_stream); - loop { select! 
{ _ = reputation_delay => { @@ -1643,28 +1623,40 @@ async fn run_inner( _ = next_inactivity_stream.next() => { disconnect_inactive_peers(ctx.sender(), &eviction_policy, &state.peer_data).await; } - res = state.collation_fetches.select_next_some() => { - let (collator_id, pc) = res.0.clone(); + + resp = state.collation_requests.select_next_some() => { + let res = match handle_collation_fetch_response(&mut state, resp).await { + Err(Some((peer_id, rep))) => { + modify_reputation(&mut state.reputation, ctx.sender(), peer_id, rep).await; + continue + }, + Err(None) => { + continue + }, + Ok(res) => res + }; + + let CollationEvent {collator_id, pending_collation} = res.collation_event.clone(); if let Err(err) = kick_off_seconding(&mut ctx, &mut state, res).await { gum::warn!( target: LOG_TARGET, - relay_parent = ?pc.relay_parent, - para_id = ?pc.para_id, - peer_id = ?pc.peer_id, + relay_parent = ?pending_collation.relay_parent, + para_id = ?pending_collation.para_id, + peer_id = ?pending_collation.peer_id, error = %err, "Seconding aborted due to an error", ); if err.is_malicious() { // Report malicious peer. - modify_reputation(&mut state.reputation, ctx.sender(), pc.peer_id, COST_REPORT_BAD).await; + modify_reputation(&mut state.reputation, ctx.sender(), pending_collation.peer_id, COST_REPORT_BAD).await; } let maybe_candidate_hash = - pc.prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); + pending_collation.prospective_candidate.as_ref().map(ProspectiveCandidate::candidate_hash); dequeue_next_collation_and_fetch( &mut ctx, &mut state, - pc.relay_parent, + pending_collation.relay_parent, (collator_id, maybe_candidate_hash), ) .await; @@ -1686,47 +1678,12 @@ async fn run_inner( ) .await; } - _ = check_collations_stream.next() => { - let reputation_changes = poll_requests( - &mut state.requested_collations, - &state.metrics, - &state.span_per_relay_parent, - ).await; - - for (peer_id, rep) in reputation_changes { - modify_reputation(&mut state.reputation, ctx.sender(), peer_id, rep).await; - } - }, } } Ok(()) } -async fn poll_requests( - requested_collations: &mut HashMap, - metrics: &Metrics, - span_per_relay_parent: &HashMap, -) -> Vec<(PeerId, Rep)> { - let mut retained_requested = HashSet::new(); - let mut reputation_changes = Vec::new(); - for (pending_collation, per_req) in requested_collations.iter_mut() { - // Despite the await, this won't block on the response itself. - let result = - poll_collation_response(metrics, span_per_relay_parent, pending_collation, per_req) - .await; - - if !result.is_ready() { - retained_requested.insert(*pending_collation); - } - if let CollationFetchResult::Error(Some(rep)) = result { - reputation_changes.push((pending_collation.peer_id, rep)); - } - } - requested_collations.retain(|k, _| retained_requested.contains(k)); - reputation_changes -} - /// Dequeue another collation and fetch. 
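 /// It is called when the current fetch for a relay parent fails, is found
 /// invalid, or times out, giving the next queued advertisement a chance.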
#[overseer::contextbounds(CollatorProtocol, prefix = self::overseer)] async fn dequeue_next_collation_and_fetch( @@ -1809,10 +1766,10 @@ where async fn kick_off_seconding( ctx: &mut Context, state: &mut State, - (mut collation_event, res): PendingCollationFetch, + PendingCollationFetch { mut collation_event, candidate_receipt, pov }: PendingCollationFetch, ) -> std::result::Result<(), SecondingError> { - let relay_parent = collation_event.1.relay_parent; - let para_id = collation_event.1.para_id; + let pending_collation = collation_event.pending_collation; + let relay_parent = pending_collation.relay_parent; let per_relay_parent = match state.per_relay_parent.get_mut(&relay_parent) { Some(state) => state, @@ -1829,39 +1786,43 @@ async fn kick_off_seconding( let collations = &mut per_relay_parent.collations; let relay_parent_mode = per_relay_parent.prospective_parachains_mode; - let (candidate_receipt, pov) = res?; - let fetched_collation = FetchedCollation::from(&candidate_receipt); if let Entry::Vacant(entry) = state.fetched_candidates.entry(fetched_collation) { - collation_event.1.commitments_hash = Some(candidate_receipt.commitments_hash); - - let pvd = match (relay_parent_mode, collation_event.1.prospective_candidate) { - ( - ProspectiveParachainsMode::Enabled { .. }, - Some(ProspectiveCandidate { parent_head_data_hash, .. }), - ) => - request_prospective_validation_data( - ctx.sender(), - relay_parent, - parent_head_data_hash, - para_id, - ) - .await?, - (ProspectiveParachainsMode::Disabled, _) => - request_persisted_validation_data( - ctx.sender(), - candidate_receipt.descriptor().relay_parent, - candidate_receipt.descriptor().para_id, - ) - .await?, - _ => { - // `handle_advertisement` checks for protocol mismatch. - return Ok(()) - }, - } - .ok_or(SecondingError::PersistedValidationDataNotFound)?; + collation_event.pending_collation.commitments_hash = + Some(candidate_receipt.commitments_hash); + + let pvd = + match (relay_parent_mode, collation_event.pending_collation.prospective_candidate) { + ( + ProspectiveParachainsMode::Enabled { .. }, + Some(ProspectiveCandidate { parent_head_data_hash, .. }), + ) => + request_prospective_validation_data( + ctx.sender(), + relay_parent, + parent_head_data_hash, + pending_collation.para_id, + ) + .await?, + (ProspectiveParachainsMode::Disabled, _) => + request_persisted_validation_data( + ctx.sender(), + candidate_receipt.descriptor().relay_parent, + candidate_receipt.descriptor().para_id, + ) + .await?, + _ => { + // `handle_advertisement` checks for protocol mismatch. + return Ok(()) + }, + } + .ok_or(SecondingError::PersistedValidationDataNotFound)?; - fetched_collation_sanity_check(&collation_event.1, &candidate_receipt, &pvd)?; + fetched_collation_sanity_check( + &collation_event.pending_collation, + &candidate_receipt, + &pvd, + )?; ctx.send_message(CandidateBackingMessage::Second( relay_parent, @@ -1897,147 +1858,119 @@ async fn disconnect_inactive_peers( } } -enum CollationFetchResult { - /// The collation is still being fetched. - Pending, - /// The collation was fetched successfully. - Success, - /// An error occurred when fetching a collation or it was invalid. - /// A given reputation change should be applied to the peer. - Error(Option), -} - -impl CollationFetchResult { - fn is_ready(&self) -> bool { - !matches!(self, Self::Pending) - } -} - -/// Poll collation response, return immediately if there is none. -/// -/// Ready responses are handled, by logging and by -/// forwarding proper responses to the requester. 
-async fn poll_collation_response(
-	metrics: &Metrics,
-	spans: &HashMap<Hash, PerLeafSpan>,
-	pending_collation: &PendingCollation,
-	per_req: &mut PerRequest,
-) -> CollationFetchResult {
-	if never!(per_req.from_collator.is_terminated()) {
-		gum::error!(
-			target: LOG_TARGET,
-			"We remove pending responses once received, this should not happen."
-		);
-		return CollationFetchResult::Success
-	}
-
-	if let Poll::Ready(response) = futures::poll!(&mut per_req.from_collator) {
-		let _span = spans
-			.get(&pending_collation.relay_parent)
-			.map(|s| s.child("received-collation"));
-		let _timer = metrics.time_handle_collation_request_result();
-
-		let mut metrics_result = Err(());
-		let mut success = "false";
+/// Handle a collation fetch response.
+async fn handle_collation_fetch_response(
+	state: &mut State,
+	response: <CollationFetchRequest as Future>::Output,
+) -> std::result::Result<PendingCollationFetch, Option<(PeerId, Rep)>> {
+	let (CollationEvent { collator_id, pending_collation }, response) = response;
+	// Remove the cancellation handle, as the future already completed.
+	state.collation_requests_cancel_handles.remove(&pending_collation);
+
+	let response = match response {
+		Err(CollationFetchError::Cancelled) => {
+			gum::debug!(
+				target: LOG_TARGET,
+				hash = ?pending_collation.relay_parent,
+				para_id = ?pending_collation.para_id,
+				peer_id = ?pending_collation.peer_id,
+				"Request was cancelled from the validator side"
+			);
+			return Err(None)
+		},
+		Err(CollationFetchError::Request(req_error)) => Err(req_error),
+		Ok(resp) => Ok(resp),
+	};

-		let result = match response {
-			Err(RequestError::InvalidResponse(err)) => {
-				gum::warn!(
-					target: LOG_TARGET,
-					hash = ?pending_collation.relay_parent,
-					para_id = ?pending_collation.para_id,
-					peer_id = ?pending_collation.peer_id,
-					err = ?err,
-					"Collator provided response that could not be decoded"
-				);
-				CollationFetchResult::Error(Some(COST_CORRUPTED_MESSAGE))
-			},
-			Err(err) if err.is_timed_out() => {
-				gum::debug!(
-					target: LOG_TARGET,
-					hash = ?pending_collation.relay_parent,
-					para_id = ?pending_collation.para_id,
-					peer_id = ?pending_collation.peer_id,
-					"Request timed out"
-				);
-				// For now we don't want to change reputation on timeout, to mitigate issues like
-				// this: https://github.com/paritytech/polkadot/issues/4617
-				CollationFetchResult::Error(None)
-			},
-			Err(RequestError::NetworkError(err)) => {
-				gum::debug!(
-					target: LOG_TARGET,
-					hash = ?pending_collation.relay_parent,
-					para_id = ?pending_collation.para_id,
-					peer_id = ?pending_collation.peer_id,
-					err = ?err,
-					"Fetching collation failed due to network error"
-				);
-				// A minor decrease in reputation for any network failure seems
-				// sensible. In theory this could be exploited, by DoSing this node,
-				// which would result in reduced reputation for proper nodes, but the
-				// same can happen for penalties on timeouts, which we also have.
-				CollationFetchResult::Error(Some(COST_NETWORK_ERROR))
-			},
-			Err(RequestError::Canceled(err)) => {
-				gum::debug!(
-					target: LOG_TARGET,
-					hash = ?pending_collation.relay_parent,
-					para_id = ?pending_collation.para_id,
-					peer_id = ?pending_collation.peer_id,
-					err = ?err,
-					"Canceled should be handled by `is_timed_out` above - this is a bug!"
- ); - CollationFetchResult::Error(None) - }, - Ok(request_v1::CollationFetchingResponse::Collation(receipt, _)) - if receipt.descriptor().para_id != pending_collation.para_id => - { - gum::debug!( - target: LOG_TARGET, - expected_para_id = ?pending_collation.para_id, - got_para_id = ?receipt.descriptor().para_id, - peer_id = ?pending_collation.peer_id, - "Got wrong para ID for requested collation." - ); + let _span = state + .span_per_relay_parent + .get(&pending_collation.relay_parent) + .map(|s| s.child("received-collation")); + let _timer = state.metrics.time_handle_collation_request_result(); - CollationFetchResult::Error(Some(COST_WRONG_PARA)) - }, - Ok(request_v1::CollationFetchingResponse::Collation(receipt, pov)) => { - gum::debug!( - target: LOG_TARGET, - para_id = %pending_collation.para_id, - hash = ?pending_collation.relay_parent, - candidate_hash = ?receipt.hash(), - "Received collation", - ); - // Actual sending: - let _span = jaeger::Span::new(&pov, "received-collation"); - let (mut tx, _) = oneshot::channel(); - std::mem::swap(&mut tx, &mut (per_req.to_requester)); - let result = tx.send((receipt, pov)); + let mut metrics_result = Err(()); - if let Err(_) = result { - gum::warn!( - target: LOG_TARGET, - hash = ?pending_collation.relay_parent, - para_id = ?pending_collation.para_id, - peer_id = ?pending_collation.peer_id, - "Sending response back to requester failed (receiving side closed)" - ); - } else { - metrics_result = Ok(()); - success = "true"; - } + let result = match response { + Err(RequestError::InvalidResponse(err)) => { + gum::warn!( + target: LOG_TARGET, + hash = ?pending_collation.relay_parent, + para_id = ?pending_collation.para_id, + peer_id = ?pending_collation.peer_id, + err = ?err, + "Collator provided response that could not be decoded" + ); + Err(Some((pending_collation.peer_id, COST_CORRUPTED_MESSAGE))) + }, + Err(err) if err.is_timed_out() => { + gum::debug!( + target: LOG_TARGET, + hash = ?pending_collation.relay_parent, + para_id = ?pending_collation.para_id, + peer_id = ?pending_collation.peer_id, + "Request timed out" + ); + // For now we don't want to change reputation on timeout, to mitigate issues like + // this: https://github.com/paritytech/polkadot/issues/4617 + Err(None) + }, + Err(RequestError::NetworkError(err)) => { + gum::debug!( + target: LOG_TARGET, + hash = ?pending_collation.relay_parent, + para_id = ?pending_collation.para_id, + peer_id = ?pending_collation.peer_id, + err = ?err, + "Fetching collation failed due to network error" + ); + // A minor decrease in reputation for any network failure seems + // sensible. In theory this could be exploited, by DoSing this node, + // which would result in reduced reputation for proper nodes, but the + // same can happen for penalties on timeouts, which we also have. + Err(Some((pending_collation.peer_id, COST_NETWORK_ERROR))) + }, + Err(RequestError::Canceled(err)) => { + gum::debug!( + target: LOG_TARGET, + hash = ?pending_collation.relay_parent, + para_id = ?pending_collation.para_id, + peer_id = ?pending_collation.peer_id, + err = ?err, + "Canceled should be handled by `is_timed_out` above - this is a bug!" + ); + Err(None) + }, + Ok(request_v1::CollationFetchingResponse::Collation(receipt, _)) + if receipt.descriptor().para_id != pending_collation.para_id => + { + gum::debug!( + target: LOG_TARGET, + expected_para_id = ?pending_collation.para_id, + got_para_id = ?receipt.descriptor().para_id, + peer_id = ?pending_collation.peer_id, + "Got wrong para ID for requested collation." 
+ ); - CollationFetchResult::Success - }, - }; - metrics.on_request(metrics_result); - per_req.span.as_mut().map(|s| s.add_string_tag("success", success)); + Err(Some((pending_collation.peer_id, COST_WRONG_PARA))) + }, + Ok(request_v1::CollationFetchingResponse::Collation(candidate_receipt, pov)) => { + gum::debug!( + target: LOG_TARGET, + para_id = %pending_collation.para_id, + hash = ?pending_collation.relay_parent, + candidate_hash = ?candidate_receipt.hash(), + "Received collation", + ); + let _span = jaeger::Span::new(&pov, "received-collation"); - result - } else { - CollationFetchResult::Pending - } + metrics_result = Ok(()); + Ok(PendingCollationFetch { + collation_event: CollationEvent { collator_id, pending_collation }, + candidate_receipt, + pov, + }) + }, + }; + state.metrics.on_request(metrics_result); + result } diff --git a/node/network/collator-protocol/src/validator_side/tests/mod.rs b/node/network/collator-protocol/src/validator_side/tests/mod.rs index 0ca87ae4fb19..fbc19d497659 100644 --- a/node/network/collator-protocol/src/validator_side/tests/mod.rs +++ b/node/network/collator-protocol/src/validator_side/tests/mod.rs @@ -28,7 +28,7 @@ use polkadot_node_network_protocol::{ request_response::{Requests, ResponseSender}, ObservedRole, }; -use polkadot_node_primitives::BlockData; +use polkadot_node_primitives::{BlockData, PoV}; use polkadot_node_subsystem::{ errors::RuntimeApiError, messages::{AllMessages, ReportPeerMessage, RuntimeApiMessage, RuntimeApiRequest}, @@ -36,8 +36,8 @@ use polkadot_node_subsystem::{ use polkadot_node_subsystem_test_helpers as test_helpers; use polkadot_node_subsystem_util::{reputation::add_reputation, TimeoutExt}; use polkadot_primitives::{ - CollatorPair, CoreState, GroupIndex, GroupRotationInfo, HeadData, OccupiedCore, - PersistedValidationData, ScheduledCore, ValidatorId, ValidatorIndex, + CandidateReceipt, CollatorPair, CoreState, GroupIndex, GroupRotationInfo, HeadData, + OccupiedCore, PersistedValidationData, ScheduledCore, ValidatorId, ValidatorIndex, }; use polkadot_primitives_test_helpers::{ dummy_candidate_descriptor, dummy_candidate_receipt_bad_sig, dummy_hash, From 89f600d59d320966e32db68959ef83461af97488 Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Mon, 10 Jul 2023 17:43:09 +0200 Subject: [PATCH 60/76] Update collation generation for asynchronous backing (#7405) * break candidate receipt construction and distribution into own function * update implementers' guide to include SubmitCollation * implement SubmitCollation for collation-generation * fmt * fix test compilation & remove unnecessary submodule * add some TODOs for a test suite. 
* Update roadmap/implementers-guide/src/types/overseer-protocol.md Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> * add new test harness and first test * refactor to avoid requiring background sender * ensure collation gets packaged and distributed * tests for the fallback case with no hint * add parent rp-number hint tests * fmt * update uses of CollationGenerationConfig * fix remaining test * address review comments * use subsystemsender for background tasks * fmt * remove ValidationCodeHashHint and related tests --------- Co-authored-by: Andrei Sandu <54316454+sandreim@users.noreply.github.com> --- Cargo.lock | 2 + node/collation-generation/Cargo.toml | 2 + node/collation-generation/src/lib.rs | 364 ++++--- node/collation-generation/src/metrics.rs | 15 + node/collation-generation/src/tests.rs | 984 ++++++++++-------- node/overseer/src/tests.rs | 2 +- node/primitives/src/lib.rs | 42 +- node/subsystem-types/src/messages.rs | 8 +- node/test/service/src/lib.rs | 3 +- parachain/src/primitives.rs | 3 +- .../adder/collator/src/main.rs | 5 +- .../undying/collator/src/main.rs | 5 +- .../node/collators/collation-generation.md | 10 +- .../src/types/overseer-protocol.md | 51 + 14 files changed, 918 insertions(+), 578 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ea6dd3099e1a..b208f6b88470 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7153,6 +7153,7 @@ dependencies = [ name = "polkadot-node-collation-generation" version = "0.9.43" dependencies = [ + "assert_matches", "futures", "parity-scale-codec 3.6.1", "polkadot-erasure-coding", @@ -7163,6 +7164,7 @@ dependencies = [ "polkadot-primitives", "polkadot-primitives-test-helpers", "sp-core", + "sp-keyring", "sp-maybe-compressed-blob", "thiserror", "tracing-gum", diff --git a/node/collation-generation/Cargo.toml b/node/collation-generation/Cargo.toml index a7badc877d45..70fa0ef761b9 100644 --- a/node/collation-generation/Cargo.toml +++ b/node/collation-generation/Cargo.toml @@ -20,3 +20,5 @@ parity-scale-codec = { version = "3.6.1", default-features = false, features = [ [dev-dependencies] polkadot-node-subsystem-test-helpers = { path = "../subsystem-test-helpers" } test-helpers = { package = "polkadot-primitives-test-helpers", path = "../../primitives/test-helpers" } +assert_matches = "1.4.0" +sp-keyring = { git = "https://github.com/paritytech/substrate", branch = "master" } diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 5599a737059f..8ee5b897ccc1 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -29,12 +29,15 @@ #![deny(missing_docs)] -use futures::{channel::mpsc, future::FutureExt, join, select, sink::SinkExt, stream::StreamExt}; +use futures::{channel::oneshot, future::FutureExt, join, select}; use parity_scale_codec::Encode; -use polkadot_node_primitives::{AvailableData, CollationGenerationConfig, PoV}; +use polkadot_node_primitives::{ + AvailableData, Collation, CollationGenerationConfig, CollationSecondedSignal, PoV, + SubmitCollationParams, +}; use polkadot_node_subsystem::{ messages::{CollationGenerationMessage, CollatorProtocolMessage}, - overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, SpawnedSubsystem, + overseer, ActiveLeavesUpdate, FromOrchestra, OverseerSignal, RuntimeApiError, SpawnedSubsystem, SubsystemContext, SubsystemError, SubsystemResult, }; use polkadot_node_subsystem_util::{ @@ -43,7 +46,7 @@ use polkadot_node_subsystem_util::{ }; use polkadot_primitives::{ collator_signature_payload, 
CandidateCommitments, CandidateDescriptor, CandidateReceipt, - CoreState, Hash, Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, + CollatorPair, CoreState, Hash, Id as ParaId, OccupiedCoreAssumption, PersistedValidationData, ValidationCodeHash, }; use sp_core::crypto::Pair; @@ -84,26 +87,13 @@ impl CollationGenerationSubsystem { /// If `err_tx` is not `None`, errors are forwarded onto that channel as they occur. /// Otherwise, most are logged and then discarded. async fn run(mut self, mut ctx: Context) { - // when we activate new leaves, we spawn a bunch of sub-tasks, each of which is - // expected to generate precisely one message. We don't want to block the main loop - // at any point waiting for them all, so instead, we create a channel on which they can - // send those messages. We can then just monitor the channel and forward messages on it - // to the overseer here, via the context. - let (sender, receiver) = mpsc::channel(0); - - let mut receiver = receiver.fuse(); loop { select! { incoming = ctx.recv().fuse() => { - if self.handle_incoming::(incoming, &mut ctx, &sender).await { + if self.handle_incoming::(incoming, &mut ctx).await { break; } }, - msg = receiver.next() => { - if let Some(msg) = msg { - ctx.send_message(msg).await; - } - }, } } } @@ -116,7 +106,6 @@ impl CollationGenerationSubsystem { &mut self, incoming: SubsystemResult::Message>>, ctx: &mut Context, - sender: &mpsc::Sender, ) -> bool { match incoming { Ok(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate { @@ -131,7 +120,6 @@ impl CollationGenerationSubsystem { activated.into_iter().map(|v| v.hash), ctx, metrics, - sender, ) .await { @@ -152,6 +140,21 @@ impl CollationGenerationSubsystem { } false }, + Ok(FromOrchestra::Communication { + msg: CollationGenerationMessage::SubmitCollation(params), + }) => { + if let Some(config) = &self.config { + if let Err(err) = + handle_submit_collation(params, config, ctx, &self.metrics).await + { + gum::error!(target: LOG_TARGET, ?err, "Failed to submit collation"); + } + } else { + gum::error!(target: LOG_TARGET, "Collation submitted before initialization"); + } + + false + }, Ok(FromOrchestra::Signal(OverseerSignal::BlockFinalized(..))) => false, Err(err) => { gum::error!( @@ -185,11 +188,14 @@ async fn handle_new_activations( activated: impl IntoIterator, ctx: &mut Context, metrics: Metrics, - sender: &mpsc::Sender, ) -> crate::error::Result<()> { // follow the procedure from the guide: // https://paritytech.github.io/polkadot/book/node/collators/collation-generation.html + if config.collator.is_none() { + return Ok(()) + } + let _overall_timer = metrics.time_new_activations(); for relay_parent in activated { @@ -268,7 +274,7 @@ async fn handle_new_activations( }, }; - let validation_code_hash = match obtain_current_validation_code_hash( + let validation_code_hash = match obtain_validation_code_hash_with_assumption( relay_parent, scheduled_core.para_id, assumption, @@ -291,16 +297,18 @@ async fn handle_new_activations( }; let task_config = config.clone(); - let mut task_sender = sender.clone(); let metrics = metrics.clone(); + let mut task_sender = ctx.sender().clone(); ctx.spawn( "collation-builder", Box::pin(async move { - let persisted_validation_data_hash = validation_data.hash(); - let parent_head_data_hash = validation_data.parent_head.hash(); + let collator_fn = match task_config.collator.as_ref() { + Some(x) => x, + None => return, + }; let (collation, result_sender) = - match (task_config.collator)(relay_parent, 
&validation_data).await { + match collator_fn(relay_parent, &validation_data).await { Some(collation) => collation.into_inner(), None => { gum::debug!( @@ -312,108 +320,21 @@ async fn handle_new_activations( }, }; - // Apply compression to the block data. - let pov = { - let pov = collation.proof_of_validity.into_compressed(); - let encoded_size = pov.encoded_size(); - - // As long as `POV_BOMB_LIMIT` is at least `max_pov_size`, this ensures - // that honest collators never produce a PoV which is uncompressed. - // - // As such, honest collators never produce an uncompressed PoV which starts with - // a compression magic number, which would lead validators to reject the collation. - if encoded_size > validation_data.max_pov_size as usize { - gum::debug!( - target: LOG_TARGET, - para_id = %scheduled_core.para_id, - size = encoded_size, - max_size = validation_data.max_pov_size, - "PoV exceeded maximum size" - ); - - return - } - - pov - }; - - let pov_hash = pov.hash(); - - let signature_payload = collator_signature_payload( - &relay_parent, - &scheduled_core.para_id, - &persisted_validation_data_hash, - &pov_hash, - &validation_code_hash, - ); - - let erasure_root = - match erasure_root(n_validators, validation_data, pov.clone()) { - Ok(erasure_root) => erasure_root, - Err(err) => { - gum::error!( - target: LOG_TARGET, - para_id = %scheduled_core.para_id, - err = ?err, - "failed to calculate erasure root", - ); - return - }, - }; - - let commitments = CandidateCommitments { - upward_messages: collation.upward_messages, - horizontal_messages: collation.horizontal_messages, - new_validation_code: collation.new_validation_code, - head_data: collation.head_data, - processed_downward_messages: collation.processed_downward_messages, - hrmp_watermark: collation.hrmp_watermark, - }; - - let ccr = CandidateReceipt { - commitments_hash: commitments.hash(), - descriptor: CandidateDescriptor { - signature: task_config.key.sign(&signature_payload), - para_id: scheduled_core.para_id, + construct_and_distribute_receipt( + PreparedCollation { + collation, + para_id: task_config.para_id, relay_parent, - collator: task_config.key.public(), - persisted_validation_data_hash, - pov_hash, - erasure_root, - para_head: commitments.head_data.hash(), + validation_data, validation_code_hash, + n_validators, }, - }; - - gum::debug!( - target: LOG_TARGET, - candidate_hash = ?ccr.hash(), - ?pov_hash, - ?relay_parent, - para_id = %scheduled_core.para_id, - "candidate is generated", - ); - metrics.on_collation_generated(); - - if let Err(err) = task_sender - .send( - CollatorProtocolMessage::DistributeCollation( - ccr, - parent_head_data_hash, - pov, - result_sender, - ) - .into(), - ) - .await - { - gum::warn!( - target: LOG_TARGET, - para_id = %scheduled_core.para_id, - err = ?err, - "failed to send collation result", - ); - } + task_config.key.clone(), + &mut task_sender, + result_sender, + &metrics, + ) + .await; }), )?; } @@ -422,14 +343,199 @@ async fn handle_new_activations( Ok(()) } -async fn obtain_current_validation_code_hash( +#[overseer::contextbounds(CollationGeneration, prefix = self::overseer)] +async fn handle_submit_collation( + params: SubmitCollationParams, + config: &CollationGenerationConfig, + ctx: &mut Context, + metrics: &Metrics, +) -> crate::error::Result<()> { + let _timer = metrics.time_submit_collation(); + + let SubmitCollationParams { + relay_parent, + collation, + parent_head, + validation_code_hash, + result_sender, + } = params; + + let validators = request_validators(relay_parent, 
ctx.sender()).await.await??; + let n_validators = validators.len(); + + // We need to swap the parent-head data, but all other fields here will be correct. + let mut validation_data = match request_persisted_validation_data( + relay_parent, + config.para_id, + OccupiedCoreAssumption::TimedOut, + ctx.sender(), + ) + .await + .await?? + { + Some(v) => v, + None => { + gum::debug!( + target: LOG_TARGET, + relay_parent = ?relay_parent, + our_para = %config.para_id, + "No validation data for para - does it exist at this relay-parent?", + ); + return Ok(()) + }, + }; + + validation_data.parent_head = parent_head; + + let collation = PreparedCollation { + collation, + relay_parent, + para_id: config.para_id, + validation_data, + validation_code_hash, + n_validators, + }; + + construct_and_distribute_receipt( + collation, + config.key.clone(), + ctx.sender(), + result_sender, + metrics, + ) + .await; + + Ok(()) +} + +struct PreparedCollation { + collation: Collation, + para_id: ParaId, + relay_parent: Hash, + validation_data: PersistedValidationData, + validation_code_hash: ValidationCodeHash, + n_validators: usize, +} + +/// Takes a prepared collation, along with its context, and produces a candidate receipt +/// which is distributed to validators. +async fn construct_and_distribute_receipt( + collation: PreparedCollation, + key: CollatorPair, + sender: &mut impl overseer::CollationGenerationSenderTrait, + result_sender: Option>, + metrics: &Metrics, +) { + let PreparedCollation { + collation, + para_id, + relay_parent, + validation_data, + validation_code_hash, + n_validators, + } = collation; + + let persisted_validation_data_hash = validation_data.hash(); + let parent_head_data_hash = validation_data.parent_head.hash(); + + // Apply compression to the block data. + let pov = { + let pov = collation.proof_of_validity.into_compressed(); + let encoded_size = pov.encoded_size(); + + // As long as `POV_BOMB_LIMIT` is at least `max_pov_size`, this ensures + // that honest collators never produce a PoV which is uncompressed. + // + // As such, honest collators never produce an uncompressed PoV which starts with + // a compression magic number, which would lead validators to reject the collation. 
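+		// Note: `encoded_size` is measured *after* compression, so the
+		// `max_pov_size` limit applies to the bytes actually shipped to
+		// validators.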
+ if encoded_size > validation_data.max_pov_size as usize { + gum::debug!( + target: LOG_TARGET, + para_id = %para_id, + size = encoded_size, + max_size = validation_data.max_pov_size, + "PoV exceeded maximum size" + ); + + return + } + + pov + }; + + let pov_hash = pov.hash(); + + let signature_payload = collator_signature_payload( + &relay_parent, + ¶_id, + &persisted_validation_data_hash, + &pov_hash, + &validation_code_hash, + ); + + let erasure_root = match erasure_root(n_validators, validation_data, pov.clone()) { + Ok(erasure_root) => erasure_root, + Err(err) => { + gum::error!( + target: LOG_TARGET, + para_id = %para_id, + err = ?err, + "failed to calculate erasure root", + ); + return + }, + }; + + let commitments = CandidateCommitments { + upward_messages: collation.upward_messages, + horizontal_messages: collation.horizontal_messages, + new_validation_code: collation.new_validation_code, + head_data: collation.head_data, + processed_downward_messages: collation.processed_downward_messages, + hrmp_watermark: collation.hrmp_watermark, + }; + + let ccr = CandidateReceipt { + commitments_hash: commitments.hash(), + descriptor: CandidateDescriptor { + signature: key.sign(&signature_payload), + para_id, + relay_parent, + collator: key.public(), + persisted_validation_data_hash, + pov_hash, + erasure_root, + para_head: commitments.head_data.hash(), + validation_code_hash, + }, + }; + + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?ccr.hash(), + ?pov_hash, + ?relay_parent, + para_id = %para_id, + "candidate is generated", + ); + metrics.on_collation_generated(); + + sender + .send_message(CollatorProtocolMessage::DistributeCollation( + ccr, + parent_head_data_hash, + pov, + result_sender, + )) + .await; +} + +async fn obtain_validation_code_hash_with_assumption( relay_parent: Hash, para_id: ParaId, assumption: OccupiedCoreAssumption, sender: &mut impl overseer::CollationGenerationSenderTrait, -) -> Result, crate::error::Error> { - use polkadot_node_subsystem::RuntimeApiError; - +) -> crate::error::Result> { match request_validation_code_hash(relay_parent, para_id, assumption, sender) .await .await? diff --git a/node/collation-generation/src/metrics.rs b/node/collation-generation/src/metrics.rs index cb9e4a0c8e85..c7690ec82c4f 100644 --- a/node/collation-generation/src/metrics.rs +++ b/node/collation-generation/src/metrics.rs @@ -22,6 +22,7 @@ pub(crate) struct MetricsInner { pub(crate) new_activations_overall: prometheus::Histogram, pub(crate) new_activations_per_relay_parent: prometheus::Histogram, pub(crate) new_activations_per_availability_core: prometheus::Histogram, + pub(crate) submit_collation: prometheus::Histogram, } /// `CollationGenerationSubsystem` metrics. @@ -57,6 +58,11 @@ impl Metrics { .as_ref() .map(|metrics| metrics.new_activations_per_availability_core.start_timer()) } + + /// Provide a timer for submitting a collation which updates on drop. 
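+	/// Returns `None` when metrics are disabled.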
+ pub fn time_submit_collation(&self) -> Option { + self.0.as_ref().map(|metrics| metrics.submit_collation.start_timer()) + } } impl metrics::Metrics for Metrics { @@ -96,6 +102,15 @@ impl metrics::Metrics for Metrics { )?, registry, )?, + submit_collation: prometheus::register( + prometheus::Histogram::with_opts( + prometheus::HistogramOpts::new( + "polkadot_parachain_collation_generation_submit_collation", + "Time spent preparing and submitting a collation to the network protocol", + ) + )?, + registry, + )?, }; Ok(Metrics(Some(metrics))) } diff --git a/node/collation-generation/src/tests.rs b/node/collation-generation/src/tests.rs index b2534bcf36c1..b7ff4ec2a576 100644 --- a/node/collation-generation/src/tests.rs +++ b/node/collation-generation/src/tests.rs @@ -14,472 +14,588 @@ // You should have received a copy of the GNU General Public License // along with Polkadot. If not, see . -mod handle_new_activations { - use super::super::*; - use ::test_helpers::{dummy_hash, dummy_head_data, dummy_validator}; - use futures::{ - lock::Mutex, - task::{Context as FuturesContext, Poll}, - Future, +use super::*; +use assert_matches::assert_matches; +use futures::{ + lock::Mutex, + task::{Context as FuturesContext, Poll}, + Future, +}; +use polkadot_node_primitives::{BlockData, Collation, CollationResult, MaybeCompressedPoV, PoV}; +use polkadot_node_subsystem::{ + errors::RuntimeApiError, + messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}, +}; +use polkadot_node_subsystem_test_helpers::{subsystem_test_harness, TestSubsystemContextHandle}; +use polkadot_node_subsystem_util::TimeoutExt; +use polkadot_primitives::{ + CollatorPair, HeadData, Id as ParaId, PersistedValidationData, ScheduledCore, ValidationCode, +}; +use sp_keyring::sr25519::Keyring as Sr25519Keyring; +use std::pin::Pin; +use test_helpers::{dummy_hash, dummy_head_data, dummy_validator}; + +type VirtualOverseer = TestSubsystemContextHandle; + +fn test_harness>(test: impl FnOnce(VirtualOverseer) -> T) { + let pool = sp_core::testing::TaskExecutor::new(); + let (context, virtual_overseer) = + polkadot_node_subsystem_test_helpers::make_subsystem_context(pool); + let subsystem = async move { + let subsystem = crate::CollationGenerationSubsystem::new(Metrics::default()); + + subsystem.run(context).await; }; - use polkadot_node_primitives::{ - BlockData, Collation, CollationResult, MaybeCompressedPoV, PoV, - }; - use polkadot_node_subsystem::{ - errors::RuntimeApiError, - messages::{AllMessages, RuntimeApiMessage, RuntimeApiRequest}, - }; - use polkadot_node_subsystem_test_helpers::{ - subsystem_test_harness, TestSubsystemContextHandle, - }; - use polkadot_primitives::{ - CollatorPair, Id as ParaId, PersistedValidationData, ScheduledCore, ValidationCode, - }; - use std::pin::Pin; - - fn test_collation() -> Collation { - Collation { - upward_messages: Default::default(), - horizontal_messages: Default::default(), - new_validation_code: None, - head_data: dummy_head_data(), - proof_of_validity: MaybeCompressedPoV::Raw(PoV { block_data: BlockData(Vec::new()) }), - processed_downward_messages: 0_u32, - hrmp_watermark: 0_u32.into(), - } - } - fn test_collation_compressed() -> Collation { - let mut collation = test_collation(); - let compressed = collation.proof_of_validity.clone().into_compressed(); - collation.proof_of_validity = MaybeCompressedPoV::Compressed(compressed); - collation - } + let test_fut = test(virtual_overseer); + + futures::pin_mut!(test_fut); + futures::executor::block_on(futures::future::join( + async move { + let 
mut virtual_overseer = test_fut.await; + // Ensure we have handled all responses. + if let Ok(Some(msg)) = virtual_overseer.rx.try_next() { + panic!("Did not handle all responses: {:?}", msg); + } + // Conclude. + virtual_overseer.send(FromOrchestra::Signal(OverseerSignal::Conclude)).await; + }, + subsystem, + )); +} - fn test_validation_data() -> PersistedValidationData { - let mut persisted_validation_data = PersistedValidationData::default(); - persisted_validation_data.max_pov_size = 1024; - persisted_validation_data +fn test_collation() -> Collation { + Collation { + upward_messages: Default::default(), + horizontal_messages: Default::default(), + new_validation_code: None, + head_data: dummy_head_data(), + proof_of_validity: MaybeCompressedPoV::Raw(PoV { block_data: BlockData(Vec::new()) }), + processed_downward_messages: 0_u32, + hrmp_watermark: 0_u32.into(), } +} - // Box + Unpin + Send - struct TestCollator; +fn test_collation_compressed() -> Collation { + let mut collation = test_collation(); + let compressed = collation.proof_of_validity.clone().into_compressed(); + collation.proof_of_validity = MaybeCompressedPoV::Compressed(compressed); + collation +} - impl Future for TestCollator { - type Output = Option; +fn test_validation_data() -> PersistedValidationData { + let mut persisted_validation_data = PersistedValidationData::default(); + persisted_validation_data.max_pov_size = 1024; + persisted_validation_data +} - fn poll(self: Pin<&mut Self>, _cx: &mut FuturesContext) -> Poll { - Poll::Ready(Some(CollationResult { collation: test_collation(), result_sender: None })) - } +// Box + Unpin + Send +struct TestCollator; + +impl Future for TestCollator { + type Output = Option; + + fn poll(self: Pin<&mut Self>, _cx: &mut FuturesContext) -> Poll { + Poll::Ready(Some(CollationResult { collation: test_collation(), result_sender: None })) } +} + +impl Unpin for TestCollator {} - impl Unpin for TestCollator {} +async fn overseer_recv(overseer: &mut VirtualOverseer) -> AllMessages { + const TIMEOUT: std::time::Duration = std::time::Duration::from_millis(2000); - fn test_config>(para_id: Id) -> Arc { - Arc::new(CollationGenerationConfig { - key: CollatorPair::generate().0, - collator: Box::new(|_: Hash, _vd: &PersistedValidationData| TestCollator.boxed()), - para_id: para_id.into(), - }) + overseer + .recv() + .timeout(TIMEOUT) + .await + .expect(&format!("{:?} is long enough to receive messages", TIMEOUT)) +} + +fn test_config>(para_id: Id) -> CollationGenerationConfig { + CollationGenerationConfig { + key: CollatorPair::generate().0, + collator: Some(Box::new(|_: Hash, _vd: &PersistedValidationData| TestCollator.boxed())), + para_id: para_id.into(), } +} - fn scheduled_core_for>(para_id: Id) -> ScheduledCore { - ScheduledCore { para_id: para_id.into(), collator: None } +fn test_config_no_collator>(para_id: Id) -> CollationGenerationConfig { + CollationGenerationConfig { + key: CollatorPair::generate().0, + collator: None, + para_id: para_id.into(), } +} - #[test] - fn requests_availability_per_relay_parent() { - let activated_hashes: Vec = - vec![[1; 32].into(), [4; 32].into(), [9; 32].into(), [16; 32].into()]; - - let requested_availability_cores = Arc::new(Mutex::new(Vec::new())); - - let overseer_requested_availability_cores = requested_availability_cores.clone(); - let overseer = |mut handle: TestSubsystemContextHandle| async move { - loop { - match handle.try_recv().await { - None => break, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, 
RuntimeApiRequest::AvailabilityCores(tx)))) => { - overseer_requested_availability_cores.lock().await.push(hash); - tx.send(Ok(vec![])).unwrap(); - } - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(_hash, RuntimeApiRequest::Validators(tx)))) => { - tx.send(Ok(vec![dummy_validator(); 3])).unwrap(); - } - Some(msg) => panic!("didn't expect any other overseer requests given no availability cores; got {:?}", msg), +fn scheduled_core_for>(para_id: Id) -> ScheduledCore { + ScheduledCore { para_id: para_id.into(), collator: None } +} + +#[test] +fn requests_availability_per_relay_parent() { + let activated_hashes: Vec = + vec![[1; 32].into(), [4; 32].into(), [9; 32].into(), [16; 32].into()]; + + let requested_availability_cores = Arc::new(Mutex::new(Vec::new())); + + let overseer_requested_availability_cores = requested_availability_cores.clone(); + let overseer = |mut handle: TestSubsystemContextHandle| async move { + loop { + match handle.try_recv().await { + None => break, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(hash, RuntimeApiRequest::AvailabilityCores(tx)))) => { + overseer_requested_availability_cores.lock().await.push(hash); + tx.send(Ok(vec![])).unwrap(); + } + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(_hash, RuntimeApiRequest::Validators(tx)))) => { + tx.send(Ok(vec![dummy_validator(); 3])).unwrap(); } + Some(msg) => panic!("didn't expect any other overseer requests given no availability cores; got {:?}", msg), } - }; - - let (tx, _rx) = mpsc::channel(0); - - let subsystem_activated_hashes = activated_hashes.clone(); - subsystem_test_harness(overseer, |mut ctx| async move { - handle_new_activations( - test_config(123u32), - subsystem_activated_hashes, - &mut ctx, - Metrics(None), - &tx, - ) - .await - .unwrap(); - }); - - let mut requested_availability_cores = Arc::try_unwrap(requested_availability_cores) - .expect("overseer should have shut down by now") - .into_inner(); - requested_availability_cores.sort(); + } + }; - assert_eq!(requested_availability_cores, activated_hashes); - } + let subsystem_activated_hashes = activated_hashes.clone(); + subsystem_test_harness(overseer, |mut ctx| async move { + handle_new_activations( + Arc::new(test_config(123u32)), + subsystem_activated_hashes, + &mut ctx, + Metrics(None), + ) + .await + .unwrap(); + }); + + let mut requested_availability_cores = Arc::try_unwrap(requested_availability_cores) + .expect("overseer should have shut down by now") + .into_inner(); + requested_availability_cores.sort(); + + assert_eq!(requested_availability_cores, activated_hashes); +} - #[test] - fn requests_validation_data_for_scheduled_matches() { - let activated_hashes: Vec = vec![ - Hash::repeat_byte(1), - Hash::repeat_byte(4), - Hash::repeat_byte(9), - Hash::repeat_byte(16), - ]; - - let requested_validation_data = Arc::new(Mutex::new(Vec::new())); - - let overseer_requested_validation_data = requested_validation_data.clone(); - let overseer = |mut handle: TestSubsystemContextHandle| async move { - loop { - match handle.try_recv().await { - None => break, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - hash, - RuntimeApiRequest::AvailabilityCores(tx), - ))) => { - tx.send(Ok(vec![ - CoreState::Free, - // this is weird, see explanation below - CoreState::Scheduled(scheduled_core_for( - (hash.as_fixed_bytes()[0] * 4) as u32, - )), - CoreState::Scheduled(scheduled_core_for( - (hash.as_fixed_bytes()[0] * 5) as u32, - )), - ])) - .unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - hash, 
- RuntimeApiRequest::PersistedValidationData( - _para_id, - _occupied_core_assumption, - tx, - ), - ))) => { - overseer_requested_validation_data.lock().await.push(hash); - tx.send(Ok(None)).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::Validators(tx), - ))) => { - tx.send(Ok(vec![dummy_validator(); 3])).unwrap(); - }, - Some(msg) => { - panic!("didn't expect any other overseer requests; got {:?}", msg) - }, - } +#[test] +fn requests_validation_data_for_scheduled_matches() { + let activated_hashes: Vec = vec![ + Hash::repeat_byte(1), + Hash::repeat_byte(4), + Hash::repeat_byte(9), + Hash::repeat_byte(16), + ]; + + let requested_validation_data = Arc::new(Mutex::new(Vec::new())); + + let overseer_requested_validation_data = requested_validation_data.clone(); + let overseer = |mut handle: TestSubsystemContextHandle| async move { + loop { + match handle.try_recv().await { + None => break, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::AvailabilityCores(tx), + ))) => { + tx.send(Ok(vec![ + CoreState::Free, + // this is weird, see explanation below + CoreState::Scheduled(scheduled_core_for( + (hash.as_fixed_bytes()[0] * 4) as u32, + )), + CoreState::Scheduled(scheduled_core_for( + (hash.as_fixed_bytes()[0] * 5) as u32, + )), + ])) + .unwrap(); + }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::PersistedValidationData( + _para_id, + _occupied_core_assumption, + tx, + ), + ))) => { + overseer_requested_validation_data.lock().await.push(hash); + tx.send(Ok(None)).unwrap(); + }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::Validators(tx), + ))) => { + tx.send(Ok(vec![dummy_validator(); 3])).unwrap(); + }, + Some(msg) => { + panic!("didn't expect any other overseer requests; got {:?}", msg) + }, } - }; + } + }; - let (tx, _rx) = mpsc::channel(0); + subsystem_test_harness(overseer, |mut ctx| async move { + handle_new_activations( + Arc::new(test_config(16)), + activated_hashes, + &mut ctx, + Metrics(None), + ) + .await + .unwrap(); + }); + + let requested_validation_data = Arc::try_unwrap(requested_validation_data) + .expect("overseer should have shut down by now") + .into_inner(); + + // the only activated hash should be from the 4 hash: + // each activated hash generates two scheduled cores: one with its value * 4, one with its value * 5 + // given that the test configuration has a `para_id` of 16, there's only one way to get that value: with the 4 + // hash. 
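+	// (Only `Hash::repeat_byte(4)` yields 16 under either rule: 4 * 4 == 16,
+	// while 1, 9 and 16 map to {4, 5}, {36, 45} and {64, 80} respectively.)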
+ assert_eq!(requested_validation_data, vec![[4; 32].into()]); +} - subsystem_test_harness(overseer, |mut ctx| async move { - handle_new_activations(test_config(16), activated_hashes, &mut ctx, Metrics(None), &tx) - .await - .unwrap(); - }); +#[test] +fn sends_distribute_collation_message() { + let activated_hashes: Vec = vec![ + Hash::repeat_byte(1), + Hash::repeat_byte(4), + Hash::repeat_byte(9), + Hash::repeat_byte(16), + ]; + + // empty vec doesn't allocate on the heap, so it's ok we throw it away + let to_collator_protocol = Arc::new(Mutex::new(Vec::new())); + let inner_to_collator_protocol = to_collator_protocol.clone(); + + let overseer = |mut handle: TestSubsystemContextHandle| async move { + loop { + match handle.try_recv().await { + None => break, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::AvailabilityCores(tx), + ))) => { + tx.send(Ok(vec![ + CoreState::Free, + // this is weird, see explanation below + CoreState::Scheduled(scheduled_core_for( + (hash.as_fixed_bytes()[0] * 4) as u32, + )), + CoreState::Scheduled(scheduled_core_for( + (hash.as_fixed_bytes()[0] * 5) as u32, + )), + ])) + .unwrap(); + }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::PersistedValidationData( + _para_id, + _occupied_core_assumption, + tx, + ), + ))) => { + tx.send(Ok(Some(test_validation_data()))).unwrap(); + }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::Validators(tx), + ))) => { + tx.send(Ok(vec![dummy_validator(); 3])).unwrap(); + }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ValidationCodeHash( + _para_id, + OccupiedCoreAssumption::Free, + tx, + ), + ))) => { + tx.send(Ok(Some(ValidationCode(vec![1, 2, 3]).hash()))).unwrap(); + }, + Some(msg @ AllMessages::CollatorProtocol(_)) => { + inner_to_collator_protocol.lock().await.push(msg); + }, + Some(msg) => { + panic!("didn't expect any other overseer requests; got {:?}", msg) + }, + } + } + }; - let requested_validation_data = Arc::try_unwrap(requested_validation_data) - .expect("overseer should have shut down by now") - .into_inner(); + let config = Arc::new(test_config(16)); + let subsystem_config = config.clone(); - // the only activated hash should be from the 4 hash: - // each activated hash generates two scheduled cores: one with its value * 4, one with its value * 5 - // given that the test configuration has a `para_id` of 16, there's only one way to get that value: with the 4 - // hash. - assert_eq!(requested_validation_data, vec![[4; 32].into()]); + subsystem_test_harness(overseer, |mut ctx| async move { + handle_new_activations(subsystem_config, activated_hashes, &mut ctx, Metrics(None)) + .await + .unwrap(); + }); + + let mut to_collator_protocol = Arc::try_unwrap(to_collator_protocol) + .expect("subsystem should have shut down by now") + .into_inner(); + + // we expect a single message to be sent, containing a candidate receipt. 
+ // we don't care too much about the `commitments_hash` right now, but let's ensure that we've calculated the + // correct descriptor + let expect_pov_hash = test_collation_compressed().proof_of_validity.into_compressed().hash(); + let expect_validation_data_hash = test_validation_data().hash(); + let expect_relay_parent = Hash::repeat_byte(4); + let expect_validation_code_hash = ValidationCode(vec![1, 2, 3]).hash(); + let expect_payload = collator_signature_payload( + &expect_relay_parent, + &config.para_id, + &expect_validation_data_hash, + &expect_pov_hash, + &expect_validation_code_hash, + ); + let expect_descriptor = CandidateDescriptor { + signature: config.key.sign(&expect_payload), + para_id: config.para_id, + relay_parent: expect_relay_parent, + collator: config.key.public(), + persisted_validation_data_hash: expect_validation_data_hash, + pov_hash: expect_pov_hash, + erasure_root: dummy_hash(), // this isn't something we're checking right now + para_head: test_collation().head_data.hash(), + validation_code_hash: expect_validation_code_hash, + }; + + assert_eq!(to_collator_protocol.len(), 1); + match AllMessages::from(to_collator_protocol.pop().unwrap()) { + AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation( + CandidateReceipt { descriptor, .. }, + _pov, + .., + )) => { + // signature generation is non-deterministic, so we can't just assert that the + // expected descriptor is correct. What we can do is validate that the produced + // descriptor has a valid signature, then just copy in the generated signature + // and check the rest of the fields for equality. + assert!(CollatorPair::verify( + &descriptor.signature, + &collator_signature_payload( + &descriptor.relay_parent, + &descriptor.para_id, + &descriptor.persisted_validation_data_hash, + &descriptor.pov_hash, + &descriptor.validation_code_hash, + ) + .as_ref(), + &descriptor.collator, + )); + let expect_descriptor = { + let mut expect_descriptor = expect_descriptor; + expect_descriptor.signature = descriptor.signature.clone(); + expect_descriptor.erasure_root = descriptor.erasure_root.clone(); + expect_descriptor + }; + assert_eq!(descriptor, expect_descriptor); + }, + _ => panic!("received wrong message type"), } +} - #[test] - fn sends_distribute_collation_message() { - let activated_hashes: Vec = vec![ - Hash::repeat_byte(1), - Hash::repeat_byte(4), - Hash::repeat_byte(9), - Hash::repeat_byte(16), - ]; - - let overseer = |mut handle: TestSubsystemContextHandle| async move { - loop { - match handle.try_recv().await { - None => break, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - hash, - RuntimeApiRequest::AvailabilityCores(tx), - ))) => { - tx.send(Ok(vec![ - CoreState::Free, - // this is weird, see explanation below - CoreState::Scheduled(scheduled_core_for( - (hash.as_fixed_bytes()[0] * 4) as u32, - )), - CoreState::Scheduled(scheduled_core_for( - (hash.as_fixed_bytes()[0] * 5) as u32, - )), - ])) - .unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::PersistedValidationData( - _para_id, - _occupied_core_assumption, - tx, - ), - ))) => { - tx.send(Ok(Some(test_validation_data()))).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::Validators(tx), - ))) => { - tx.send(Ok(vec![dummy_validator(); 3])).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::ValidationCodeHash( - _para_id, - OccupiedCoreAssumption::Free, - tx, - ), - 
))) => { - tx.send(Ok(Some(ValidationCode(vec![1, 2, 3]).hash()))).unwrap(); - }, - Some(msg) => { - panic!("didn't expect any other overseer requests; got {:?}", msg) - }, - } +#[test] +fn fallback_when_no_validation_code_hash_api() { + // This is a variant of the above test, but with the validation code hash API disabled. + + let activated_hashes: Vec = vec![ + Hash::repeat_byte(1), + Hash::repeat_byte(4), + Hash::repeat_byte(9), + Hash::repeat_byte(16), + ]; + + // empty vec doesn't allocate on the heap, so it's ok we throw it away + let to_collator_protocol = Arc::new(Mutex::new(Vec::new())); + let inner_to_collator_protocol = to_collator_protocol.clone(); + + let overseer = |mut handle: TestSubsystemContextHandle| async move { + loop { + match handle.try_recv().await { + None => break, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + hash, + RuntimeApiRequest::AvailabilityCores(tx), + ))) => { + tx.send(Ok(vec![ + CoreState::Free, + CoreState::Scheduled(scheduled_core_for( + (hash.as_fixed_bytes()[0] * 4) as u32, + )), + CoreState::Scheduled(scheduled_core_for( + (hash.as_fixed_bytes()[0] * 5) as u32, + )), + ])) + .unwrap(); + }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::PersistedValidationData( + _para_id, + _occupied_core_assumption, + tx, + ), + ))) => { + tx.send(Ok(Some(test_validation_data()))).unwrap(); + }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::Validators(tx), + ))) => { + tx.send(Ok(vec![dummy_validator(); 3])).unwrap(); + }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ValidationCodeHash( + _para_id, + OccupiedCoreAssumption::Free, + tx, + ), + ))) => { + tx.send(Err(RuntimeApiError::NotSupported { + runtime_api_name: "validation_code_hash", + })) + .unwrap(); + }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::ValidationCode(_para_id, OccupiedCoreAssumption::Free, tx), + ))) => { + tx.send(Ok(Some(ValidationCode(vec![1, 2, 3])))).unwrap(); + }, + Some(msg @ AllMessages::CollatorProtocol(_)) => { + inner_to_collator_protocol.lock().await.push(msg); + }, + Some(msg) => { + panic!("didn't expect any other overseer requests; got {:?}", msg) + }, } - }; - - let config = test_config(16); - let subsystem_config = config.clone(); - - let (tx, rx) = mpsc::channel(0); - - // empty vec doesn't allocate on the heap, so it's ok we throw it away - let sent_messages = Arc::new(Mutex::new(Vec::new())); - let subsystem_sent_messages = sent_messages.clone(); - subsystem_test_harness(overseer, |mut ctx| async move { - handle_new_activations( - subsystem_config, - activated_hashes, - &mut ctx, - Metrics(None), - &tx, - ) + } + }; + + let config = Arc::new(test_config(16u32)); + let subsystem_config = config.clone(); + + // empty vec doesn't allocate on the heap, so it's ok we throw it away + subsystem_test_harness(overseer, |mut ctx| async move { + handle_new_activations(subsystem_config, activated_hashes, &mut ctx, Metrics(None)) .await .unwrap(); + }); + + let to_collator_protocol = Arc::try_unwrap(to_collator_protocol) + .expect("subsystem should have shut down by now") + .into_inner(); + + let expect_validation_code_hash = ValidationCode(vec![1, 2, 3]).hash(); + + assert_eq!(to_collator_protocol.len(), 1); + match &to_collator_protocol[0] { + AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation( + CandidateReceipt { descriptor, .. 
}, + _pov, + .., + )) => { + assert_eq!(expect_validation_code_hash, descriptor.validation_code_hash); + }, + _ => panic!("received wrong message type"), + } +} + +#[test] +fn submit_collation_is_no_op_before_initialization() { + test_harness(|mut virtual_overseer| async move { + virtual_overseer + .send(FromOrchestra::Communication { + msg: CollationGenerationMessage::SubmitCollation(SubmitCollationParams { + relay_parent: Hash::repeat_byte(0), + collation: test_collation(), + parent_head: vec![1, 2, 3].into(), + validation_code_hash: Hash::repeat_byte(1).into(), + result_sender: None, + }), + }) + .await; + + virtual_overseer + }); +} + +#[test] +fn submit_collation_leads_to_distribution() { + let relay_parent = Hash::repeat_byte(0); + let validation_code_hash = ValidationCodeHash::from(Hash::repeat_byte(42)); + let parent_head = HeadData::from(vec![1, 2, 3]); + let para_id = ParaId::from(5); + let expected_pvd = PersistedValidationData { + parent_head: parent_head.clone(), + relay_parent_number: 10, + relay_parent_storage_root: Hash::repeat_byte(1), + max_pov_size: 1024, + }; - std::mem::drop(tx); - - // collect all sent messages - *subsystem_sent_messages.lock().await = rx.collect().await; - }); - - let mut sent_messages = Arc::try_unwrap(sent_messages) - .expect("subsystem should have shut down by now") - .into_inner(); - - // we expect a single message to be sent, containing a candidate receipt. - // we don't care too much about the `commitments_hash` right now, but let's ensure that we've calculated the - // correct descriptor - let expect_pov_hash = - test_collation_compressed().proof_of_validity.into_compressed().hash(); - let expect_validation_data_hash = test_validation_data().hash(); - let expect_relay_parent = Hash::repeat_byte(4); - let expect_validation_code_hash = ValidationCode(vec![1, 2, 3]).hash(); - let expect_payload = collator_signature_payload( - &expect_relay_parent, - &config.para_id, - &expect_validation_data_hash, - &expect_pov_hash, - &expect_validation_code_hash, + test_harness(|mut virtual_overseer| async move { + virtual_overseer + .send(FromOrchestra::Communication { + msg: CollationGenerationMessage::Initialize(test_config_no_collator(para_id)), + }) + .await; + + virtual_overseer + .send(FromOrchestra::Communication { + msg: CollationGenerationMessage::SubmitCollation(SubmitCollationParams { + relay_parent, + collation: test_collation(), + parent_head: vec![1, 2, 3].into(), + validation_code_hash, + result_sender: None, + }), + }) + .await; + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + AllMessages::RuntimeApi(RuntimeApiMessage::Request(rp, RuntimeApiRequest::Validators(tx))) => { + assert_eq!(rp, relay_parent); + let _ = tx.send(Ok(vec![ + Sr25519Keyring::Alice.public().into(), + Sr25519Keyring::Bob.public().into(), + Sr25519Keyring::Charlie.public().into(), + ])); + } ); - let expect_descriptor = CandidateDescriptor { - signature: config.key.sign(&expect_payload), - para_id: config.para_id, - relay_parent: expect_relay_parent, - collator: config.key.public(), - persisted_validation_data_hash: expect_validation_data_hash, - pov_hash: expect_pov_hash, - erasure_root: dummy_hash(), // this isn't something we're checking right now - para_head: test_collation().head_data.hash(), - validation_code_hash: expect_validation_code_hash, - }; - - assert_eq!(sent_messages.len(), 1); - match AllMessages::from(sent_messages.pop().unwrap()) { + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, + 
AllMessages::RuntimeApi(RuntimeApiMessage::Request(rp, RuntimeApiRequest::PersistedValidationData(id, a, tx))) => { + assert_eq!(rp, relay_parent); + assert_eq!(id, para_id); + assert_eq!(a, OccupiedCoreAssumption::TimedOut); + + // Candidate receipt should be constructed with the real parent head. + let mut pvd = expected_pvd.clone(); + pvd.parent_head = vec![4, 5, 6].into(); + let _ = tx.send(Ok(Some(pvd))); + } + ); + + assert_matches!( + overseer_recv(&mut virtual_overseer).await, AllMessages::CollatorProtocol(CollatorProtocolMessage::DistributeCollation( - CandidateReceipt { descriptor, .. }, - _pov, - .., + ccr, + parent_head_data_hash, + .. )) => { - // signature generation is non-deterministic, so we can't just assert that the - // expected descriptor is correct. What we can do is validate that the produced - // descriptor has a valid signature, then just copy in the generated signature - // and check the rest of the fields for equality. - assert!(CollatorPair::verify( - &descriptor.signature, - &collator_signature_payload( - &descriptor.relay_parent, - &descriptor.para_id, - &descriptor.persisted_validation_data_hash, - &descriptor.pov_hash, - &descriptor.validation_code_hash, - ) - .as_ref(), - &descriptor.collator, - )); - let expect_descriptor = { - let mut expect_descriptor = expect_descriptor; - expect_descriptor.signature = descriptor.signature.clone(); - expect_descriptor.erasure_root = descriptor.erasure_root.clone(); - expect_descriptor - }; - assert_eq!(descriptor, expect_descriptor); - }, - _ => panic!("received wrong message type"), - } - } - - #[test] - fn fallback_when_no_validation_code_hash_api() { - // This is a variant of the above test, but with the validation code hash API disabled. - - let activated_hashes: Vec = vec![ - Hash::repeat_byte(1), - Hash::repeat_byte(4), - Hash::repeat_byte(9), - Hash::repeat_byte(16), - ]; - - let overseer = |mut handle: TestSubsystemContextHandle| async move { - loop { - match handle.try_recv().await { - None => break, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - hash, - RuntimeApiRequest::AvailabilityCores(tx), - ))) => { - tx.send(Ok(vec![ - CoreState::Free, - CoreState::Scheduled(scheduled_core_for( - (hash.as_fixed_bytes()[0] * 4) as u32, - )), - CoreState::Scheduled(scheduled_core_for( - (hash.as_fixed_bytes()[0] * 5) as u32, - )), - ])) - .unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::PersistedValidationData( - _para_id, - _occupied_core_assumption, - tx, - ), - ))) => { - tx.send(Ok(Some(test_validation_data()))).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::Validators(tx), - ))) => { - tx.send(Ok(vec![dummy_validator(); 3])).unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::ValidationCodeHash( - _para_id, - OccupiedCoreAssumption::Free, - tx, - ), - ))) => { - tx.send(Err(RuntimeApiError::NotSupported { - runtime_api_name: "validation_code_hash", - })) - .unwrap(); - }, - Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( - _hash, - RuntimeApiRequest::ValidationCode( - _para_id, - OccupiedCoreAssumption::Free, - tx, - ), - ))) => { - tx.send(Ok(Some(ValidationCode(vec![1, 2, 3])))).unwrap(); - }, - Some(msg) => { - panic!("didn't expect any other overseer requests; got {:?}", msg) - }, - } + assert_eq!(parent_head_data_hash, parent_head.hash()); + assert_eq!(ccr.descriptor().persisted_validation_data_hash, expected_pvd.hash()); + 
assert_eq!(ccr.descriptor().para_head, dummy_head_data().hash()); + assert_eq!(ccr.descriptor().validation_code_hash, validation_code_hash); } - }; - - let config = test_config(16u32); - let subsystem_config = config.clone(); - - let (tx, rx) = mpsc::channel(0); - - // empty vec doesn't allocate on the heap, so it's ok we throw it away - let sent_messages = Arc::new(Mutex::new(Vec::new())); - let subsystem_sent_messages = sent_messages.clone(); - subsystem_test_harness(overseer, |mut ctx| async move { - handle_new_activations( - subsystem_config, - activated_hashes, - &mut ctx, - Metrics(None), - &tx, - ) - .await - .unwrap(); + ); - std::mem::drop(tx); - - *subsystem_sent_messages.lock().await = rx.collect().await; - }); - - let sent_messages = Arc::try_unwrap(sent_messages) - .expect("subsystem should have shut down by now") - .into_inner(); - - let expect_validation_code_hash = ValidationCode(vec![1, 2, 3]).hash(); - - assert_eq!(sent_messages.len(), 1); - match &sent_messages[0] { - overseer::CollationGenerationOutgoingMessages::CollatorProtocolMessage( - CollatorProtocolMessage::DistributeCollation( - CandidateReceipt { descriptor, .. }, - _pov, - .., - ), - ) => { - assert_eq!(expect_validation_code_hash, descriptor.validation_code_hash); - }, - _ => panic!("received wrong message type"), - } - } + virtual_overseer + }); } diff --git a/node/overseer/src/tests.rs b/node/overseer/src/tests.rs index e39689816d91..0390c04bfbab 100644 --- a/node/overseer/src/tests.rs +++ b/node/overseer/src/tests.rs @@ -797,7 +797,7 @@ fn test_chain_api_msg() -> ChainApiMessage { fn test_collator_generation_msg() -> CollationGenerationMessage { CollationGenerationMessage::Initialize(CollationGenerationConfig { key: CollatorPair::generate().0, - collator: Box::new(|_, _| TestCollator.boxed()), + collator: Some(Box::new(|_, _| TestCollator.boxed())), para_id: Default::default(), }) } diff --git a/node/primitives/src/lib.rs b/node/primitives/src/lib.rs index b9aa449188e9..9f031d772b06 100644 --- a/node/primitives/src/lib.rs +++ b/node/primitives/src/lib.rs @@ -32,8 +32,8 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use polkadot_primitives::{ BlakeTwo256, BlockNumber, CandidateCommitments, CandidateHash, CollatorPair, CommittedCandidateReceipt, CompactStatement, EncodeAs, Hash, HashT, HeadData, Id as ParaId, - PersistedValidationData, SessionIndex, Signed, UncheckedSigned, ValidationCode, ValidatorIndex, - MAX_CODE_SIZE, MAX_POV_SIZE, + PersistedValidationData, SessionIndex, Signed, UncheckedSigned, ValidationCode, + ValidationCodeHash, ValidatorIndex, MAX_CODE_SIZE, MAX_POV_SIZE, }; pub use sp_consensus_babe::{ AllowedSlots as BabeAllowedSlots, BabeEpochConfiguration, Epoch as BabeEpoch, @@ -380,6 +380,18 @@ pub enum MaybeCompressedPoV { Compressed(PoV), } +#[cfg(not(target_os = "unknown"))] +impl std::fmt::Debug for MaybeCompressedPoV { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let (variant, size) = match self { + MaybeCompressedPoV::Raw(pov) => ("Raw", pov.block_data.0.len()), + MaybeCompressedPoV::Compressed(pov) => ("Compressed", pov.block_data.0.len()), + }; + + write!(f, "{} PoV ({} bytes)", variant, size) + } +} + #[cfg(not(target_os = "unknown"))] impl MaybeCompressedPoV { /// Convert into a compressed [`PoV`]. @@ -399,7 +411,7 @@ impl MaybeCompressedPoV { /// /// - does not contain the erasure root; that's computed at the Polkadot level, not at Cumulus /// - contains a proof of validity. 
-#[derive(Clone, Encode, Decode)] +#[derive(Debug, Clone, Encode, Decode)] #[cfg(not(target_os = "unknown"))] pub struct Collation { /// Messages destined to be interpreted by the Relay chain itself. @@ -475,7 +487,10 @@ pub struct CollationGenerationConfig { /// Collator's authentication key, so it can sign things. pub key: CollatorPair, /// Collation function. See [`CollatorFn`] for more details. - pub collator: CollatorFn, + /// + /// If this is `None`, it implies that collations are intended to be submitted + /// out-of-band and not pulled out of the function. + pub collator: Option, /// The parachain that this collator collates for pub para_id: ParaId, } @@ -487,6 +502,25 @@ impl std::fmt::Debug for CollationGenerationConfig { } } +/// Parameters for [`CollationGenerationMessage::SubmitCollation`]. +#[derive(Debug)] +pub struct SubmitCollationParams { + /// The relay-parent the collation is built against. + pub relay_parent: Hash, + /// The collation itself (PoV and commitments) + pub collation: Collation, + /// The parent block's head-data. + pub parent_head: HeadData, + /// The hash of the validation code the collation was created against. + pub validation_code_hash: ValidationCodeHash, + /// An optional result sender that should be informed about a successfully seconded collation. + /// + /// There is no guarantee that this sender is informed ever about any result, it is completely okay to just drop it. + /// However, if it is called, it should be called with the signed statement of a parachain validator seconding the + /// collation. + pub result_sender: Option>, +} + /// This is the data we keep available for each candidate included in the relay chain. #[derive(Clone, Encode, Decode, PartialEq, Eq, Debug)] pub struct AvailableData { diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index 761cc314bfa8..a9445038c23b 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -35,7 +35,8 @@ use polkadot_node_primitives::{ approval::{BlockApprovalMeta, IndirectAssignmentCert, IndirectSignedApprovalVote}, AvailableData, BabeEpoch, BlockWeight, CandidateVotes, CollationGenerationConfig, CollationSecondedSignal, DisputeMessage, DisputeStatus, ErasureChunk, PoV, - SignedDisputeStatement, SignedFullStatement, SignedFullStatementWithPVD, ValidationResult, + SignedDisputeStatement, SignedFullStatement, SignedFullStatementWithPVD, SubmitCollationParams, + ValidationResult, }; use polkadot_primitives::{ slashing, vstaging as vstaging_primitives, AuthorityDiscoveryId, BackedCandidate, BlockNumber, @@ -769,6 +770,11 @@ pub enum ProvisionerMessage { pub enum CollationGenerationMessage { /// Initialize the collation generation subsystem Initialize(CollationGenerationConfig), + /// Submit a collation to the subsystem. This will package it into a signed + /// [`CommittedCandidateReceipt`] and distribute along the network to validators. + /// + /// If sent before `Initialize`, this will be ignored. + SubmitCollation(SubmitCollationParams), } /// The result type of [`ApprovalVotingMessage::CheckAndImportAssignment`] request. 
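A minimal collator-side sketch of the submission flow these hunks establish: initialize with `collator: None`, then push a finished collation via `SubmitCollation`. The function name, the `Handle`-based wiring, and all argument values are illustrative assumptions rather than part of the patch; only the message and struct shapes come from the hunks above.

```rust
use polkadot_node_primitives::{Collation, CollationGenerationConfig, SubmitCollationParams};
use polkadot_node_subsystem_types::messages::CollationGenerationMessage;
use polkadot_overseer::Handle;
use polkadot_primitives::{CollatorPair, Hash, HeadData, Id as ParaId, ValidationCodeHash};

// Sketch: a submission-only collator. `overseer_handle` is assumed to be the
// node's overseer `Handle`, wired as in the test service further below.
async fn submit_one_collation(
	overseer_handle: &mut Handle,
	key: CollatorPair,
	para_id: ParaId,
	relay_parent: Hash,
	collation: Collation,
	parent_head: HeadData,
	validation_code_hash: ValidationCodeHash,
) {
	// `collator: None` opts out of the pull-based `CollatorFn` path entirely.
	let config = CollationGenerationConfig { key, collator: None, para_id };
	overseer_handle
		.send_msg(CollationGenerationMessage::Initialize(config), "Collator")
		.await;

	// Push a finished collation; the subsystem packages it into a candidate
	// receipt and hands it to the collator protocol for distribution.
	overseer_handle
		.send_msg(
			CollationGenerationMessage::SubmitCollation(SubmitCollationParams {
				relay_parent,
				collation,
				parent_head,
				validation_code_hash,
				// Dropping the sender is explicitly allowed; see the field docs.
				result_sender: None,
			}),
			"Collator",
		)
		.await;
}
```

The ordering matters: per the message docs above, a `SubmitCollation` received before `Initialize` is ignored.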
diff --git a/node/test/service/src/lib.rs b/node/test/service/src/lib.rs index 08d09f8fe69a..98254c08a2bc 100644 --- a/node/test/service/src/lib.rs +++ b/node/test/service/src/lib.rs @@ -331,7 +331,8 @@ impl PolkadotTestNode { para_id: ParaId, collator: CollatorFn, ) { - let config = CollationGenerationConfig { key: collator_key, collator, para_id }; + let config = + CollationGenerationConfig { key: collator_key, collator: Some(collator), para_id }; self.overseer_handle .send_msg(CollationGenerationMessage::Initialize(config), "Collator") diff --git a/parachain/src/primitives.rs b/parachain/src/primitives.rs index 18da89aa97a1..65f058a6819e 100644 --- a/parachain/src/primitives.rs +++ b/parachain/src/primitives.rs @@ -80,7 +80,8 @@ impl ValidationCode { } } -/// Unit type wrapper around [`type@Hash`] that represents a validation code hash. +/// Unit type wrapper around [`type@Hash`] that represents the blake2-256 hash +/// of validation code in particular. /// /// This type is produced by [`ValidationCode::hash`]. /// diff --git a/parachain/test-parachains/adder/collator/src/main.rs b/parachain/test-parachains/adder/collator/src/main.rs index 699cee202cb8..4b959ced4cf9 100644 --- a/parachain/test-parachains/adder/collator/src/main.rs +++ b/parachain/test-parachains/adder/collator/src/main.rs @@ -87,8 +87,9 @@ fn main() -> Result<()> { let config = CollationGenerationConfig { key: collator.collator_key(), - collator: collator - .create_collation_function(full_node.task_manager.spawn_handle()), + collator: Some( + collator.create_collation_function(full_node.task_manager.spawn_handle()), + ), para_id, }; overseer_handle diff --git a/parachain/test-parachains/undying/collator/src/main.rs b/parachain/test-parachains/undying/collator/src/main.rs index 189674b82a97..df1fec00ee86 100644 --- a/parachain/test-parachains/undying/collator/src/main.rs +++ b/parachain/test-parachains/undying/collator/src/main.rs @@ -87,8 +87,9 @@ fn main() -> Result<()> { let config = CollationGenerationConfig { key: collator.collator_key(), - collator: collator - .create_collation_function(full_node.task_manager.spawn_handle()), + collator: Some( + collator.create_collation_function(full_node.task_manager.spawn_handle()), + ), para_id, }; overseer_handle diff --git a/roadmap/implementers-guide/src/node/collators/collation-generation.md b/roadmap/implementers-guide/src/node/collators/collation-generation.md index 2f0d4742496d..9053ea40f89e 100644 --- a/roadmap/implementers-guide/src/node/collators/collation-generation.md +++ b/roadmap/implementers-guide/src/node/collators/collation-generation.md @@ -9,7 +9,7 @@ Collation generation for Parachains currently works in the following way: 1. A new relay chain block is imported. 2. The collation generation subsystem checks if the core associated to the parachain is free and if yes, continues. -3. Collation generation calls our collator callback to generate a PoV. +3. Collation generation calls our collator callback, if present, to generate a PoV. If none exists, do nothing. 4. Authoring logic determines if the current node should build a PoV. 5. Build new PoV and give it back to collation generation. @@ -25,6 +25,10 @@ Collation generation for Parachains currently works in the following way: - No more than one initialization message should ever be sent to the collation generation subsystem. - Sent by a collator to initialize this subsystem. 
+- `CollationGenerationMessage::SubmitCollation` + - If the subsystem isn't initialized or the relay-parent is too old to be relevant, ignore the message. + - Otherwise, use the provided parameters to generate a [`CommittedCandidateReceipt`] + - Submit the collation to the collator-protocol with `CollatorProtocolMessage::DistributeCollation`. ### Outgoing @@ -101,7 +105,7 @@ pub struct CollationGenerationConfig { /// Collator's authentication key, so it can sign things. pub key: CollatorPair, /// Collation function. See [`CollatorFn`] for more details. - pub collator: CollatorFn, + pub collator: Option, /// The parachain that this collator collates for pub para_id: ParaId, } @@ -136,7 +140,7 @@ The configuration should be optional, to allow for the case where the node is no - **Collation generation config** - - Contains collator's authentication key, collator function, and + - Contains collator's authentication key, optional collator function, and parachain ID. [CP]: collator-protocol.md diff --git a/roadmap/implementers-guide/src/types/overseer-protocol.md b/roadmap/implementers-guide/src/types/overseer-protocol.md index 30e6dc848802..1fc0c505a1cc 100644 --- a/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -403,6 +403,57 @@ enum CollatorProtocolMessage { } ``` +## Collation Generation Message + +Messages received by the [Collation Generation subsystem](../node/collators/collation-generation.md) + +This is the core interface by which collators built on top of a Polkadot node submit collations to validators. As such, these messages are not sent by any subsystem but are instead sent from outside of the overseer. + +```rust +/// A function provided to the subsystem which it uses to pull new collations. +/// +/// This mode of querying collations is obsoleted by `CollationGenerationMessages::SubmitCollation` +/// +/// The response channel, if present, is meant to receive a `Seconded` statement as a +/// form of authentication, for collation mechanisms which rely on this for anti-spam. +type CollatorFn = Fn(Hash, PersistedValidationData) -> Future>)>; + +/// Configuration for the collation generator +struct CollationGenerationConfig { + /// Collator's authentication key, so it can sign things. + key: CollatorPair, + /// Collation function. See [`CollatorFn`] for more details. + collator: CollatorFn, + /// The parachain that this collator collates for + para_id: ParaId, +} + +/// Parameters for submitting a collation +struct SubmitCollationParams { + /// The relay-parent the collation is built against. + relay_parent: Hash, + /// The collation itself (PoV and commitments) + collation: Collation, + /// The parent block's head-data. + parent_head: HeadData, + /// The hash of the validation code the collation was created against. + validation_code_hash: ValidationCodeHash, + /// A response channel for receiving a `Seconded` message about the candidate + /// once produced by a validator. This is not guaranteed to provide anything. + result_sender: Option>, +} + +enum CollationGenerationMessage { + /// Initialize the collation generation subsystem + Initialize(CollationGenerationConfig), + /// Submit a collation to the subsystem. This will package it into a signed + /// [`CommittedCandidateReceipt`] and distribute along the network to validators. + /// + /// If sent before `Initialize`, this will be ignored. 
+ SubmitCollation(SubmitCollationParams), +} +``` + ## Dispute Coordinator Message Messages received by the [Dispute Coordinator subsystem](../node/disputes/dispute-coordinator.md) From 5207e17139488d39ff3c8537c3b79bc926ba4e68 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 13 Jul 2023 17:52:34 +0100 Subject: [PATCH 61/76] fix some more fallout from merge --- node/core/runtime-api/src/tests.rs | 15 +++++++++++++++ node/subsystem-types/src/runtime_client.rs | 4 ++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/node/core/runtime-api/src/tests.rs b/node/core/runtime-api/src/tests.rs index 27090a102ec2..868d98de764c 100644 --- a/node/core/runtime-api/src/tests.rs +++ b/node/core/runtime-api/src/tests.rs @@ -249,6 +249,21 @@ impl RuntimeApiSubsystemClient for MockSubsystemClient { async fn authorities(&self, _: Hash) -> Result, ApiError> { Ok(self.authorities.clone()) } + + async fn staging_async_backing_params( + &self, + _: Hash, + ) -> Result { + todo!("Not required for tests") + } + + async fn staging_para_backing_state( + &self, + _: Hash, + _: ParaId, + ) -> Result, ApiError> { + todo!("Not required for tests") + } } #[test] diff --git a/node/subsystem-types/src/runtime_client.rs b/node/subsystem-types/src/runtime_client.rs index f9fbe9e2bf61..89d14d643f73 100644 --- a/node/subsystem-types/src/runtime_client.rs +++ b/node/subsystem-types/src/runtime_client.rs @@ -477,7 +477,7 @@ where at: Hash, para_id: Id, ) -> Result, ApiError> { - self.runtime_api().staging_para_backing_state(at, para_id) + self.client.runtime_api().staging_para_backing_state(at, para_id) } /// Returns candidate's acceptance limitations for asynchronous backing for a relay parent. @@ -485,6 +485,6 @@ where &self, at: Hash, ) -> Result { - self.runtime_api().staging_async_backing_params(at) + self.client.runtime_api().staging_async_backing_params(at) } } From 6e3efdb269588bb0c105d14f70d5560c0c120cfe Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 13 Jul 2023 17:52:44 +0100 Subject: [PATCH 62/76] fmt --- runtime/parachains/src/runtime_api_impl/vstaging.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/runtime/parachains/src/runtime_api_impl/vstaging.rs b/runtime/parachains/src/runtime_api_impl/vstaging.rs index 2523e4208cb2..5406428377d0 100644 --- a/runtime/parachains/src/runtime_api_impl/vstaging.rs +++ b/runtime/parachains/src/runtime_api_impl/vstaging.rs @@ -17,6 +17,7 @@ //! Put implementations of functions from staging APIs here. use crate::{configuration, dmp, hrmp, inclusion, initializer, paras, shared}; +use frame_system::pallet_prelude::BlockNumberFor; use primitives::{ vstaging::{ AsyncBackingParams, BackingState, CandidatePendingAvailability, Constraints, @@ -24,7 +25,6 @@ use primitives::{ }, Id as ParaId, }; -use frame_system::pallet_prelude::BlockNumberFor; use sp_std::prelude::*; /// Implementation for `StagingParaBackingState` function from the runtime API From c2f3daa04f8f964db7395467a053d28e2e413f80 Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Tue, 18 Jul 2023 17:53:17 -0500 Subject: [PATCH 63/76] remove staging APIs from Rococo & Westend (#7513) --- runtime/rococo/src/lib.rs | 9 --------- runtime/westend/src/lib.rs | 9 --------- 2 files changed, 18 deletions(-) diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 734f31482d65..3afe4a8e69bd 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -1805,7 +1805,6 @@ sp_api::impl_runtime_apis! 
{ } } - #[api_version(99)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1936,14 +1935,6 @@ sp_api::impl_runtime_apis! { key_ownership_proof, ) } - - fn staging_para_backing_state(para_id: ParaId) -> Option { - runtime_parachains::runtime_api_impl::vstaging::backing_state::(para_id) - } - - fn staging_async_backing_params() -> primitives::vstaging::AsyncBackingParams { - runtime_parachains::runtime_api_impl::vstaging::async_backing_params::() - } } #[api_version(2)] diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index ec93708e740e..21dca533cf5a 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -1481,7 +1481,6 @@ sp_api::impl_runtime_apis! { } } - #[api_version(99)] impl primitives::runtime_api::ParachainHost for Runtime { fn validators() -> Vec { parachains_runtime_api_impl::validators::() @@ -1612,14 +1611,6 @@ sp_api::impl_runtime_apis! { key_ownership_proof, ) } - - fn staging_para_backing_state(para_id: ParaId) -> Option { - runtime_parachains::runtime_api_impl::vstaging::backing_state::(para_id) - } - - fn staging_async_backing_params() -> primitives::vstaging::AsyncBackingParams { - runtime_parachains::runtime_api_impl::vstaging::async_backing_params::() - } } impl beefy_primitives::BeefyApi for Runtime { From 2933d349f39a9011c0cfe64b9cc18d2c8ca3f7ef Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Wed, 19 Jul 2023 13:55:43 -0500 Subject: [PATCH 64/76] send network messages on main protocol name (#7515) --- node/network/bridge/src/network.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/node/network/bridge/src/network.rs b/node/network/bridge/src/network.rs index 5e733b44dd1c..cfff97a7eb2d 100644 --- a/node/network/bridge/src/network.rs +++ b/node/network/bridge/src/network.rs @@ -68,8 +68,11 @@ pub(crate) fn send_message( // list. The message payload can be quite large. If the underlying // network used `Bytes` this would not be necessary. let last_peer = peers.pop(); - // optimization: generate the protocol name once. - let protocol_name = protocol_names.get_name(peer_set, version); + + // We always send messages on the "main" name even when a negotiated + // fallback is used. The libp2p implementation handles the fallback + // under the hood. 
+ let protocol_name = protocol_names.get_main_name(peer_set); peers.into_iter().for_each(|peer| { net.write_notification(peer, protocol_name.clone(), message.clone()); }); From d3f0fe63f47215f50581dd0ea3f8e91081e644cc Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Fri, 21 Jul 2023 19:15:38 +0300 Subject: [PATCH 65/76] misc async backing improvements for allowed ancestry blocks (#7532) * shared: fix acquire_info * backwards-compat test for prospective parachains * same relay parent is allowed --- node/core/prospective-parachains/src/tests.rs | 130 ++++++++++++++++-- runtime/parachains/src/shared.rs | 7 +- runtime/parachains/src/shared/tests.rs | 41 ++++++ 3 files changed, 162 insertions(+), 16 deletions(-) diff --git a/node/core/prospective-parachains/src/tests.rs b/node/core/prospective-parachains/src/tests.rs index 8b38064ad818..3a1bb36494d9 100644 --- a/node/core/prospective-parachains/src/tests.rs +++ b/node/core/prospective-parachains/src/tests.rs @@ -185,6 +185,15 @@ async fn activate_leaf( virtual_overseer: &mut VirtualOverseer, leaf: &TestLeaf, test_state: &TestState, +) { + activate_leaf_with_params(virtual_overseer, leaf, test_state, ASYNC_BACKING_PARAMETERS).await; +} + +async fn activate_leaf_with_params( + virtual_overseer: &mut VirtualOverseer, + leaf: &TestLeaf, + test_state: &TestState, + async_backing_params: AsyncBackingParams, ) { let TestLeaf { number, hash, .. } = leaf; @@ -201,13 +210,14 @@ async fn activate_leaf( )))) .await; - handle_leaf_activation(virtual_overseer, leaf, test_state).await; + handle_leaf_activation(virtual_overseer, leaf, test_state, async_backing_params).await; } async fn handle_leaf_activation( virtual_overseer: &mut VirtualOverseer, leaf: &TestLeaf, test_state: &TestState, + async_backing_params: AsyncBackingParams, ) { let TestLeaf { number, hash, para_data } = leaf; @@ -216,7 +226,7 @@ async fn handle_leaf_activation( AllMessages::RuntimeApi( RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) ) if parent == *hash => { - tx.send(Ok(ASYNC_BACKING_PARAMETERS)).unwrap(); + tx.send(Ok(async_backing_params)).unwrap(); } ); @@ -241,14 +251,16 @@ async fn handle_leaf_activation( .collect(); let ancestry_numbers = (min_min..*number).rev(); let ancestry_iter = ancestry_hashes.clone().into_iter().zip(ancestry_numbers).peekable(); - assert_matches!( - virtual_overseer.recv().await, - AllMessages::ChainApi( - ChainApiMessage::Ancestors{hash: block_hash, k, response_channel: tx} - ) if block_hash == *hash && k == ALLOWED_ANCESTRY_LEN as usize => { - tx.send(Ok(ancestry_hashes.clone())).unwrap(); - } - ); + if ancestry_len > 0 { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ChainApi( + ChainApiMessage::Ancestors{hash: block_hash, k, response_channel: tx} + ) if block_hash == *hash && k == ALLOWED_ANCESTRY_LEN as usize => { + tx.send(Ok(ancestry_hashes.clone())).unwrap(); + } + ); + } for (hash, number) in ancestry_iter { send_block_header(virtual_overseer, hash, number).await; @@ -1301,7 +1313,13 @@ fn correctly_updates_leaves() { virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update))) .await; - handle_leaf_activation(&mut virtual_overseer, &leaf_c, &test_state).await; + handle_leaf_activation( + &mut virtual_overseer, + &leaf_c, + &test_state, + ASYNC_BACKING_PARAMETERS, + ) + .await; // Remove all remaining leaves. 
let update = ActiveLeavesUpdate { @@ -1326,7 +1344,13 @@ fn correctly_updates_leaves() { virtual_overseer .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves(update))) .await; - handle_leaf_activation(&mut virtual_overseer, &leaf_a, &test_state).await; + handle_leaf_activation( + &mut virtual_overseer, + &leaf_a, + &test_state, + ASYNC_BACKING_PARAMETERS, + ) + .await; // Remove the leaf again. Send some unnecessary hashes. let update = ActiveLeavesUpdate { @@ -1440,3 +1464,85 @@ fn persists_pending_availability_candidate() { virtual_overseer }); } + +#[test] +fn backwards_compatible() { + let mut test_state = TestState::default(); + let para_id = ParaId::from(1); + test_state.availability_cores = test_state + .availability_cores + .into_iter() + .filter(|core| core.para_id().map_or(false, |id| id == para_id)) + .collect(); + assert_eq!(test_state.availability_cores.len(), 1); + + test_harness(|mut virtual_overseer| async move { + let para_head = HeadData(vec![1, 2, 3]); + + let leaf_b_hash = Hash::repeat_byte(15); + let candidate_relay_parent = get_parent_hash(leaf_b_hash); + let candidate_relay_parent_number = 100; + + let leaf_a = TestLeaf { + number: candidate_relay_parent_number, + hash: candidate_relay_parent, + para_data: vec![( + para_id, + PerParaData::new(candidate_relay_parent_number, para_head.clone()), + )], + }; + + // Activate leaf. + activate_leaf_with_params( + &mut virtual_overseer, + &leaf_a, + &test_state, + AsyncBackingParams { allowed_ancestry_len: 0, max_candidate_depth: 0 }, + ) + .await; + + // Candidate A + let (candidate_a, pvd_a) = make_candidate( + candidate_relay_parent, + candidate_relay_parent_number, + para_id, + para_head.clone(), + HeadData(vec![1]), + test_state.validation_code_hash, + ); + let candidate_hash_a = candidate_a.hash(); + + introduce_candidate(&mut virtual_overseer, candidate_a.clone(), pvd_a).await; + second_candidate(&mut virtual_overseer, candidate_a.clone()).await; + back_candidate(&mut virtual_overseer, &candidate_a, candidate_hash_a).await; + + get_backable_candidate( + &mut virtual_overseer, + &leaf_a, + para_id, + vec![], + Some(candidate_hash_a), + ) + .await; + + let leaf_b = TestLeaf { + number: candidate_relay_parent_number + 1, + hash: leaf_b_hash, + para_data: vec![( + para_id, + PerParaData::new(candidate_relay_parent_number + 1, para_head.clone()), + )], + }; + activate_leaf_with_params( + &mut virtual_overseer, + &leaf_b, + &test_state, + AsyncBackingParams { allowed_ancestry_len: 0, max_candidate_depth: 0 }, + ) + .await; + + get_backable_candidate(&mut virtual_overseer, &leaf_b, para_id, vec![], None).await; + + virtual_overseer + }); +} diff --git a/runtime/parachains/src/shared.rs b/runtime/parachains/src/shared.rs index ffc676ba79d6..6380a8c73e54 100644 --- a/runtime/parachains/src/shared.rs +++ b/runtime/parachains/src/shared.rs @@ -94,16 +94,15 @@ impl prev: Option, ) -> Option<(Hash, BlockNumber)> { let pos = self.buffer.iter().position(|(rp, _)| rp == &relay_parent)?; + let age = (self.buffer.len() - 1) - pos; + let number = self.latest_number - BlockNumber::from(age as u32); if let Some(prev) = prev { - if prev >= self.latest_number { + if prev > number { return None } } - let age = (self.buffer.len() - 1) - pos; - let number = self.latest_number - BlockNumber::from(age as u32); - Some((self.buffer[pos].1, number)) } diff --git a/runtime/parachains/src/shared/tests.rs b/runtime/parachains/src/shared/tests.rs index 59fe8f8cefb3..91891ba8d75b 100644 --- a/runtime/parachains/src/shared/tests.rs +++ 
b/runtime/parachains/src/shared/tests.rs @@ -19,6 +19,7 @@ use crate::{ configuration::HostConfiguration, mock::{new_test_ext, MockGenesisConfig, ParasShared}, }; +use assert_matches::assert_matches; use keyring::Sr25519Keyring; use primitives::Hash; @@ -53,6 +54,46 @@ fn tracker_earliest_block_number() { assert_eq!(tracker.hypothetical_earliest_block_number(now + 1, max_ancestry_len), 1); } +#[test] +fn tracker_acquire_info() { + let mut tracker = AllowedRelayParentsTracker::::default(); + let max_ancestry_len = 2; + + // (relay_parent, state_root) pairs. + let blocks = &[ + (Hash::repeat_byte(0), Hash::repeat_byte(10)), + (Hash::repeat_byte(1), Hash::repeat_byte(11)), + (Hash::repeat_byte(2), Hash::repeat_byte(12)), + ]; + + let (relay_parent, state_root) = blocks[0]; + tracker.update(relay_parent, state_root, 0, max_ancestry_len); + assert_matches!( + tracker.acquire_info(relay_parent, None), + Some((s, b)) if s == state_root && b == 0 + ); + + let (relay_parent, state_root) = blocks[1]; + tracker.update(relay_parent, state_root, 1u32, max_ancestry_len); + let (relay_parent, state_root) = blocks[2]; + tracker.update(relay_parent, state_root, 2u32, max_ancestry_len); + for (block_num, (rp, state_root)) in blocks.iter().enumerate().take(2) { + assert_matches!( + tracker.acquire_info(*rp, None), + Some((s, b)) if &s == state_root && b == block_num as u32 + ); + + assert!(tracker.acquire_info(*rp, Some(2)).is_none()); + } + + for (block_num, (rp, state_root)) in blocks.iter().enumerate().skip(1) { + assert_matches!( + tracker.acquire_info(*rp, Some(block_num as u32 - 1)), + Some((s, b)) if &s == state_root && b == block_num as u32 + ); + } +} + #[test] fn sets_and_shuffles_validators() { let validators = vec![ From a16c843deee1af57de64e2a4cd50df5798164ab1 Mon Sep 17 00:00:00 2001 From: Chris Sosnin <48099298+slumber@users.noreply.github.com> Date: Sat, 22 Jul 2023 00:27:02 +0300 Subject: [PATCH 66/76] provisioner: request candidate receipt by relay parent (#7527) * return candidates hash from prospective parachains * update provisioner * update tests * guide changes * send a single message to backing * fix test --- node/core/backing/src/lib.rs | 32 ++++--- node/core/backing/src/tests/mod.rs | 6 +- .../src/fragment_tree.rs | 13 ++- node/core/prospective-parachains/src/lib.rs | 19 +++- node/core/prospective-parachains/src/tests.rs | 12 +-- node/core/provisioner/src/lib.rs | 18 ++-- node/core/provisioner/src/tests.rs | 87 +++++++++++++++++-- node/overseer/src/tests.rs | 2 +- node/subsystem-types/src/messages.rs | 20 +++-- .../node/backing/prospective-parachains.md | 4 +- .../src/types/overseer-protocol.md | 7 +- 11 files changed, 164 insertions(+), 56 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 4364100e14fb..2dcc6363105b 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -738,16 +738,8 @@ async fn handle_communication( CandidateBackingMessage::Statement(relay_parent, statement) => { handle_statement_message(ctx, state, relay_parent, statement, metrics).await?; }, - CandidateBackingMessage::GetBackedCandidates(relay_parent, requested_candidates, tx) => - if let Some(rp_state) = state.per_relay_parent.get(&relay_parent) { - handle_get_backed_candidates_message(rp_state, requested_candidates, tx, metrics)?; - } else { - gum::debug!( - target: LOG_TARGET, - ?relay_parent, - "Received `GetBackedCandidates` for an unknown relay parent." 
- ); - }, + CandidateBackingMessage::GetBackedCandidates(requested_candidates, tx) => + handle_get_backed_candidates_message(state, requested_candidates, tx, metrics)?, CandidateBackingMessage::CanSecond(request, tx) => handle_can_second_request(ctx, state, request, tx).await, } @@ -1985,8 +1977,8 @@ async fn handle_statement_message( } fn handle_get_backed_candidates_message( - rp_state: &PerRelayParentState, - requested_candidates: Vec, + state: &State, + requested_candidates: Vec<(CandidateHash, Hash)>, tx: oneshot::Sender>, metrics: &Metrics, ) -> Result<(), Error> { @@ -1994,10 +1986,22 @@ fn handle_get_backed_candidates_message( let backed = requested_candidates .into_iter() - .filter_map(|hash| { + .filter_map(|(candidate_hash, relay_parent)| { + let rp_state = match state.per_relay_parent.get(&relay_parent) { + Some(rp_state) => rp_state, + None => { + gum::debug!( + target: LOG_TARGET, + ?relay_parent, + ?candidate_hash, + "Requested candidate's relay parent is out of view", + ); + return None + }, + }; rp_state .table - .attested_candidate(&hash, &rp_state.table_context) + .attested_candidate(&candidate_hash, &rp_state.table_context) .and_then(|attested| table_attested_to_backed(attested, &rp_state.table_context)) }) .collect(); diff --git a/node/core/backing/src/tests/mod.rs b/node/core/backing/src/tests/mod.rs index 9d372ab1335a..c0112d6dde4a 100644 --- a/node/core/backing/src/tests/mod.rs +++ b/node/core/backing/src/tests/mod.rs @@ -732,8 +732,7 @@ fn backing_works_while_validation_ongoing() { let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( - test_state.relay_parent, - vec![candidate_a.hash()], + vec![(candidate_a.hash(), test_state.relay_parent)], tx, ); @@ -1386,8 +1385,7 @@ fn backing_works_after_failed_validation() { // and check that it is still alive. let (tx, rx) = oneshot::channel(); let msg = CandidateBackingMessage::GetBackedCandidates( - test_state.relay_parent, - vec![candidate.hash()], + vec![(candidate.hash(), test_state.relay_parent)], tx, ); diff --git a/node/core/prospective-parachains/src/fragment_tree.rs b/node/core/prospective-parachains/src/fragment_tree.rs index 943d3191909b..b90145dfa6a7 100644 --- a/node/core/prospective-parachains/src/fragment_tree.rs +++ b/node/core/prospective-parachains/src/fragment_tree.rs @@ -251,6 +251,14 @@ impl CandidateStorage { }) } + /// Returns candidate's relay parent, if present. 
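+	/// (Consumed by `answer_get_backable_candidate` later in this patch to pair
+	/// a selected child hash with the relay parent it was built on.)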
+ pub(crate) fn relay_parent_by_candidate_hash( + &self, + candidate_hash: &CandidateHash, + ) -> Option { + self.by_candidate_hash.get(candidate_hash).map(|entry| entry.relay_parent) + } + fn iter_para_children<'a>( &'a self, parent_head_hash: &Hash, @@ -1152,10 +1160,11 @@ mod tests { #[test] fn storage_add_candidate() { let mut storage = CandidateStorage::new(); + let relay_parent = Hash::repeat_byte(69); let (pvd, candidate) = make_committed_candidate( ParaId::from(5u32), - Hash::repeat_byte(69), + relay_parent, 8, vec![4, 5, 6].into(), vec![1, 2, 3].into(), @@ -1168,6 +1177,8 @@ mod tests { storage.add_candidate(candidate, pvd).unwrap(); assert!(storage.contains(&candidate_hash)); assert_eq!(storage.iter_para_children(&parent_head_hash).count(), 1); + + assert_eq!(storage.relay_parent_by_candidate_hash(&candidate_hash), Some(relay_parent)); } #[test] diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index ae7f2f318345..a61bc2c46544 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -547,7 +547,7 @@ fn answer_get_backable_candidate( relay_parent: Hash, para: ParaId, required_path: Vec, - tx: oneshot::Sender>, + tx: oneshot::Sender>, ) { let data = match view.active_leaves.get(&relay_parent) { None => { @@ -594,7 +594,22 @@ fn answer_get_backable_candidate( Some(s) => s, }; - let _ = tx.send(tree.select_child(&required_path, |candidate| storage.is_backed(candidate))); + let Some(child_hash) = tree.select_child(&required_path, |candidate| storage.is_backed(candidate)) else { + let _ = tx.send(None); + return + }; + let Some(candidate_relay_parent) = storage.relay_parent_by_candidate_hash(&child_hash) else { + gum::error!( + target: LOG_TARGET, + ?child_hash, + para_id = ?para, + "Candidate is present in fragment tree but not in candidate's storage!", + ); + let _ = tx.send(None); + return + }; + + let _ = tx.send(Some((child_hash, candidate_relay_parent))); } fn answer_hypothetical_frontier_request( diff --git a/node/core/prospective-parachains/src/tests.rs b/node/core/prospective-parachains/src/tests.rs index 3a1bb36494d9..2220de71090a 100644 --- a/node/core/prospective-parachains/src/tests.rs +++ b/node/core/prospective-parachains/src/tests.rs @@ -399,7 +399,7 @@ async fn get_backable_candidate( leaf: &TestLeaf, para_id: ParaId, required_path: Vec, - expected_candidate_hash: Option, + expected_result: Option<(CandidateHash, Hash)>, ) { let (tx, rx) = oneshot::channel(); virtual_overseer @@ -413,7 +413,7 @@ async fn get_backable_candidate( }) .await; let resp = rx.await.unwrap(); - assert_eq!(resp, expected_candidate_hash); + assert_eq!(resp, expected_result); } async fn get_hypothetical_frontier( @@ -924,7 +924,7 @@ fn check_backable_query() { &leaf_a, 1.into(), vec![], - Some(candidate_hash_a), + Some((candidate_hash_a, leaf_a.hash)), ) .await; get_backable_candidate( @@ -932,7 +932,7 @@ fn check_backable_query() { &leaf_a, 1.into(), vec![candidate_hash_a], - Some(candidate_hash_b), + Some((candidate_hash_b, leaf_a.hash)), ) .await; @@ -1457,7 +1457,7 @@ fn persists_pending_availability_candidate() { &leaf_b, para_id, vec![candidate_hash_a], - Some(candidate_hash_b), + Some((candidate_hash_b, leaf_b_hash)), ) .await; @@ -1521,7 +1521,7 @@ fn backwards_compatible() { &leaf_a, para_id, vec![], - Some(candidate_hash_a), + Some((candidate_hash_a, candidate_relay_parent)), ) .await; diff --git a/node/core/provisioner/src/lib.rs b/node/core/provisioner/src/lib.rs index 
e1d450112ff8..d4dd2af822c7 100644 --- a/node/core/provisioner/src/lib.rs +++ b/node/core/provisioner/src/lib.rs @@ -558,7 +558,7 @@ async fn select_candidate_hashes_from_tracked( candidates: &[CandidateReceipt], relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, -) -> Result, Error> { +) -> Result, Error> { let block_number = get_block_number_under_construction(relay_parent, sender).await?; let mut selected_candidates = @@ -628,7 +628,7 @@ async fn select_candidate_hashes_from_tracked( "Selected candidate receipt", ); - selected_candidates.push(candidate_hash); + selected_candidates.push((candidate_hash, candidate.descriptor.relay_parent)); } } @@ -644,7 +644,7 @@ async fn request_backable_candidates( bitfields: &[SignedAvailabilityBitfield], relay_parent: Hash, sender: &mut impl overseer::ProvisionerSenderTrait, -) -> Result, Error> { +) -> Result, Error> { let block_number = get_block_number_under_construction(relay_parent, sender).await?; let mut selected_candidates = Vec::with_capacity(availability_cores.len()); @@ -685,11 +685,10 @@ async fn request_backable_candidates( CoreState::Free => continue, }; - let candidate_hash = - get_backable_candidate(relay_parent, para_id, required_path, sender).await?; + let response = get_backable_candidate(relay_parent, para_id, required_path, sender).await?; - match candidate_hash { - Some(hash) => selected_candidates.push(hash), + match response { + Some((hash, relay_parent)) => selected_candidates.push((hash, relay_parent)), None => { gum::debug!( target: LOG_TARGET, @@ -734,7 +733,6 @@ async fn select_candidates( // now get the backed candidates corresponding to these candidate receipts let (tx, rx) = oneshot::channel(); sender.send_unbounded_message(CandidateBackingMessage::GetBackedCandidates( - relay_parent, selected_candidates.clone(), tx, )); @@ -750,7 +748,7 @@ async fn select_candidates( // in order, we can ensure that the backed candidates are also in order. 
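 	// (`selected` is now a `(CandidateHash, Hash)` pair, so the hash comparison
 	// below uses `selected.0`.)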
let mut backed_idx = 0; for selected in selected_candidates { - if selected == + if selected.0 == candidates.get(backed_idx).ok_or(Error::BackedCandidateOrderingProblem)?.hash() { backed_idx += 1; @@ -808,7 +806,7 @@ async fn get_backable_candidate( para_id: ParaId, required_path: Vec, sender: &mut impl overseer::ProvisionerSenderTrait, -) -> Result, Error> { +) -> Result, Error> { let (tx, rx) = oneshot::channel(); sender .send_message(ProspectiveParachainsMessage::GetBackableCandidate( diff --git a/node/core/provisioner/src/tests.rs b/node/core/provisioner/src/tests.rs index 7807be892d1a..03a1cb54f331 100644 --- a/node/core/provisioner/src/tests.rs +++ b/node/core/provisioner/src/tests.rs @@ -340,7 +340,11 @@ mod select_candidates { use ChainApiMessage::BlockNumber; use RuntimeApiMessage::Request; - let mut candidates = expected.iter().map(BackedCandidate::hash); + let mut candidates_iter = expected + .iter() + .map(|candidate| (candidate.hash(), candidate.descriptor().relay_parent)); + + let mut backed_iter = expected.clone().into_iter(); while let Some(from_job) = receiver.next().await { match from_job { @@ -353,20 +357,25 @@ mod select_candidates { AllMessages::RuntimeApi(Request(_parent_hash, AvailabilityCores(tx))) => tx.send(Ok(mock_availability_cores())).unwrap(), AllMessages::CandidateBacking(CandidateBackingMessage::GetBackedCandidates( - _, hashes, sender, )) => { - let expected_hashes: Vec = - expected.iter().map(BackedCandidate::hash).collect(); + let response: Vec = + backed_iter.by_ref().take(hashes.len()).collect(); + let expected_hashes: Vec<(CandidateHash, Hash)> = response + .iter() + .map(|candidate| (candidate.hash(), candidate.descriptor().relay_parent)) + .collect(); + assert_eq!(expected_hashes, hashes); - let _ = sender.send(expected.clone()); + + let _ = sender.send(response); }, AllMessages::ProspectiveParachains( ProspectiveParachainsMessage::GetBackableCandidate(.., tx), ) => match prospective_parachains_mode { ProspectiveParachainsMode::Enabled { .. } => { - let _ = tx.send(candidates.next()); + let _ = tx.send(candidates_iter.next()); }, ProspectiveParachainsMode::Disabled => panic!("unexpected prospective parachains request"), @@ -624,4 +633,70 @@ mod select_candidates { }, ) } + + #[test] + fn request_receipts_based_on_relay_parent() { + let mock_cores = mock_availability_cores(); + let empty_hash = PersistedValidationData::::default().hash(); + + let mut descriptor_template = dummy_candidate_descriptor(dummy_hash()); + descriptor_template.persisted_validation_data_hash = empty_hash; + let candidate_template = CandidateReceipt { + descriptor: descriptor_template, + commitments_hash: CandidateCommitments::default().hash(), + }; + + let candidates: Vec<_> = std::iter::repeat(candidate_template) + .take(mock_cores.len()) + .enumerate() + .map(|(idx, mut candidate)| { + candidate.descriptor.para_id = idx.into(); + candidate.descriptor.relay_parent = Hash::repeat_byte(idx as u8); + candidate + }) + .collect(); + + // why those particular indices? see the comments on mock_availability_cores() + let expected_candidates: Vec<_> = + [1, 4, 7, 8, 10].iter().map(|&idx| candidates[idx].clone()).collect(); + // Expect prospective parachains subsystem requests. 
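+		// (With the `Enabled` mode, `select_candidates` queries `GetBackableCandidate`
+		// per core, so each mocked candidate comes back paired with its own
+		// distinct relay parent.)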
+ let prospective_parachains_mode = + ProspectiveParachainsMode::Enabled { max_candidate_depth: 0, allowed_ancestry_len: 0 }; + + let expected_backed = expected_candidates + .iter() + .map(|c| BackedCandidate { + candidate: CommittedCandidateReceipt { + descriptor: c.descriptor.clone(), + commitments: Default::default(), + }, + validity_votes: Vec::new(), + validator_indices: default_bitvec(MOCK_GROUP_SIZE), + }) + .collect(); + + test_harness( + |r| mock_overseer(r, expected_backed, prospective_parachains_mode), + |mut tx: TestSubsystemSender| async move { + let result = select_candidates( + &mock_cores, + &[], + &[], + prospective_parachains_mode, + Default::default(), + &mut tx, + ) + .await + .unwrap(); + + result.into_iter().for_each(|c| { + assert!( + expected_candidates.iter().any(|c2| c.candidate.corresponds_to(c2)), + "Failed to find candidate: {:?}", + c, + ) + }); + }, + ) + } } diff --git a/node/overseer/src/tests.rs b/node/overseer/src/tests.rs index 0390c04bfbab..4ac538a7fd3a 100644 --- a/node/overseer/src/tests.rs +++ b/node/overseer/src/tests.rs @@ -786,7 +786,7 @@ fn test_candidate_validation_msg() -> CandidateValidationMessage { fn test_candidate_backing_msg() -> CandidateBackingMessage { let (sender, _) = oneshot::channel(); - CandidateBackingMessage::GetBackedCandidates(Default::default(), Vec::new(), sender) + CandidateBackingMessage::GetBackedCandidates(Vec::new(), sender) } fn test_chain_api_msg() -> ChainApiMessage { diff --git a/node/subsystem-types/src/messages.rs b/node/subsystem-types/src/messages.rs index a6d5878ef2e5..f2e2d6df80e0 100644 --- a/node/subsystem-types/src/messages.rs +++ b/node/subsystem-types/src/messages.rs @@ -75,9 +75,10 @@ pub struct CanSecondRequest { /// Messages received by the Candidate Backing subsystem. #[derive(Debug)] pub enum CandidateBackingMessage { - /// Requests a set of backable candidates that could be backed in a child of the given - /// relay-parent, referenced by its hash. - GetBackedCandidates(Hash, Vec, oneshot::Sender>), + /// Requests a set of backable candidates attested by the subsystem. + /// + /// Each pair is (candidate_hash, candidate_relay_parent). + GetBackedCandidates(Vec<(CandidateHash, Hash)>, oneshot::Sender>), /// Request the subsystem to check whether it's allowed to second given candidate. /// The rule is to only fetch collations that are either built on top of the root /// of some fragment tree or have a parent node which represents backed candidate. @@ -1080,10 +1081,15 @@ pub enum ProspectiveParachainsMessage { /// has been backed. This requires that the candidate was successfully introduced in /// the past. CandidateBacked(ParaId, CandidateHash), - /// Get a backable candidate hash for the given parachain, under the given relay-parent hash, - /// which is a descendant of the given candidate hashes. Returns `None` on the channel - /// if no such candidate exists. - GetBackableCandidate(Hash, ParaId, Vec, oneshot::Sender>), + /// Get a backable candidate hash along with its relay parent for the given parachain, + /// under the given relay-parent hash, which is a descendant of the given candidate hashes. + /// Returns `None` on the channel if no such candidate exists. + GetBackableCandidate( + Hash, + ParaId, + Vec, + oneshot::Sender>, + ), /// Get the hypothetical frontier membership of candidates with the given properties /// under the specified active leaves' fragment trees. 
 	///
diff --git a/roadmap/implementers-guide/src/node/backing/prospective-parachains.md b/roadmap/implementers-guide/src/node/backing/prospective-parachains.md
index c25635fe2a17..a48444a46e40 100644
--- a/roadmap/implementers-guide/src/node/backing/prospective-parachains.md
+++ b/roadmap/implementers-guide/src/node/backing/prospective-parachains.md
@@ -93,8 +93,8 @@ prospective validation data. This is unlikely to change.
   - Sent by the Backing Subsystem after it successfully imports a statement
     giving a candidate the necessary quorum of backing votes.
 - `ProspectiveParachainsMessage::GetBackableCandidate`
-  - Get a backable candidate hash for a given parachain, under a given
-    relay-parent hash, which is a descendant of given candidate hashes.
+  - Get a backable candidate hash along with its relay parent for a given parachain,
+    under a given relay-parent (leaf) hash, which is a descendant of given candidate hashes.
   - Sent by the Provisioner when requesting backable candidates, when
     selecting candidates for a given relay-parent.
 - `ProspectiveParachainsMessage::GetHypotheticalFrontier`
diff --git a/roadmap/implementers-guide/src/types/overseer-protocol.md b/roadmap/implementers-guide/src/types/overseer-protocol.md
index d11538991d83..3d9037699da6 100644
--- a/roadmap/implementers-guide/src/types/overseer-protocol.md
+++ b/roadmap/implementers-guide/src/types/overseer-protocol.md
@@ -345,9 +345,10 @@ enum BitfieldSigningMessage { }
 ```rust
 enum CandidateBackingMessage {
-  /// Requests a set of backable candidates that could be backed in a child of the given
-  /// relay-parent, referenced by its hash.
-  GetBackedCandidates(Hash, Vec<CandidateHash>, ResponseChannel<Vec<BackedCandidate>>),
+  /// Requests a set of backable candidates attested by the subsystem.
+  ///
+  /// Each pair is (candidate_hash, candidate_relay_parent).
+  GetBackedCandidates(Vec<(CandidateHash, Hash)>, oneshot::Sender<Vec<BackedCandidate>>),
   /// Note that the Candidate Backing subsystem should second the given candidate in the context of the
   /// given relay-parent (ref. by hash). This candidate must be validated using the provided PoV.
   /// The PoV is expected to match the `pov_hash` in the descriptor.
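The `selected.0` comparison at the top of this patch backs the new `Error::BackedCandidateOrderingProblem`: the backing subsystem is expected to answer `GetBackedCandidates` with an in-order subsequence of the requested `(CandidateHash, Hash)` pairs. Below is a minimal, self-contained sketch of that invariant; `u64` is a stand-in for the real `CandidateHash` type, and `check_ordering` is an illustrative helper, not a function from the patch:

```rust
// Sketch of the ordering invariant: the backed candidates returned must be
// an in-order subsequence of the requested (candidate_hash, relay_parent)
// pairs. `u64` stands in for the real `CandidateHash` type.
type CandidateHash = u64;

fn check_ordering(
    requested: &[CandidateHash], // hashes from the selected (hash, relay_parent) pairs
    returned: &[CandidateHash],  // hashes of the backed candidates received
) -> Result<(), &'static str> {
    let mut backed_idx = 0;
    for &selected in requested {
        if backed_idx < returned.len() && selected == returned[backed_idx] {
            backed_idx += 1;
        }
    }
    // every returned candidate must have matched a requested one, in order
    if backed_idx == returned.len() {
        Ok(())
    } else {
        Err("BackedCandidateOrderingProblem")
    }
}

fn main() {
    // a subset returned in request order is fine
    assert!(check_ordering(&[1, 2, 3, 4], &[2, 4]).is_ok());
    // the same subset out of order trips the check
    assert!(check_ordering(&[1, 2, 3, 4], &[4, 2]).is_err());
}
```

Keeping the response in request order lets the provisioner verify the pairing with a single cursor (`backed_idx`) instead of building a lookup table.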
From f2aa3b3c3ed342c1b98fb22ce20199861c7d90ce Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Thu, 27 Jul 2023 21:31:14 -0500 Subject: [PATCH 67/76] revert to old `handle_new_activations` logic in some cases (#7514) * revert to old `handle_new_activations` logic * gate sending messages on scheduled cores to max_depth >= 2 * fmt * 2->1 --- node/collation-generation/src/lib.rs | 40 +++++++++++++++++++------- node/collation-generation/src/tests.rs | 35 ++++++++++++++++++++++ 2 files changed, 64 insertions(+), 11 deletions(-) diff --git a/node/collation-generation/src/lib.rs b/node/collation-generation/src/lib.rs index 521011119b3d..e5f80ccb1af3 100644 --- a/node/collation-generation/src/lib.rs +++ b/node/collation-generation/src/lib.rs @@ -41,8 +41,9 @@ use polkadot_node_subsystem::{ SubsystemContext, SubsystemError, SubsystemResult, }; use polkadot_node_subsystem_util::{ - request_availability_cores, request_persisted_validation_data, request_validation_code, - request_validation_code_hash, request_validators, + request_availability_cores, request_persisted_validation_data, + request_staging_async_backing_params, request_validation_code, request_validation_code_hash, + request_validators, }; use polkadot_primitives::{ collator_signature_payload, CandidateCommitments, CandidateDescriptor, CandidateReceipt, @@ -201,13 +202,15 @@ async fn handle_new_activations( for relay_parent in activated { let _relay_parent_timer = metrics.time_new_activations_relay_parent(); - let (availability_cores, validators) = join!( + let (availability_cores, validators, async_backing_params) = join!( request_availability_cores(relay_parent, ctx.sender()).await, request_validators(relay_parent, ctx.sender()).await, + request_staging_async_backing_params(relay_parent, ctx.sender()).await, ); let availability_cores = availability_cores??; let n_validators = validators??.len(); + let async_backing_params = async_backing_params?.ok(); for (core_idx, core) in availability_cores.into_iter().enumerate() { let _availability_core_timer = metrics.time_new_activations_availability_core(); @@ -215,15 +218,30 @@ async fn handle_new_activations( let (scheduled_core, assumption) = match core { CoreState::Scheduled(scheduled_core) => (scheduled_core, OccupiedCoreAssumption::Free), - CoreState::Occupied(occupied_core) => { - // TODO [now]: this assumes that next up == current. - // in practice we should only set `OccupiedCoreAssumption::Included` - // when the candidate occupying the core is also of the same para. - if let Some(scheduled) = occupied_core.next_up_on_available { - (scheduled, OccupiedCoreAssumption::Included) - } else { + CoreState::Occupied(occupied_core) => match async_backing_params { + Some(params) if params.max_candidate_depth >= 1 => { + // maximum candidate depth when building on top of a block + // pending availability is necessarily 1 - the depth of the + // pending block is 0 so the child has depth 1. + + // TODO [now]: this assumes that next up == current. + // in practice we should only set `OccupiedCoreAssumption::Included` + // when the candidate occupying the core is also of the same para. + if let Some(scheduled) = occupied_core.next_up_on_available { + (scheduled, OccupiedCoreAssumption::Included) + } else { + continue + } + }, + _ => { + gum::trace!( + target: LOG_TARGET, + core_idx = %core_idx, + relay_parent = ?relay_parent, + "core is occupied. 
Keep going.", + ); continue - } + }, }, CoreState::Free => { gum::trace!( diff --git a/node/collation-generation/src/tests.rs b/node/collation-generation/src/tests.rs index b7ff4ec2a576..7f55176deed8 100644 --- a/node/collation-generation/src/tests.rs +++ b/node/collation-generation/src/tests.rs @@ -151,6 +151,14 @@ fn requests_availability_per_relay_parent() { Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request(_hash, RuntimeApiRequest::Validators(tx)))) => { tx.send(Ok(vec![dummy_validator(); 3])).unwrap(); } + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::StagingAsyncBackingParams( + tx, + ), + ))) => { + tx.send(Err(RuntimeApiError::NotSupported { runtime_api_name: "doesnt_matter" })).unwrap(); + }, Some(msg) => panic!("didn't expect any other overseer requests given no availability cores; got {:?}", msg), } } @@ -225,6 +233,15 @@ fn requests_validation_data_for_scheduled_matches() { ))) => { tx.send(Ok(vec![dummy_validator(); 3])).unwrap(); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::StagingAsyncBackingParams(tx), + ))) => { + tx.send(Err(RuntimeApiError::NotSupported { + runtime_api_name: "doesnt_matter", + })) + .unwrap(); + }, Some(msg) => { panic!("didn't expect any other overseer requests; got {:?}", msg) }, @@ -313,6 +330,15 @@ fn sends_distribute_collation_message() { ))) => { tx.send(Ok(Some(ValidationCode(vec![1, 2, 3]).hash()))).unwrap(); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::StagingAsyncBackingParams(tx), + ))) => { + tx.send(Err(RuntimeApiError::NotSupported { + runtime_api_name: "doesnt_matter", + })) + .unwrap(); + }, Some(msg @ AllMessages::CollatorProtocol(_)) => { inner_to_collator_protocol.lock().await.push(msg); }, @@ -466,6 +492,15 @@ fn fallback_when_no_validation_code_hash_api() { ))) => { tx.send(Ok(Some(ValidationCode(vec![1, 2, 3])))).unwrap(); }, + Some(AllMessages::RuntimeApi(RuntimeApiMessage::Request( + _hash, + RuntimeApiRequest::StagingAsyncBackingParams(tx), + ))) => { + tx.send(Err(RuntimeApiError::NotSupported { + runtime_api_name: "doesnt_matter", + })) + .unwrap(); + }, Some(msg @ AllMessages::CollatorProtocol(_)) => { inner_to_collator_protocol.lock().await.push(msg); }, From 5625f42a77b0b96a1994ef5c5fcf3bde4fa0a472 Mon Sep 17 00:00:00 2001 From: asynchronous rob Date: Fri, 28 Jul 2023 12:59:57 -0500 Subject: [PATCH 68/76] Omnibus asynchronous backing bugfix PR (#7529) * fix a bug in backing * add some more logs * prospective parachains: take ancestry only up to session bounds * add test --- node/core/backing/src/lib.rs | 10 +- node/core/backing/src/tests/mod.rs | 59 ++++++++++ node/core/prospective-parachains/src/lib.rs | 33 ++++-- node/core/prospective-parachains/src/tests.rs | 102 ++++++++++++++++++ .../src/vstaging/grid.rs | 7 ++ .../src/vstaging/mod.rs | 62 ++++++++++- 6 files changed, 263 insertions(+), 10 deletions(-) diff --git a/node/core/backing/src/lib.rs b/node/core/backing/src/lib.rs index 2dcc6363105b..9b1770c2097d 100644 --- a/node/core/backing/src/lib.rs +++ b/node/core/backing/src/lib.rs @@ -789,9 +789,15 @@ async fn handle_active_leaves_update( // as a result. // // when prospective parachains are disabled, the implicit view is empty, - // which means we'll clean up everything. This is correct. + // which means we'll clean up everything that's not a leaf - the expected behavior + // for pre-asynchronous backing. 
{ - let remaining: HashSet<_> = state.implicit_view.all_allowed_relay_parents().collect(); + let remaining: HashSet<_> = state + .per_leaf + .keys() + .chain(state.implicit_view.all_allowed_relay_parents()) + .collect(); + state.per_relay_parent.retain(|r, _| remaining.contains(&r)); } diff --git a/node/core/backing/src/tests/mod.rs b/node/core/backing/src/tests/mod.rs index c0112d6dde4a..f8546a7fb6ec 100644 --- a/node/core/backing/src/tests/mod.rs +++ b/node/core/backing/src/tests/mod.rs @@ -2060,3 +2060,62 @@ fn cannot_second_multiple_candidates_per_parent() { virtual_overseer }); } + +#[test] +fn new_leaf_view_doesnt_clobber_old() { + let mut test_state = TestState::default(); + let relay_parent_2 = Hash::repeat_byte(1); + assert_ne!(test_state.relay_parent, relay_parent_2); + test_harness(test_state.keystore.clone(), |mut virtual_overseer| async move { + test_startup(&mut virtual_overseer, &test_state).await; + + // New leaf that doesn't clobber old. + { + let old_relay_parent = test_state.relay_parent; + test_state.relay_parent = relay_parent_2; + test_startup(&mut virtual_overseer, &test_state).await; + test_state.relay_parent = old_relay_parent; + } + + let pov = PoV { block_data: BlockData(vec![42, 43, 44]) }; + let pvd = dummy_pvd(); + let validation_code = ValidationCode(vec![1, 2, 3]); + + let expected_head_data = test_state.head_data.get(&test_state.chain_ids[0]).unwrap(); + + let pov_hash = pov.hash(); + let candidate = TestCandidateBuilder { + para_id: test_state.chain_ids[0], + relay_parent: test_state.relay_parent, + pov_hash, + head_data: expected_head_data.clone(), + erasure_root: make_erasure_root(&test_state, pov.clone(), pvd.clone()), + persisted_validation_data_hash: pvd.hash(), + validation_code: validation_code.0.clone(), + ..Default::default() + } + .build(); + + let second = CandidateBackingMessage::Second( + test_state.relay_parent, + candidate.to_plain(), + pvd.clone(), + pov.clone(), + ); + + virtual_overseer.send(FromOrchestra::Communication { msg: second }).await; + + // If the old leaf was clobbered by the first, the seconded candidate + // would be ignored. 
+ assert!( + virtual_overseer + .recv() + .timeout(std::time::Duration::from_millis(500)) + .await + .is_some(), + "first leaf appears to be inactive" + ); + + virtual_overseer + }); +} diff --git a/node/core/prospective-parachains/src/lib.rs b/node/core/prospective-parachains/src/lib.rs index a61bc2c46544..db383f795d0c 100644 --- a/node/core/prospective-parachains/src/lib.rs +++ b/node/core/prospective-parachains/src/lib.rs @@ -43,6 +43,7 @@ use polkadot_node_subsystem::{ }; use polkadot_node_subsystem_util::{ inclusion_emulator::staging::{Constraints, RelayChainBlockInfo}, + request_session_index_for_child, runtime::{prospective_parachains_mode, ProspectiveParachainsMode}, }; use polkadot_primitives::vstaging::{ @@ -594,7 +595,9 @@ fn answer_get_backable_candidate( Some(s) => s, }; - let Some(child_hash) = tree.select_child(&required_path, |candidate| storage.is_backed(candidate)) else { + let Some(child_hash) = + tree.select_child(&required_path, |candidate| storage.is_backed(candidate)) + else { let _ = tx.send(None); return }; @@ -861,9 +864,14 @@ async fn fetch_ancestry( .await; let hashes = rx.map_err(JfyiError::ChainApiRequestCanceled).await??; + let required_session = request_session_index_for_child(relay_hash, ctx.sender()) + .await + .await + .map_err(JfyiError::RuntimeApiRequestCanceled)??; + let mut block_info = Vec::with_capacity(hashes.len()); for hash in hashes { - match fetch_block_info(ctx, cache, hash).await? { + let info = match fetch_block_info(ctx, cache, hash).await? { None => { gum::warn!( target: LOG_TARGET, @@ -872,11 +880,24 @@ async fn fetch_ancestry( ); // Return, however far we got. - return Ok(block_info) - }, - Some(info) => { - block_info.push(info); + break }, + Some(info) => info, + }; + + // The relay chain cannot accept blocks backed from previous sessions, with + // potentially previous validators. This is a technical limitation we need to + // respect here. 
+ + let session = request_session_index_for_child(hash, ctx.sender()) + .await + .await + .map_err(JfyiError::RuntimeApiRequestCanceled)??; + + if session == required_session { + block_info.push(info); + } else { + break } } diff --git a/node/core/prospective-parachains/src/tests.rs b/node/core/prospective-parachains/src/tests.rs index 2220de71090a..af6b1ee6cf55 100644 --- a/node/core/prospective-parachains/src/tests.rs +++ b/node/core/prospective-parachains/src/tests.rs @@ -260,10 +260,27 @@ async fn handle_leaf_activation( tx.send(Ok(ancestry_hashes.clone())).unwrap(); } ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) + ) if parent == *hash => { + tx.send(Ok(1)).unwrap(); + } + ); } for (hash, number) in ancestry_iter { send_block_header(virtual_overseer, hash, number).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) + ) if parent == hash => { + tx.send(Ok(1)).unwrap(); + } + ); } for _ in 0..test_state.availability_cores.len() { @@ -1546,3 +1563,88 @@ fn backwards_compatible() { virtual_overseer }); } + +#[test] +fn uses_ancestry_only_within_session() { + test_harness(|mut virtual_overseer| async move { + let number = 5; + let hash = Hash::repeat_byte(5); + let ancestry_len = 3; + let session = 2; + + let ancestry_hashes = + vec![Hash::repeat_byte(4), Hash::repeat_byte(3), Hash::repeat_byte(2)]; + let session_change_hash = Hash::repeat_byte(3); + + let activated = ActivatedLeaf { + hash, + number, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + }; + + virtual_overseer + .send(FromOrchestra::Signal(OverseerSignal::ActiveLeaves( + ActiveLeavesUpdate::start_work(activated), + ))) + .await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::StagingAsyncBackingParams(tx)) + ) if parent == hash => { + tx.send(Ok(AsyncBackingParams { max_candidate_depth: 0, allowed_ancestry_len: ancestry_len })).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::AvailabilityCores(tx)) + ) if parent == hash => { + tx.send(Ok(Vec::new())).unwrap(); + } + ); + + send_block_header(&mut virtual_overseer, hash, number).await; + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ChainApi( + ChainApiMessage::Ancestors{hash: block_hash, k, response_channel: tx} + ) if block_hash == hash && k == ancestry_len as usize => { + tx.send(Ok(ancestry_hashes.clone())).unwrap(); + } + ); + + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) + ) if parent == hash => { + tx.send(Ok(session)).unwrap(); + } + ); + + for (i, hash) in ancestry_hashes.into_iter().enumerate() { + let number = number - (i + 1) as BlockNumber; + send_block_header(&mut virtual_overseer, hash, number).await; + assert_matches!( + virtual_overseer.recv().await, + AllMessages::RuntimeApi( + RuntimeApiMessage::Request(parent, RuntimeApiRequest::SessionIndexForChild(tx)) + ) if parent == hash => { + if hash == session_change_hash { + tx.send(Ok(session - 1)).unwrap(); + break + } else { + tx.send(Ok(session)).unwrap(); + } + } + ); + } + + virtual_overseer + }); +} diff --git 
a/node/network/statement-distribution/src/vstaging/grid.rs b/node/network/statement-distribution/src/vstaging/grid.rs index 5934e05378e5..aca98d6cc229 100644 --- a/node/network/statement-distribution/src/vstaging/grid.rs +++ b/node/network/statement-distribution/src/vstaging/grid.rs @@ -429,6 +429,13 @@ impl GridTracker { // and receiving groups, we may overwrite a `Full` manifest with a `Acknowledgement` // one. for (v, manifest_mode) in sending_group_manifests.chain(receiving_group_manifests) { + gum::trace!( + target: LOG_TARGET, + validator_index = ?v, + ?manifest_mode, + "Preparing to send manifest/acknowledgement" + ); + self.pending_manifests .entry(v) .or_default() diff --git a/node/network/statement-distribution/src/vstaging/mod.rs b/node/network/statement-distribution/src/vstaging/mod.rs index c86014dab281..372113ab6c14 100644 --- a/node/network/statement-distribution/src/vstaging/mod.rs +++ b/node/network/statement-distribution/src/vstaging/mod.rs @@ -924,6 +924,12 @@ pub(crate) async fn share_local_statement( Some(x) => x, }; + gum::debug!( + target: LOG_TARGET, + statement = ?statement.payload().to_compact(), + "Sharing Statement", + ); + let per_session = match state.per_session.get(&per_relay_parent.session) { Some(s) => s, None => return Ok(()), @@ -1169,8 +1175,14 @@ async fn circulate_statement( } // ship off the network messages to the network bridge. - if !statement_to.is_empty() { + gum::debug!( + target: LOG_TARGET, + ?compact_statement, + n_peers = ?statement_to.len(), + "Sending statement to peers", + ); + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( statement_to, Versioned::VStaging(protocol_vstaging::StatementDistributionMessage::Statement( @@ -1615,7 +1627,7 @@ async fn provide_candidate_to_grid( let grid_view = match per_session.grid_view { Some(ref t) => t, None => { - gum::trace!( + gum::debug!( target: LOG_TARGET, session = relay_parent_state.session, "Cannot handle backable candidate due to lack of topology", @@ -1717,6 +1729,13 @@ async fn provide_candidate_to_grid( } if !manifest_peers.is_empty() { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + n_peers = manifest_peers.len(), + "Sending manifest to peers" + ); + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( manifest_peers, manifest_message.into(), @@ -1725,6 +1744,13 @@ async fn provide_candidate_to_grid( } if !ack_peers.is_empty() { + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + n_peers = ack_peers.len(), + "Sending acknowledgement to peers" + ); + ctx.send_message(NetworkBridgeTxMessage::SendValidationMessage( ack_peers, ack_message.into(), @@ -2051,6 +2077,13 @@ async fn handle_incoming_manifest( manifest: net_protocol::vstaging::BackedCandidateManifest, reputation: &mut ReputationAggregator, ) { + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?manifest.candidate_hash, + ?peer, + "Received incoming manifest", + ); + let x = match handle_incoming_manifest_common( ctx, peer, @@ -2079,6 +2112,11 @@ async fn handle_incoming_manifest( if acknowledge { // 4. if already known within grid (confirmed & backed), acknowledge candidate + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?manifest.candidate_hash, + "Known candidate - acknowledging manifest", + ); let local_knowledge = { let group_size = match per_session.groups.get(manifest.group_index) { @@ -2110,6 +2148,12 @@ async fn handle_incoming_manifest( } } else if !state.candidates.is_confirmed(&manifest.candidate_hash) { // 5. 
if unconfirmed, add request entry + gum::trace!( + target: LOG_TARGET, + candidate_hash = ?manifest.candidate_hash, + "Unknown candidate - requesting", + ); + state .request_manager .get_or_insert(manifest.relay_parent, manifest.candidate_hash, manifest.group_index) @@ -2179,6 +2223,13 @@ async fn handle_incoming_acknowledgement( // the candidate hash is included alongside the bitfields, so the candidate // must be confirmed for us to even process it. + gum::debug!( + target: LOG_TARGET, + candidate_hash = ?acknowledgement.candidate_hash, + ?peer, + "Received incoming acknowledgement", + ); + let candidate_hash = acknowledgement.candidate_hash; let (relay_parent, parent_head_data_hash, group_index, para_id) = { match state.candidates.get_confirmed(&candidate_hash) { @@ -2277,6 +2328,13 @@ pub(crate) async fn handle_backed_candidate_message( Some(s) => s, }; + gum::debug!( + target: LOG_TARGET, + ?candidate_hash, + group_index = ?confirmed.group_index(), + "Candidate Backed - initiating grid distribution & child fetches" + ); + provide_candidate_to_grid( ctx, candidate_hash, From bd6b87d9f61427ebf73575b22d3b9d77faed7ab0 Mon Sep 17 00:00:00 2001 From: Andrei Sandu <54316454+sandreim@users.noreply.github.com> Date: Mon, 14 Aug 2023 21:03:23 +0300 Subject: [PATCH 69/76] fix zombienet tests (#7614) Signed-off-by: Andrei Sandu --- .../async_backing/002-async-backing-runtime-upgrade.toml | 7 ++++++- .../async_backing/003-async-backing-collator-mix.zndsl | 2 -- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml b/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml index cce8510fccbd..e61f7dd47ef6 100644 --- a/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml +++ b/zombienet_tests/async_backing/002-async-backing-runtime-upgrade.toml @@ -46,4 +46,9 @@ addToGenesis = true name = "collator02" image = "{{COL_IMAGE}}" command = "undying-collator" - args = ["-lparachain=debug"] \ No newline at end of file + args = ["-lparachain=debug"] + +[types.Header] +number = "u64" +parent_hash = "Hash" +post_state = "Hash" diff --git a/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl b/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl index 7eb14836d7e3..98436b0459cf 100644 --- a/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl +++ b/zombienet_tests/async_backing/003-async-backing-collator-mix.zndsl @@ -5,8 +5,6 @@ Creds: config # General alice: is up bob: is up -charlie: is up -dave: is up # Check peers alice: reports peers count is at least 3 within 20 seconds From eb4ae4e5f557ecd416d361a0b3606de7a8804745 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Mon, 14 Aug 2023 13:51:56 -0500 Subject: [PATCH 70/76] fix runtime compilation --- runtime/parachains/src/inclusion/mod.rs | 3 +-- runtime/parachains/src/scheduler.rs | 5 +---- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/runtime/parachains/src/inclusion/mod.rs b/runtime/parachains/src/inclusion/mod.rs index 27fbcd0636dd..b2f4137736a2 100644 --- a/runtime/parachains/src/inclusion/mod.rs +++ b/runtime/parachains/src/inclusion/mod.rs @@ -999,8 +999,7 @@ impl Pallet { // make sure that the queue is not overfilled. // we do it here only once since returning false invalidates the whole relay-chain // block. 
-		if para_queue_size.saturating_add(msg_size as u64) > config.max_upward_queue_size as u64
-		{
+		if para_queue_size.saturating_add(msg_size) > config.max_upward_queue_size {
 			return Err(UmpAcceptanceCheckErr::TotalSizeExceeded {
 				total_size: para_queue_size.saturating_add(msg_size).into(),
 				limit: config.max_upward_queue_size.into(),
diff --git a/runtime/parachains/src/scheduler.rs b/runtime/parachains/src/scheduler.rs
index 300cf2fb24a5..1cfe314bbc5c 100644
--- a/runtime/parachains/src/scheduler.rs
+++ b/runtime/parachains/src/scheduler.rs
@@ -434,10 +434,7 @@ impl<T: Config> Pallet<T> {
 	/// Schedule all unassigned cores, where possible. Provide a list of cores that should be
 	/// considered newly-freed along with the reason for them being freed. The list is assumed to be
 	/// sorted in ascending order by core index.
-	pub(crate) fn schedule(
-		just_freed_cores: impl IntoIterator<Item = (CoreIndex, FreedReason)>,
-		now: BlockNumberFor<T>,
-	) {
+	pub(crate) fn schedule(just_freed_cores: impl IntoIterator<Item = (CoreIndex, FreedReason)>) {
 		Self::free_cores(just_freed_cores);
 
 		let cores = AvailabilityCores::<T>::get();

From 876fc04cefbbfeebea9d5ee1cfed606ba12d4d8b Mon Sep 17 00:00:00 2001
From: Robert Habermeier
Date: Mon, 14 Aug 2023 15:01:18 -0500
Subject: [PATCH 71/76] make bitfield distribution tests compile

---
 node/network/bitfield-distribution/src/tests.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/node/network/bitfield-distribution/src/tests.rs b/node/network/bitfield-distribution/src/tests.rs
index e3f5cfae77fa..f79aaa252006 100644
--- a/node/network/bitfield-distribution/src/tests.rs
+++ b/node/network/bitfield-distribution/src/tests.rs
@@ -509,7 +509,7 @@ fn delay_reputation_change() {
 			msg: BitfieldDistributionMessage::NetworkBridgeUpdate(
 				NetworkBridgeEvent::PeerMessage(
 					peer.clone(),
-					msg.clone().into_network_message(),
+					msg.clone().into_network_message(ValidationVersion::V1.into()),
 				),
 			),
 		})
@@ -535,7 +535,7 @@ fn delay_reputation_change() {
 			msg: BitfieldDistributionMessage::NetworkBridgeUpdate(
 				NetworkBridgeEvent::PeerMessage(
 					peer.clone(),
-					msg.clone().into_network_message(),
+					msg.clone().into_network_message(ValidationVersion::V1.into()),
 				),
 			),
 		})

From ebe28f81b5af2febd332d286fdc9ca5fb70a0bb8 Mon Sep 17 00:00:00 2001
From: asynchronous rob
Date: Tue, 15 Aug 2023 12:06:25 -0500
Subject: [PATCH 72/76] attempt to fix zombienet disputes (#7618)

* update metric name
* update some metric names
* avoid cycles when creating fake candidates
* make undying collator more friendly to malformed parents
* fix a bug in malus
---
 .../0001-dispute-valid-block.zndsl            | 22 +++---
 node/malus/src/variants/common.rs             | 73 ++++++++++++++---
 .../undying/collator/src/lib.rs               | 59 ++++++-------
 .../functional/0002-parachains-disputes.zndsl |  6 +-
 .../0003-parachains-garbage-candidate.zndsl   | 10 +--
 ...004-parachains-disputes-past-session.zndsl |  6 +-
 zombienet_tests/misc/0001-paritydb.zndsl      | 20 ++---
 7 files changed, 129 insertions(+), 67 deletions(-)

diff --git a/node/malus/integrationtests/0001-dispute-valid-block.zndsl b/node/malus/integrationtests/0001-dispute-valid-block.zndsl
index f778b0231ba9..737cd4ebd521 100644
--- a/node/malus/integrationtests/0001-dispute-valid-block.zndsl
+++ b/node/malus/integrationtests/0001-dispute-valid-block.zndsl
@@ -16,14 +16,14 @@ bob: reports block height is at least 2
 bob: reports peers count is at least 2
 charlie: reports block height is at least 2
 charlie: reports peers count is at least 2
-alice: reports parachain_candidate_disputes_total is at least 1 within 250 seconds
-bob: reports
parachain_candidate_disputes_total is at least 1 within 90 seconds -charlie: reports parachain_candidate_disputes_total is at least 1 within 90 seconds -alice: reports parachain_candidate_dispute_votes{validity="valid"} is at least 1 within 90 seconds -bob: reports parachain_candidate_dispute_votes{validity="valid"} is at least 2 within 90 seconds -charlie: reports parachain_candidate_dispute_votes{validity="valid"} is at least 2 within 90 seconds -alice: reports parachain_candidate_dispute_concluded{validity="valid"} is at least 1 within 90 seconds -alice: reports parachain_candidate_dispute_concluded{validity="invalid"} is 0 within 90 seconds -bob: reports parachain_candidate_dispute_concluded{validity="valid"} is at least 1 within 90 seconds -charlie: reports parachain_candidate_dispute_concluded{validity="valid"} is at least 1 within 90 seconds -charlie: reports parachain_candidate_dispute_concluded{validity="valid"} is at least 1 within 90 seconds +alice: reports polkadot_parachain_candidate_disputes_total is at least 1 within 250 seconds +bob: reports polkadot_parachain_candidate_disputes_total is at least 1 within 90 seconds +charlie: reports polkadot_parachain_candidate_disputes_total is at least 1 within 90 seconds +alice: reports polkadot_parachain_candidate_dispute_votes{validity="valid"} is at least 1 within 90 seconds +bob: reports polkadot_parachain_candidate_dispute_votes{validity="valid"} is at least 2 within 90 seconds +charlie: reports polkadot_parachain_candidate_dispute_votes{validity="valid"} is at least 2 within 90 seconds +alice: reports polkadot_parachain_candidate_dispute_concluded{validity="valid"} is at least 1 within 90 seconds +alice: reports polkadot_parachain_candidate_dispute_concluded{validity="invalid"} is 0 within 90 seconds +bob: reports polkadot_parachain_candidate_dispute_concluded{validity="valid"} is at least 1 within 90 seconds +charlie: reports polkadot_parachain_candidate_dispute_concluded{validity="valid"} is at least 1 within 90 seconds +charlie: reports polkadot_parachain_candidate_dispute_concluded{validity="valid"} is at least 1 within 90 seconds diff --git a/node/malus/src/variants/common.rs b/node/malus/src/variants/common.rs index ab1dfbbb360a..2a0b926f4853 100644 --- a/node/malus/src/variants/common.rs +++ b/node/malus/src/variants/common.rs @@ -30,6 +30,7 @@ use polkadot_node_subsystem::{ use polkadot_primitives::{ CandidateCommitments, CandidateDescriptor, CandidateReceipt, PersistedValidationData, + PvfExecTimeoutKind, }; use futures::channel::oneshot; @@ -48,6 +49,55 @@ pub enum FakeCandidateValidation { BackingAndApprovalValid, } +impl FakeCandidateValidation { + fn misbehaves_valid(&self) -> bool { + use FakeCandidateValidation::*; + + match *self { + BackingValid | ApprovalValid | BackingAndApprovalValid => true, + _ => false, + } + } + + fn misbehaves_invalid(&self) -> bool { + use FakeCandidateValidation::*; + + match *self { + BackingInvalid | ApprovalInvalid | BackingAndApprovalInvalid => true, + _ => false, + } + } + + fn includes_backing(&self) -> bool { + use FakeCandidateValidation::*; + + match *self { + BackingInvalid | BackingAndApprovalInvalid | BackingValid | BackingAndApprovalValid => + true, + _ => false, + } + } + + fn includes_approval(&self) -> bool { + use FakeCandidateValidation::*; + + match *self { + ApprovalInvalid | + BackingAndApprovalInvalid | + ApprovalValid | + BackingAndApprovalValid => true, + _ => false, + } + } + + fn should_misbehave(&self, timeout: PvfExecTimeoutKind) -> bool { + match timeout { + 
PvfExecTimeoutKind::Backing => self.includes_backing(), + PvfExecTimeoutKind::Approval => self.includes_approval(), + } + } +} + /// Candidate invalidity details #[derive(clap::ValueEnum, Clone, Copy, Debug, PartialEq)] #[value(rename_all = "kebab-case")] @@ -162,11 +212,20 @@ where pub fn create_fake_candidate_commitments( persisted_validation_data: &PersistedValidationData, ) -> CandidateCommitments { + // Backing rejects candidates which output the same head as the parent, + // therefore we must create a new head which is not equal to the parent. + let mut head_data = persisted_validation_data.parent_head.clone(); + if head_data.0.is_empty() { + head_data.0.push(0); + } else { + head_data.0[0] = head_data.0[0].wrapping_add(1); + }; + CandidateCommitments { upward_messages: Default::default(), horizontal_messages: Default::default(), new_validation_code: None, - head_data: persisted_validation_data.parent_head.clone(), + head_data, processed_downward_messages: 0, hrmp_watermark: persisted_validation_data.relay_parent_number, } @@ -224,8 +283,7 @@ where ), } => { match self.fake_validation { - FakeCandidateValidation::ApprovalValid | - FakeCandidateValidation::BackingAndApprovalValid => { + x if x.misbehaves_valid() && x.should_misbehave(timeout) => { // Behave normally if the `PoV` is not known to be malicious. if pov.block_data.0.as_slice() != MALICIOUS_POV { return Some(FromOrchestra::Communication { @@ -278,8 +336,7 @@ where }, } }, - FakeCandidateValidation::ApprovalInvalid | - FakeCandidateValidation::BackingAndApprovalInvalid => { + x if x.misbehaves_invalid() && x.should_misbehave(timeout) => { // Set the validation result to invalid with probability `p` and trigger a // dispute let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); @@ -342,8 +399,7 @@ where ), } => { match self.fake_validation { - FakeCandidateValidation::BackingValid | - FakeCandidateValidation::BackingAndApprovalValid => { + x if x.misbehaves_valid() && x.should_misbehave(timeout) => { // Behave normally if the `PoV` is not known to be malicious. if pov.block_data.0.as_slice() != MALICIOUS_POV { return Some(FromOrchestra::Communication { @@ -385,8 +441,7 @@ where }), } }, - FakeCandidateValidation::BackingInvalid | - FakeCandidateValidation::BackingAndApprovalInvalid => { + x if x.misbehaves_invalid() && x.should_misbehave(timeout) => { // Maliciously set the validation result to invalid for a valid candidate // with probability `p` let behave_maliciously = self.distribution.sample(&mut rand::thread_rng()); diff --git a/parachain/test-parachains/undying/collator/src/lib.rs b/parachain/test-parachains/undying/collator/src/lib.rs index cc0f592dc253..e0ecc6b0997d 100644 --- a/parachain/test-parachains/undying/collator/src/lib.rs +++ b/parachain/test-parachains/undying/collator/src/lib.rs @@ -33,7 +33,9 @@ use std::{ }, time::Duration, }; -use test_parachain_undying::{execute, hash_state, BlockData, GraveyardState, HeadData}; +use test_parachain_undying::{ + execute, hash_state, BlockData, GraveyardState, HeadData, StateMismatch, +}; /// Default PoV size which also drives state size. 
const DEFAULT_POV_SIZE: usize = 1000; @@ -45,7 +47,7 @@ fn calculate_head_and_state_for_number( number: u64, graveyard_size: usize, pvf_complexity: u32, -) -> (HeadData, GraveyardState) { +) -> Result<(HeadData, GraveyardState), StateMismatch> { let index = 0u64; let mut graveyard = vec![0u8; graveyard_size * graveyard_size]; let zombies = 0; @@ -62,13 +64,12 @@ fn calculate_head_and_state_for_number( while head.number < number { let block = BlockData { state, tombstones: 1_000, iterations: pvf_complexity }; - let (new_head, new_state) = - execute(head.hash(), head.clone(), block).expect("Produces valid block"); + let (new_head, new_state) = execute(head.hash(), head.clone(), block)?; head = new_head; state = new_state; } - (head, state) + Ok((head, state)) } /// The state of the undying parachain. @@ -122,39 +123,35 @@ impl State { /// Advance the state and produce a new block based on the given `parent_head`. /// /// Returns the new [`BlockData`] and the new [`HeadData`]. - fn advance(&mut self, parent_head: HeadData) -> (BlockData, HeadData) { + fn advance(&mut self, parent_head: HeadData) -> Result<(BlockData, HeadData), StateMismatch> { self.best_block = parent_head.number; - let state = if let Some(head_data) = self.number_to_head.get(&self.best_block) { - self.head_to_state.get(head_data).cloned().unwrap_or_else(|| { - calculate_head_and_state_for_number( - parent_head.number, - self.graveyard_size, - self.pvf_complexity, - ) - .1 - }) + let state = if let Some(state) = self + .number_to_head + .get(&self.best_block) + .and_then(|head_data| self.head_to_state.get(head_data).cloned()) + { + state } else { let (_, state) = calculate_head_and_state_for_number( parent_head.number, self.graveyard_size, self.pvf_complexity, - ); + )?; state }; // Start with prev state and transaction to execute (place 1000 tombstones). 
let block = BlockData { state, tombstones: 1000, iterations: self.pvf_complexity }; - let (new_head, new_state) = - execute(parent_head.hash(), parent_head, block.clone()).expect("Produces valid block"); + let (new_head, new_state) = execute(parent_head.hash(), parent_head, block.clone())?; let new_head_arc = Arc::new(new_head.clone()); self.head_to_state.insert(new_head_arc.clone(), new_state); self.number_to_head.insert(new_head.number, new_head_arc); - (block, new_head) + Ok((block, new_head)) } } @@ -233,10 +230,21 @@ impl Collator { let seconded_collations = self.seconded_collations.clone(); Box::new(move |relay_parent, validation_data| { - let parent = HeadData::decode(&mut &validation_data.parent_head.0[..]) - .expect("Decodes parent head"); + let parent = match HeadData::decode(&mut &validation_data.parent_head.0[..]) { + Err(err) => { + log::error!("Requested to build on top of malformed head-data: {:?}", err); + return futures::future::ready(None).boxed() + }, + Ok(p) => p, + }; - let (block_data, head_data) = state.lock().unwrap().advance(parent); + let (block_data, head_data) = match state.lock().unwrap().advance(parent.clone()) { + Err(err) => { + log::error!("Unable to build on top of {:?}: {:?}", parent, err); + return futures::future::ready(None).boxed() + }, + Ok(x) => x, + }; log::info!( "created a new collation on relay-parent({}): {:?}", @@ -280,7 +288,6 @@ impl Collator { "Seconded statement should match our collation: {:?}", res.statement.payload() ); - std::process::exit(-1); } seconded_collations.fetch_add(1, Ordering::Relaxed); @@ -394,10 +401,10 @@ mod tests { let collator = Collator::new(1_000, 1); let graveyard_size = collator.state.lock().unwrap().graveyard_size; - let mut head = calculate_head_and_state_for_number(10, graveyard_size, 1).0; + let mut head = calculate_head_and_state_for_number(10, graveyard_size, 1).unwrap().0; for i in 1..10 { - head = collator.state.lock().unwrap().advance(head).1; + head = collator.state.lock().unwrap().advance(head).unwrap().1; assert_eq!(10 + i, head.number); } @@ -414,7 +421,7 @@ mod tests { .clone(); for _ in 1..20 { - second_head = collator.state.lock().unwrap().advance(second_head.clone()).1; + second_head = collator.state.lock().unwrap().advance(second_head.clone()).unwrap().1; } assert_eq!(second_head, head); diff --git a/zombienet_tests/functional/0002-parachains-disputes.zndsl b/zombienet_tests/functional/0002-parachains-disputes.zndsl index 31f2b372f894..b7d797a496bb 100644 --- a/zombienet_tests/functional/0002-parachains-disputes.zndsl +++ b/zombienet_tests/functional/0002-parachains-disputes.zndsl @@ -26,9 +26,9 @@ alice: parachain 2003 block height is at least 10 within 200 seconds # Check if disputes are initiated and concluded. # TODO: check if disputes are concluded faster than initiated. -eve: reports parachain_candidate_disputes_total is at least 10 within 15 seconds -eve: reports parachain_candidate_dispute_concluded{validity="valid"} is at least 10 within 15 seconds -eve: reports parachain_candidate_dispute_concluded{validity="invalid"} is 0 within 15 seconds +eve: reports polkadot_parachain_candidate_disputes_total is at least 10 within 15 seconds +eve: reports polkadot_parachain_candidate_dispute_concluded{validity="valid"} is at least 10 within 15 seconds +eve: reports polkadot_parachain_candidate_dispute_concluded{validity="invalid"} is 0 within 15 seconds # As of , we don't slash on disputes # with `valid` outcome, so there is no offence reported. 
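The metric renames in these `.zndsl` scripts track the names the node actually exports: metrics registered as `parachain_*` are exposed under the node's Prometheus registry prefix, so the scraped name is `polkadot_parachain_candidate_disputes_total`. A small sketch of that prefixing behavior using the `prometheus` crate directly; the `polkadot` prefix value here is an assumption inferred from the renamed assertions, not taken from this patch:

```rust
use prometheus::{Counter, Encoder, Registry, TextEncoder};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // a registry created with a `polkadot` prefix (assumed to mirror the node)
    let registry = Registry::new_custom(Some("polkadot".into()), None)?;

    // registered under the unprefixed name used in the subsystem code
    let disputes_total =
        Counter::new("parachain_candidate_disputes_total", "Total disputes raised.")?;
    registry.register(Box::new(disputes_total.clone()))?;
    disputes_total.inc();

    // the exported name carries the prefix that the zndsl assertions match
    let mut buf = Vec::new();
    TextEncoder::new().encode(&registry.gather(), &mut buf)?;
    let exported = String::from_utf8(buf)?;
    assert!(exported.contains("polkadot_parachain_candidate_disputes_total"));
    Ok(())
}
```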
diff --git a/zombienet_tests/functional/0003-parachains-garbage-candidate.zndsl b/zombienet_tests/functional/0003-parachains-garbage-candidate.zndsl index ccc1ea258f52..60917e1b065a 100644 --- a/zombienet_tests/functional/0003-parachains-garbage-candidate.zndsl +++ b/zombienet_tests/functional/0003-parachains-garbage-candidate.zndsl @@ -32,13 +32,13 @@ honest-validator-2: reports polkadot_parachain_disputes_finality_lag is lower th sleep 30 seconds # Check that garbage parachain blocks included by malicious validators are being disputed. -honest-validator-0: reports parachain_candidate_disputes_total is at least 2 within 15 seconds -honest-validator-1: reports parachain_candidate_disputes_total is at least 2 within 15 seconds -honest-validator-2: reports parachain_candidate_disputes_total is at least 2 within 15 seconds +honest-validator-0: reports polkadot_parachain_candidate_disputes_total is at least 2 within 15 seconds +honest-validator-1: reports polkadot_parachain_candidate_disputes_total is at least 2 within 15 seconds +honest-validator-2: reports polkadot_parachain_candidate_disputes_total is at least 2 within 15 seconds # Disputes should always end as "invalid" -honest-validator-0: reports parachain_candidate_dispute_concluded{validity="invalid"} is at least 2 within 15 seconds -honest-validator-1: reports parachain_candidate_dispute_concluded{validity="valid"} is 0 within 15 seconds +honest-validator-0: reports polkadot_parachain_candidate_dispute_concluded{validity="invalid"} is at least 2 within 15 seconds +honest-validator-1: reports polkadot_parachain_candidate_dispute_concluded{validity="valid"} is 0 within 15 seconds # Check participating in the losing side of a dispute logged malus-validator: log line contains "Voted for a candidate that was concluded invalid." 
within 180 seconds diff --git a/zombienet_tests/functional/0004-parachains-disputes-past-session.zndsl b/zombienet_tests/functional/0004-parachains-disputes-past-session.zndsl index e86cbb398357..8e792f974fe3 100644 --- a/zombienet_tests/functional/0004-parachains-disputes-past-session.zndsl +++ b/zombienet_tests/functional/0004-parachains-disputes-past-session.zndsl @@ -14,7 +14,7 @@ honest-validator-0: parachain 1000 is registered within 100 seconds honest-validator-0: parachain 1000 block height is at least 1 within 300 seconds # There should be disputes initiated -honest-validator-0: reports parachain_candidate_disputes_total is at least 2 within 200 seconds +honest-validator-0: reports polkadot_parachain_candidate_disputes_total is at least 2 within 200 seconds # Stop issuing disputes malus-validator-0: pause @@ -29,9 +29,9 @@ honest-validator-0: reports block height minus finalised block is at least 10 wi alice: resume # Disputes should start concluding now -honest-validator-0: reports parachain_candidate_dispute_concluded{validity="invalid"} is at least 1 within 200 seconds +honest-validator-0: reports polkadot_parachain_candidate_dispute_concluded{validity="invalid"} is at least 1 within 200 seconds # Disputes should always end as "invalid" -honest-validator-0: reports parachain_candidate_dispute_concluded{validity="valid"} is 0 +honest-validator-0: reports polkadot_parachain_candidate_dispute_concluded{validity="valid"} is 0 # Check an unsigned extrinsic is submitted honest-validator: log line contains "Successfully reported pending slash" within 180 seconds diff --git a/zombienet_tests/misc/0001-paritydb.zndsl b/zombienet_tests/misc/0001-paritydb.zndsl index eede8bc11142..4a22311de764 100644 --- a/zombienet_tests/misc/0001-paritydb.zndsl +++ b/zombienet_tests/misc/0001-paritydb.zndsl @@ -55,14 +55,14 @@ validator-8: reports polkadot_parachain_approval_checking_finality_lag is 0 validator-9: reports polkadot_parachain_approval_checking_finality_lag is 0 # Check lag - dispute conclusion -validator-0: reports parachain_candidate_disputes_total is 0 -validator-1: reports parachain_candidate_disputes_total is 0 -validator-2: reports parachain_candidate_disputes_total is 0 -validator-3: reports parachain_candidate_disputes_total is 0 -validator-4: reports parachain_candidate_disputes_total is 0 -validator-5: reports parachain_candidate_disputes_total is 0 -validator-6: reports parachain_candidate_disputes_total is 0 -validator-7: reports parachain_candidate_disputes_total is 0 -validator-8: reports parachain_candidate_disputes_total is 0 -validator-9: reports parachain_candidate_disputes_total is 0 +validator-0: reports polkadot_parachain_candidate_disputes_total is 0 +validator-1: reports polkadot_parachain_candidate_disputes_total is 0 +validator-2: reports polkadot_parachain_candidate_disputes_total is 0 +validator-3: reports polkadot_parachain_candidate_disputes_total is 0 +validator-4: reports polkadot_parachain_candidate_disputes_total is 0 +validator-5: reports polkadot_parachain_candidate_disputes_total is 0 +validator-6: reports polkadot_parachain_candidate_disputes_total is 0 +validator-7: reports polkadot_parachain_candidate_disputes_total is 0 +validator-8: reports polkadot_parachain_candidate_disputes_total is 0 +validator-9: reports polkadot_parachain_candidate_disputes_total is 0 From 5115d952dd1ff54d87ada02463724d55450b7444 Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Tue, 15 Aug 2023 12:51:55 -0500 Subject: [PATCH 73/76] fmt --- 
node/network/collator-protocol/src/lib.rs | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/node/network/collator-protocol/src/lib.rs b/node/network/collator-protocol/src/lib.rs index 063518861610..62c033954f75 100644 --- a/node/network/collator-protocol/src/lib.rs +++ b/node/network/collator-protocol/src/lib.rs @@ -123,17 +123,16 @@ impl CollatorProtocolSubsystem { request_receiver_v1, request_receiver_vstaging, metrics, - } => - collator_side::run( - ctx, - peer_id, - collator_pair, - request_receiver_v1, - request_receiver_vstaging, - metrics, - ) - .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) - .boxed(), + } => collator_side::run( + ctx, + peer_id, + collator_pair, + request_receiver_v1, + request_receiver_vstaging, + metrics, + ) + .map_err(|e| SubsystemError::with_origin("collator-protocol", e)) + .boxed(), ProtocolSide::None => return DummySubsystem.start(ctx), }; From e0d0d877336fe2d7cc785f9b132e40f8103b59da Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Fri, 18 Aug 2023 00:23:47 +0400 Subject: [PATCH 74/76] clippy --- node/subsystem-util/src/inclusion_emulator/staging.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/subsystem-util/src/inclusion_emulator/staging.rs b/node/subsystem-util/src/inclusion_emulator/staging.rs index ca1e14cae04b..a4b85775981d 100644 --- a/node/subsystem-util/src/inclusion_emulator/staging.rs +++ b/node/subsystem-util/src/inclusion_emulator/staging.rs @@ -1226,7 +1226,7 @@ mod tests { let constraints = make_constraints(); let mut candidate = make_candidate(&constraints, &relay_parent); - let expected_code = constraints.validation_code_hash.clone(); + let expected_code = constraints.validation_code_hash; let got_code = ValidationCode(vec![9, 9, 9]).hash(); candidate.validation_code_hash = got_code; From 17e2e2ecb8608e834333c9be3b363f7361ca179f Mon Sep 17 00:00:00 2001 From: Robert Habermeier Date: Thu, 17 Aug 2023 16:21:12 -0500 Subject: [PATCH 75/76] add RUN_IN_CONTAINER to new ZombieNet tests (#7631) --- scripts/ci/gitlab/pipeline/zombienet.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/scripts/ci/gitlab/pipeline/zombienet.yml b/scripts/ci/gitlab/pipeline/zombienet.yml index 828506dfcb7a..62e081d1de0e 100644 --- a/scripts/ci/gitlab/pipeline/zombienet.yml +++ b/scripts/ci/gitlab/pipeline/zombienet.yml @@ -355,6 +355,7 @@ zombienet-tests-async-backing-compatibility: - job: build-linux-stable artifacts: true variables: + RUN_IN_CONTAINER: "1" GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/async_backing" before_script: - echo "Zombie-net Tests Config" @@ -387,6 +388,7 @@ zombienet-tests-async-backing-runtime-upgrade: - job: build-linux-stable artifacts: true variables: + RUN_IN_CONTAINER: "1" GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/async_backing" before_script: - echo "Zombie-net Tests Config" @@ -407,7 +409,7 @@ zombienet-tests-async-backing-runtime-upgrade: retry: 2 tags: - zombienet-polkadot-integration-test - + zombienet-tests-async-backing-collator-mix: stage: zombienet extends: @@ -420,6 +422,7 @@ zombienet-tests-async-backing-collator-mix: - job: build-linux-stable artifacts: true variables: + RUN_IN_CONTAINER: "1" GH_DIR: "https://github.com/paritytech/polkadot/tree/${CI_COMMIT_SHORT_SHA}/zombienet_tests/async_backing" before_script: - echo "Zombie-net Tests Config" @@ -438,4 +441,4 @@ zombienet-tests-async-backing-collator-mix: 
allow_failure: false retry: 2 tags: - - zombienet-polkadot-integration-test \ No newline at end of file + - zombienet-polkadot-integration-test From 27bcad9564fbe3142af6806f5cbfbcfc7f2d73d3 Mon Sep 17 00:00:00 2001 From: Chris Sosnin Date: Fri, 18 Aug 2023 11:28:29 +0400 Subject: [PATCH 76/76] remove duplicated migration happened because of master-merge --- runtime/rococo/src/lib.rs | 2 -- runtime/westend/src/lib.rs | 2 -- 2 files changed, 4 deletions(-) diff --git a/runtime/rococo/src/lib.rs b/runtime/rococo/src/lib.rs index 8ca2322f8e36..fb2a56c8100c 100644 --- a/runtime/rococo/src/lib.rs +++ b/runtime/rococo/src/lib.rs @@ -1549,8 +1549,6 @@ pub mod migrations { pub type Unreleased = ( pallet_society::migrations::VersionCheckedMigrateToV2, pallet_im_online::migration::v1::Migration, - /* Asynchronous backing migration */ - parachains_scheduler::migration::v1::MigrateToV1, parachains_configuration::migration::v7::MigrateToV7, assigned_slots::migration::v1::VersionCheckedMigrateToV1, parachains_scheduler::migration::v1::MigrateToV1, diff --git a/runtime/westend/src/lib.rs b/runtime/westend/src/lib.rs index fc5e7f1e692d..3ade28c51fba 100644 --- a/runtime/westend/src/lib.rs +++ b/runtime/westend/src/lib.rs @@ -1287,8 +1287,6 @@ pub mod migrations { /// Unreleased migrations. Add new ones here: pub type Unreleased = ( pallet_im_online::migration::v1::Migration, - /* Asynchronous backing migration */ - parachains_scheduler::migration::v1::MigrateToV1, parachains_configuration::migration::v7::MigrateToV7, assigned_slots::migration::v1::VersionCheckedMigrateToV1, parachains_scheduler::migration::v1::MigrateToV1,