diff --git a/Cargo.lock b/Cargo.lock index e2427d06fd28..bd41f402725a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6109,6 +6109,25 @@ dependencies = [ "tracing", ] +[[package]] +name = "polkadot-node-core-chain-selection" +version = "0.1.0" +dependencies = [ + "assert_matches", + "futures 0.3.15", + "kvdb", + "parity-scale-codec", + "parking_lot 0.11.1", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-node-subsystem-test-helpers", + "polkadot-node-subsystem-util", + "polkadot-primitives", + "sp-core", + "thiserror", + "tracing", +] + [[package]] name = "polkadot-node-core-dispute-coordinator" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index be022f7b8771..ef5368e58e3c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,6 +48,7 @@ members = [ "node/core/bitfield-signing", "node/core/candidate-validation", "node/core/chain-api", + "node/core/chain-selection", "node/core/dispute-coordinator", "node/core/dispute-participation", "node/core/parachains-inherent", diff --git a/node/core/chain-selection/Cargo.toml b/node/core/chain-selection/Cargo.toml new file mode 100644 index 000000000000..ee498427ea0d --- /dev/null +++ b/node/core/chain-selection/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "polkadot-node-core-chain-selection" +description = "Chain Selection Subsystem" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +futures = "0.3.15" +tracing = "0.1.26" +polkadot-primitives = { path = "../../../primitives" } +polkadot-node-primitives = { path = "../../primitives" } +polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } +polkadot-node-subsystem-util = { path = "../../subsystem-util" } +kvdb = "0.9.0" +thiserror = "1.0.23" +parity-scale-codec = "2" + +[dev-dependencies] +polkadot-node-subsystem-test-helpers = { path = "../../subsystem-test-helpers" } +sp-core = { git = "https://github.com/paritytech/substrate", branch = "master" } +parking_lot = "0.11" 
+assert_matches = "1" diff --git a/node/core/chain-selection/src/backend.rs b/node/core/chain-selection/src/backend.rs new file mode 100644 index 000000000000..160825b757e7 --- /dev/null +++ b/node/core/chain-selection/src/backend.rs @@ -0,0 +1,235 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! An abstraction over storage used by the chain selection subsystem. +//! +//! This provides both a [`Backend`] trait and an [`OverlayedBackend`] +//! struct which allows in-memory changes to be applied on top of a +//! [`Backend`], maintaining consistency between queries and temporary writes, +//! before any commit to the underlying storage is made. + +use polkadot_primitives::v1::{BlockNumber, Hash}; + +use std::collections::HashMap; + +use crate::{Error, LeafEntrySet, BlockEntry, Timestamp}; + +pub(super) enum BackendWriteOp { + WriteBlockEntry(BlockEntry), + WriteBlocksByNumber(BlockNumber, Vec), + WriteViableLeaves(LeafEntrySet), + WriteStagnantAt(Timestamp, Vec), + DeleteBlocksByNumber(BlockNumber), + DeleteBlockEntry(Hash), + DeleteStagnantAt(Timestamp), +} + +/// An abstraction over backend storage for the logic of this subsystem. +pub(super) trait Backend { + /// Load a block entry from the DB. + fn load_block_entry(&self, hash: &Hash) -> Result, Error>; + /// Load the active-leaves set. 
+ fn load_leaves(&self) -> Result; + /// Load the stagnant list at the given timestamp. + fn load_stagnant_at(&self, timestamp: Timestamp) -> Result, Error>; + /// Load all stagnant lists up to and including the given unix timestamp + /// in ascending order. + fn load_stagnant_at_up_to(&self, up_to: Timestamp) + -> Result)>, Error>; + /// Load the earliest kept block number. + fn load_first_block_number(&self) -> Result, Error>; + /// Load blocks by number. + fn load_blocks_by_number(&self, number: BlockNumber) -> Result, Error>; + + /// Atomically write the list of operations, with later operations taking precedence over prior. + fn write(&mut self, ops: I) -> Result<(), Error> + where I: IntoIterator; +} + +/// An in-memory overlay over the backend. +/// +/// This maintains read-only access to the underlying backend, but can be +/// converted into a set of write operations which will, when written to +/// the underlying backend, give the same view as the state of the overlay. +pub(super) struct OverlayedBackend<'a, B: 'a> { + inner: &'a B, + + // `None` means 'deleted', missing means query inner. + block_entries: HashMap>, + // `None` means 'deleted', missing means query inner. + blocks_by_number: HashMap>>, + // 'None' means 'deleted', missing means query inner. + stagnant_at: HashMap>>, + // 'None' means query inner. 
+ leaves: Option, +} + +impl<'a, B: 'a + Backend> OverlayedBackend<'a, B> { + pub(super) fn new(backend: &'a B) -> Self { + OverlayedBackend { + inner: backend, + block_entries: HashMap::new(), + blocks_by_number: HashMap::new(), + stagnant_at: HashMap::new(), + leaves: None, + } + } + + pub(super) fn load_block_entry(&self, hash: &Hash) -> Result, Error> { + if let Some(val) = self.block_entries.get(&hash) { + return Ok(val.clone()) + } + + self.inner.load_block_entry(hash) + } + + pub(super) fn load_blocks_by_number(&self, number: BlockNumber) -> Result, Error> { + if let Some(val) = self.blocks_by_number.get(&number) { + return Ok(val.as_ref().map_or(Vec::new(), Clone::clone)); + } + + self.inner.load_blocks_by_number(number) + } + + pub(super) fn load_leaves(&self) -> Result { + if let Some(ref set) = self.leaves { + return Ok(set.clone()) + } + + self.inner.load_leaves() + } + + pub(super) fn load_stagnant_at(&self, timestamp: Timestamp) -> Result, Error> { + if let Some(val) = self.stagnant_at.get(×tamp) { + return Ok(val.as_ref().map_or(Vec::new(), Clone::clone)); + } + + self.inner.load_stagnant_at(timestamp) + } + + pub(super) fn write_block_entry(&mut self, entry: BlockEntry) { + self.block_entries.insert(entry.block_hash, Some(entry)); + } + + pub(super) fn delete_block_entry(&mut self, hash: &Hash) { + self.block_entries.insert(*hash, None); + } + + pub(super) fn write_blocks_by_number(&mut self, number: BlockNumber, blocks: Vec) { + if blocks.is_empty() { + self.blocks_by_number.insert(number, None); + } else { + self.blocks_by_number.insert(number, Some(blocks)); + } + } + + pub(super) fn delete_blocks_by_number(&mut self, number: BlockNumber) { + self.blocks_by_number.insert(number, None); + } + + pub(super) fn write_leaves(&mut self, leaves: LeafEntrySet) { + self.leaves = Some(leaves); + } + + pub(super) fn write_stagnant_at(&mut self, timestamp: Timestamp, hashes: Vec) { + self.stagnant_at.insert(timestamp, Some(hashes)); + } + + pub(super) fn 
delete_stagnant_at(&mut self, timestamp: Timestamp) { + self.stagnant_at.insert(timestamp, None); + } + + /// Transform this backend into a set of write-ops to be written to the + /// inner backend. + pub(super) fn into_write_ops(self) -> impl Iterator { + let block_entry_ops = self.block_entries.into_iter().map(|(h, v)| match v { + Some(v) => BackendWriteOp::WriteBlockEntry(v), + None => BackendWriteOp::DeleteBlockEntry(h), + }); + + let blocks_by_number_ops = self.blocks_by_number.into_iter().map(|(n, v)| match v { + Some(v) => BackendWriteOp::WriteBlocksByNumber(n, v), + None => BackendWriteOp::DeleteBlocksByNumber(n), + }); + + let leaf_ops = self.leaves.into_iter().map(BackendWriteOp::WriteViableLeaves); + + let stagnant_at_ops = self.stagnant_at.into_iter().map(|(n, v)| match v { + Some(v) => BackendWriteOp::WriteStagnantAt(n, v), + None => BackendWriteOp::DeleteStagnantAt(n), + }); + + block_entry_ops + .chain(blocks_by_number_ops) + .chain(leaf_ops) + .chain(stagnant_at_ops) + } +} + +/// Attempt to find the given ancestor in the chain with given head. +/// +/// If the ancestor is the most recently finalized block, and the `head` is +/// a known unfinalized block, this will return `true`. +/// +/// If the ancestor is an unfinalized block and `head` is known, this will +/// return true if `ancestor` is in `head`'s chain. +/// +/// If the ancestor is an older finalized block, this will return `false`. +fn contains_ancestor( + backend: &impl Backend, + head: Hash, + ancestor: Hash, +) -> Result { + let mut current_hash = head; + loop { + if current_hash == ancestor { return Ok(true) } + match backend.load_block_entry(¤t_hash)? { + Some(e) => { current_hash = e.parent_hash } + None => break + } + } + + Ok(false) +} + +/// This returns the best unfinalized leaf containing the required block. +/// +/// If the required block is finalized but not the most recent finalized block, +/// this will return `None`. 
+/// +/// If the required block is unfinalized but not an ancestor of any viable leaf, +/// this will return `None`. +// +// Note: this is O(N^2) in the depth of `required` and the number of leaves. +// We expect the number of unfinalized blocks to be small, as in, to not exceed +// single digits in practice, and exceedingly unlikely to surpass 1000. +// +// However, if we need to, we could implement some type of skip-list for +// fast ancestry checks. +pub(super) fn find_best_leaf_containing( + backend: &impl Backend, + required: Hash, +) -> Result, Error> { + let leaves = backend.load_leaves()?; + for leaf in leaves.into_hashes_descending() { + if contains_ancestor(backend, leaf, required)? { + return Ok(Some(leaf)) + } + } + + // If there are no viable leaves containing the ancestor + Ok(None) +} diff --git a/node/core/chain-selection/src/lib.rs b/node/core/chain-selection/src/lib.rs new file mode 100644 index 000000000000..dddfc2590d33 --- /dev/null +++ b/node/core/chain-selection/src/lib.rs @@ -0,0 +1,574 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Implements the Chain Selection Subsystem. 
+ +use polkadot_primitives::v1::{BlockNumber, Hash, Header, ConsensusLog}; +use polkadot_node_primitives::BlockWeight; +use polkadot_subsystem::{ + Subsystem, SubsystemContext, SubsystemError, SpawnedSubsystem, + OverseerSignal, FromOverseer, + messages::{ChainSelectionMessage, ChainApiMessage}, + errors::ChainApiError, +}; + +use parity_scale_codec::Error as CodecError; +use futures::channel::oneshot; +use futures::prelude::*; + +use std::time::{UNIX_EPOCH, SystemTime}; + +use crate::backend::{Backend, OverlayedBackend, BackendWriteOp}; + +mod backend; +mod tree; + +#[cfg(test)] +mod tests; + +const LOG_TARGET: &str = "parachain::chain-selection"; +/// Timestamp based on the 1 Jan 1970 UNIX base, which is persistent across node restarts and OS reboots. +type Timestamp = u64; + +#[derive(Debug, Clone)] +enum Approval { + // Approved + Approved, + // Unapproved but not stagnant + Unapproved, + // Unapproved and stagnant. + Stagnant, +} + +impl Approval { + fn is_stagnant(&self) -> bool { + matches!(*self, Approval::Stagnant) + } +} + +#[derive(Debug, Clone)] +struct ViabilityCriteria { + // Whether this block has been explicitly reverted by one of its descendants. + explicitly_reverted: bool, + // The approval state of this block specifically. + approval: Approval, + // The earliest unviable ancestor - the hash of the earliest unfinalized + // block in the ancestry which is explicitly reverted or stagnant. + earliest_unviable_ancestor: Option, +} + +impl ViabilityCriteria { + fn is_viable(&self) -> bool { + self.is_parent_viable() && self.is_explicitly_viable() + } + + // Whether the current block is explicitly viable. + // That is, whether the current block is neither reverted nor stagnant. + fn is_explicitly_viable(&self) -> bool { + !self.explicitly_reverted && !self.approval.is_stagnant() + } + + // Whether the parent is viable. This assumes that the parent + // descends from the finalized chain. 
+ fn is_parent_viable(&self) -> bool { + self.earliest_unviable_ancestor.is_none() + } +} + +// Light entries describing leaves of the chain. +// +// These are ordered first by weight and then by block number. +#[derive(Debug, Clone, PartialEq)] +struct LeafEntry { + weight: BlockWeight, + block_number: BlockNumber, + block_hash: Hash, +} + +impl PartialOrd for LeafEntry { + fn partial_cmp(&self, other: &Self) -> Option { + let ord = self.weight.cmp(&other.weight) + .then(self.block_number.cmp(&other.block_number)); + + if !matches!(ord, std::cmp::Ordering::Equal) { Some(ord) } else { None } + } +} + +#[derive(Debug, Default, Clone)] +struct LeafEntrySet { + inner: Vec +} + +impl LeafEntrySet { + fn remove(&mut self, hash: &Hash) -> bool { + match self.inner.iter().position(|e| &e.block_hash == hash) { + None => false, + Some(i) => { + self.inner.remove(i); + true + } + } + } + + fn insert(&mut self, new: LeafEntry) { + let mut pos = None; + for (i, e) in self.inner.iter().enumerate() { + if e == &new { return } + if e < &new { + pos = Some(i); + break + } + } + + match pos { + None => self.inner.push(new), + Some(i) => self.inner.insert(i, new), + } + } + + fn into_hashes_descending(self) -> impl Iterator { + self.inner.into_iter().map(|e| e.block_hash) + } +} + +#[derive(Debug, Clone)] +struct BlockEntry { + block_hash: Hash, + block_number: BlockNumber, + parent_hash: Hash, + children: Vec, + viability: ViabilityCriteria, + weight: BlockWeight, +} + +impl BlockEntry { + fn leaf_entry(&self) -> LeafEntry { + LeafEntry { + block_hash: self.block_hash, + block_number: self.block_number, + weight: self.weight, + } + } + + fn non_viable_ancestor_for_child(&self) -> Option { + if self.viability.is_viable() { + None + } else { + self.viability.earliest_unviable_ancestor.or(Some(self.block_hash)) + } + } +} + +#[derive(Debug, thiserror::Error)] +#[allow(missing_docs)] +pub enum Error { + #[error(transparent)] + ChainApi(#[from] ChainApiError), + + #[error(transparent)] 
+ Io(#[from] std::io::Error), + + #[error(transparent)] + Oneshot(#[from] oneshot::Canceled), + + #[error(transparent)] + Subsystem(#[from] SubsystemError), + + #[error(transparent)] + Codec(#[from] CodecError), +} + +impl Error { + fn trace(&self) { + match self { + // don't spam the log with spurious errors + Self::Oneshot(_) => tracing::debug!(target: LOG_TARGET, err = ?self), + // it's worth reporting otherwise + _ => tracing::warn!(target: LOG_TARGET, err = ?self), + } + } +} + +fn timestamp_now() -> Timestamp { + // `SystemTime` is notoriously non-monotonic, so our timers might not work + // exactly as expected. Regardless, stagnation is detected on the order of minutes, + // and slippage of a few seconds in either direction won't cause any major harm. + // + // The exact time that a block becomes stagnant in the local node is always expected + // to differ from other nodes due to network asynchrony and delays in block propagation. + // Non-monotonicity exarcerbates that somewhat, but not meaningfully. + + match SystemTime::now().duration_since(UNIX_EPOCH) { + Ok(d) => d.as_secs(), + Err(e) => { + tracing::warn!( + target: LOG_TARGET, + err = ?e, + "Current time is before unix epoch. Validation will not work correctly." + ); + + 0 + } + } +} + +fn stagnant_timeout_from_now() -> Timestamp { + // If a block isn't approved in 120 seconds, nodes will abandon it + // and begin building on another chain. + const STAGNANT_TIMEOUT: Timestamp = 120; + + timestamp_now() + STAGNANT_TIMEOUT +} + +// TODO https://github.com/paritytech/polkadot/issues/3293: +// +// This is used just so we can have a public function that calls +// `run` and eliminates all the unused errors. +// +// Should be removed when the real implementation is done. 
+struct VoidBackend; + +impl Backend for VoidBackend { + fn load_block_entry(&self, _: &Hash) -> Result, Error> { + Ok(None) + } + fn load_leaves(&self) -> Result { + Ok(LeafEntrySet::default()) + } + fn load_stagnant_at(&self, _: Timestamp) -> Result, Error> { + Ok(Vec::new()) + } + fn load_stagnant_at_up_to(&self, _: Timestamp) + -> Result)>, Error> + { + Ok(Vec::new()) + } + fn load_first_block_number(&self) -> Result, Error> { + Ok(None) + } + fn load_blocks_by_number(&self, _: BlockNumber) -> Result, Error> { + Ok(Vec::new()) + } + + fn write(&mut self, _: I) -> Result<(), Error> + where I: IntoIterator + { + Ok(()) + } +} + +/// The chain selection subsystem. +pub struct ChainSelectionSubsystem; + +impl Subsystem for ChainSelectionSubsystem + where Context: SubsystemContext +{ + fn start(self, ctx: Context) -> SpawnedSubsystem { + let backend = VoidBackend; + SpawnedSubsystem { + future: run(ctx, backend).map(|()| Ok(())).boxed(), + name: "chain-selection-subsystem", + } + } +} + +async fn run(mut ctx: Context, mut backend: B) + where + Context: SubsystemContext, + B: Backend, +{ + loop { + let res = run_iteration(&mut ctx, &mut backend).await; + match res { + Err(e) => { + e.trace(); + + if let Error::Subsystem(SubsystemError::Context(_)) = e { + break; + } + } + Ok(()) => { + tracing::info!(target: LOG_TARGET, "received `Conclude` signal, exiting"); + break; + } + } + } +} + +// Run the subsystem until an error is encountered or a `conclude` signal is received. +// Most errors are non-fatal and should lead to another call to this function. +// +// A return value of `Ok` indicates that an exit should be made, while non-fatal errors +// lead to another call to this function. +async fn run_iteration(ctx: &mut Context, backend: &mut B) + -> Result<(), Error> + where + Context: SubsystemContext, + B: Backend, +{ + // TODO https://github.com/paritytech/polkadot/issues/3293: Add stagnant checking timer loop. + loop { + match ctx.recv().await? 
{ + FromOverseer::Signal(OverseerSignal::Conclude) => { + return Ok(()) + } + FromOverseer::Signal(OverseerSignal::ActiveLeaves(update)) => { + for leaf in update.activated { + let write_ops = handle_active_leaf( + ctx, + &*backend, + leaf.hash, + ).await?; + + backend.write(write_ops)?; + } + } + FromOverseer::Signal(OverseerSignal::BlockFinalized(h, n)) => { + handle_finalized_block(backend, h, n)? + } + FromOverseer::Communication { msg } => match msg { + ChainSelectionMessage::Approved(hash) => { + handle_approved_block(backend, hash)? + } + ChainSelectionMessage::Leaves(tx) => { + let leaves = load_leaves(ctx, &*backend).await?; + let _ = tx.send(leaves); + } + ChainSelectionMessage::BestLeafContaining(required, tx) => { + let best_containing = crate::backend::find_best_leaf_containing( + &*backend, + required, + )?; + + // note - this may be none if the finalized block is + // a leaf. this is fine according to the expected usage of the + // function. `None` responses should just `unwrap_or(required)`, + // so if the required block is the finalized block, then voilá. + + let _ = tx.send(best_containing); + } + } + }; + } +} + +async fn fetch_finalized( + ctx: &mut impl SubsystemContext, +) -> Result, Error> { + let (number_tx, number_rx) = oneshot::channel(); + let (hash_tx, hash_rx) = oneshot::channel(); + + ctx.send_message(ChainApiMessage::FinalizedBlockNumber(number_tx).into()).await; + + let number = number_rx.await??; + + ctx.send_message(ChainApiMessage::FinalizedBlockHash(number, hash_tx).into()).await; + + match hash_rx.await?? 
{ + None => { + tracing::warn!( + target: LOG_TARGET, + number, + "Missing hash for finalized block number" + ); + + return Ok(None) + } + Some(h) => Ok(Some((h, number))) + } +} + +async fn fetch_header( + ctx: &mut impl SubsystemContext, + hash: Hash, +) -> Result, Error> { + let (h_tx, h_rx) = oneshot::channel(); + ctx.send_message(ChainApiMessage::BlockHeader(hash, h_tx).into()).await; + + h_rx.await?.map_err(Into::into) +} + +async fn fetch_block_weight( + ctx: &mut impl SubsystemContext, + hash: Hash, +) -> Result, Error> { + let (tx, rx) = oneshot::channel(); + ctx.send_message(ChainApiMessage::BlockWeight(hash, tx).into()).await; + + rx.await?.map_err(Into::into) +} + +// Handle a new active leaf. +async fn handle_active_leaf( + ctx: &mut impl SubsystemContext, + backend: &impl Backend, + hash: Hash, +) -> Result, Error> { + let lower_bound = match backend.load_first_block_number()? { + Some(l) => { + // We want to iterate back to finalized, and first block number + // is assumed to be 1 above finalized - the implicit root of the + // tree. + l.saturating_sub(1) + }, + None => fetch_finalized(ctx).await?.map_or(1, |(_, n)| n), + }; + + let header = match fetch_header(ctx, hash).await? { + None => { + tracing::warn!( + target: LOG_TARGET, + ?hash, + "Missing header for new head", + ); + return Ok(Vec::new()) + } + Some(h) => h, + }; + + let new_blocks = polkadot_node_subsystem_util::determine_new_blocks( + ctx.sender(), + |h| backend.load_block_entry(h).map(|b| b.is_some()), + hash, + &header, + lower_bound, + ).await?; + + let mut overlay = OverlayedBackend::new(backend); + + // determine_new_blocks gives blocks in descending order. + // for this, we want ascending order. + for (hash, header) in new_blocks.into_iter().rev() { + let weight = match fetch_block_weight(ctx, hash).await? { + None => { + tracing::warn!( + target: LOG_TARGET, + ?hash, + "Missing block weight for new head. 
Skipping chain.", + ); + + // If we don't know the weight, we can't import the block. + // And none of its descendents either. + break; + } + Some(w) => w, + }; + + let reversion_logs = extract_reversion_logs(&header); + crate::tree::import_block( + &mut overlay, + hash, + header.number, + header.parent_hash, + reversion_logs, + weight, + )?; + } + + Ok(overlay.into_write_ops().collect()) +} + +// Extract all reversion logs from a header in ascending order. +// +// Ignores logs with number >= the block header number. +fn extract_reversion_logs(header: &Header) -> Vec { + let number = header.number; + let mut logs = header.digest.logs() + .iter() + .enumerate() + .filter_map(|(i, d)| match ConsensusLog::from_digest_item(d) { + Err(e) => { + tracing::warn!( + target: LOG_TARGET, + err = ?e, + index = i, + block_hash = ?header.hash(), + "Digest item failed to encode" + ); + + None + } + Ok(Some(ConsensusLog::Revert(b))) if b < number => Some(b), + Ok(Some(ConsensusLog::Revert(b))) => { + tracing::warn!( + target: LOG_TARGET, + revert_target = b, + block_number = number, + block_hash = ?header.hash(), + "Block issued invalid revert digest targeting itself or future" + ); + + None + } + Ok(_) => None, + }) + .collect::>(); + + logs.sort(); + + logs +} + +// Handle a finalized block event. +fn handle_finalized_block( + backend: &mut impl Backend, + finalized_hash: Hash, + finalized_number: BlockNumber, +) -> Result<(), Error> { + let ops = crate::tree::finalize_block( + &*backend, + finalized_hash, + finalized_number, + )?.into_write_ops(); + + backend.write(ops) +} + +// Handle an approved block event. +fn handle_approved_block( + backend: &mut impl Backend, + approved_block: Hash, +) -> Result<(), Error> { + let ops = { + let mut overlay = OverlayedBackend::new(&*backend); + + crate::tree::approve_block( + &mut overlay, + approved_block, + )?; + + overlay.into_write_ops() + }; + + backend.write(ops) +} + +// Load the leaves from the backend. 
If there are no leaves, then return +// the finalized block. +async fn load_leaves( + ctx: &mut impl SubsystemContext, + backend: &impl Backend, +) -> Result, Error> { + let leaves: Vec<_> = backend.load_leaves()? + .into_hashes_descending() + .collect(); + + if leaves.is_empty() { + Ok(fetch_finalized(ctx).await?.map_or(Vec::new(), |(h, _)| vec![h])) + } else { + Ok(leaves) + } +} diff --git a/node/core/chain-selection/src/tests.rs b/node/core/chain-selection/src/tests.rs new file mode 100644 index 000000000000..945578a47e6e --- /dev/null +++ b/node/core/chain-selection/src/tests.rs @@ -0,0 +1,1909 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tests for the subsystem. +//! +//! These primarily revolve around having a backend which is shared between +//! both the test code and the tested subsystem, and which also gives the +//! test code the ability to wait for write operations to occur. 
+ +use super::*; +use std::collections::{HashMap, HashSet, BTreeMap}; +use std::sync::Arc; + +use futures::channel::oneshot; +use parity_scale_codec::Encode; +use parking_lot::Mutex; +use sp_core::testing::TaskExecutor; +use assert_matches::assert_matches; + +use polkadot_primitives::v1::{BlakeTwo256, HashT, ConsensusLog}; +use polkadot_subsystem::{jaeger, ActiveLeavesUpdate, ActivatedLeaf, LeafStatus}; +use polkadot_subsystem::messages::AllMessages; +use polkadot_node_subsystem_test_helpers as test_helpers; + +#[derive(Default)] +struct TestBackendInner { + leaves: LeafEntrySet, + block_entries: HashMap, + blocks_by_number: BTreeMap>, + stagnant_at: BTreeMap>, + // earlier wakers at the back. + write_wakers: Vec>, +} + +#[derive(Clone)] +struct TestBackend { + inner: Arc>, +} + +impl TestBackend { + // Yields a receiver which will be woken up on some future write + // to the backend along with its position (starting at 0) in the + // queue. + // + // Our tests assume that there is only one task calling this function + // and the index is useful to get a waker that will trigger after + // some known amount of writes to the backend that happen internally + // inside the subsystem. + // + // It's important to call this function at points where no writes + // are pending to the backend. This requires knowing some details + // about the internals of the subsystem, so the abstraction leaks + // somewhat, but this is acceptable enough. + fn await_next_write(&self) -> (usize, oneshot::Receiver<()>) { + let (tx, rx) = oneshot::channel(); + + let mut inner = self.inner.lock(); + let pos = inner.write_wakers.len(); + inner.write_wakers.insert(0, tx); + + (pos, rx) + } + + // Assert the backend contains only the given blocks and no others. + // This does not check the stagnant_at mapping because that is + // pruned lazily by the subsystem as opposed to eagerly. 
+ fn assert_contains_only( + &self, + blocks: Vec<(BlockNumber, Hash)>, + ) { + let hashes: Vec<_> = blocks.iter().map(|(_, h)| *h).collect(); + let mut by_number: HashMap<_, HashSet<_>> = HashMap::new(); + + for (number, hash) in blocks { + by_number.entry(number).or_default().insert(hash); + } + + let inner = self.inner.lock(); + assert_eq!(inner.block_entries.len(), hashes.len()); + assert_eq!(inner.blocks_by_number.len(), by_number.len()); + + for leaf in inner.leaves.clone().into_hashes_descending() { + assert!(hashes.contains(&leaf)); + } + + for (number, hashes_at_number) in by_number { + let at = inner.blocks_by_number.get(&number).unwrap(); + for hash in at { + assert!(hashes_at_number.contains(&hash)); + } + } + } +} + +impl Default for TestBackend { + fn default() -> Self { + TestBackend { + inner: Default::default(), + } + } +} + +impl Backend for TestBackend { + fn load_block_entry(&self, hash: &Hash) -> Result, Error> { + Ok(self.inner.lock().block_entries.get(hash).map(|e| e.clone())) + } + fn load_leaves(&self) -> Result { + Ok(self.inner.lock().leaves.clone()) + } + fn load_stagnant_at(&self, timestamp: Timestamp) -> Result, Error> { + Ok(self.inner.lock().stagnant_at.get(×tamp).map_or(Vec::new(), |s| s.clone())) + } + fn load_stagnant_at_up_to(&self, up_to: Timestamp) + -> Result)>, Error> + { + Ok(self.inner.lock().stagnant_at.range(..=up_to).map(|(t, v)| (*t, v.clone())).collect()) + } + fn load_first_block_number(&self) -> Result, Error> { + Ok(self.inner.lock().blocks_by_number.range(..).map(|(k, _)| *k).next()) + } + fn load_blocks_by_number(&self, number: BlockNumber) -> Result, Error> { + Ok(self.inner.lock().blocks_by_number.get(&number).map_or(Vec::new(), |v| v.clone())) + } + + fn write(&mut self, ops: I) -> Result<(), Error> + where I: IntoIterator + { + let mut inner = self.inner.lock(); + + for op in ops { + match op { + BackendWriteOp::WriteBlockEntry(entry) => { + inner.block_entries.insert(entry.block_hash, entry); + } + 
BackendWriteOp::WriteBlocksByNumber(number, hashes) => { + inner.blocks_by_number.insert(number, hashes); + } + BackendWriteOp::WriteViableLeaves(leaves) => { + inner.leaves = leaves; + } + BackendWriteOp::WriteStagnantAt(time, hashes) => { + inner.stagnant_at.insert(time, hashes); + } + BackendWriteOp::DeleteBlocksByNumber(number) => { + inner.blocks_by_number.remove(&number); + } + BackendWriteOp::DeleteBlockEntry(hash) => { + inner.block_entries.remove(&hash); + } + BackendWriteOp::DeleteStagnantAt(time) => { + inner.stagnant_at.remove(&time); + } + } + } + + if let Some(waker) = inner.write_wakers.pop() { + let _ = waker.send(()); + } + Ok(()) + } +} + +type VirtualOverseer = test_helpers::TestSubsystemContextHandle; + +fn test_harness>( + test: impl FnOnce(TestBackend, VirtualOverseer) -> T +) { + let pool = TaskExecutor::new(); + let (context, virtual_overseer) = test_helpers::make_subsystem_context(pool); + + let backend = TestBackend::default(); + let subsystem = crate::run(context, backend.clone()); + + let test_fut = test(backend, virtual_overseer); + let test_and_conclude = async move { + let mut virtual_overseer = test_fut.await; + virtual_overseer.send(OverseerSignal::Conclude.into()).await; + + // Ensure no messages are pending when the subsystem shuts down. + assert!(virtual_overseer.try_recv().await.is_none()); + }; + futures::executor::block_on(futures::future::join(subsystem, test_and_conclude)); +} + +// Answer requests from the subsystem about the finalized block. 
+async fn answer_finalized_block_info( + overseer: &mut VirtualOverseer, + finalized_number: BlockNumber, + finalized_hash: Hash, +) { + assert_matches!( + overseer.recv().await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockNumber(tx)) => { + let _ = tx.send(Ok(finalized_number)); + } + ); + + assert_matches!( + overseer.recv().await, + AllMessages::ChainApi(ChainApiMessage::FinalizedBlockHash(n, tx)) => { + assert_eq!(n, finalized_number); + let _ = tx.send(Ok(Some(finalized_hash))); + } + ); +} + +async fn answer_header_request( + overseer: &mut VirtualOverseer, + maybe_header: impl Into>, +) { + assert_matches!( + overseer.recv().await, + AllMessages::ChainApi(ChainApiMessage::BlockHeader(hash, tx)) => { + let maybe_header = maybe_header.into(); + assert!(maybe_header.as_ref().map_or(true, |h| h.hash() == hash)); + let _ = tx.send(Ok(maybe_header)); + } + ) +} + +async fn answer_weight_request( + overseer: &mut VirtualOverseer, + hash: Hash, + weight: impl Into>, +) { + assert_matches!( + overseer.recv().await, + AllMessages::ChainApi(ChainApiMessage::BlockWeight(h, tx)) => { + assert_eq!(h, hash); + let _ = tx.send(Ok(weight.into())); + } + ) +} + +fn child_header(parent_number: BlockNumber, parent_hash: Hash) -> Header { + Header { + parent_hash, + number: parent_number + 1, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default() + } +} + +fn salt_header(header: &mut Header, salt: impl Encode) { + header.state_root = BlakeTwo256::hash_of(&salt) +} + +fn add_reversions( + header: &mut Header, + reversions: impl IntoIterator, +) { + for log in reversions.into_iter().map(ConsensusLog::Revert) { + header.digest.logs.push(log.into()) + } +} + +// Builds a chain on top of the given base, with one block for each +// provided weight. 
+fn construct_chain_on_base( + weights: impl IntoIterator, + base_number: BlockNumber, + base_hash: Hash, + mut mutate: impl FnMut(&mut Header), +) -> (Hash, Vec<(Header, BlockWeight)>) { + let mut parent_number = base_number; + let mut parent_hash = base_hash; + + let mut chain = Vec::new(); + for weight in weights { + let mut header = child_header(parent_number, parent_hash); + mutate(&mut header); + + parent_number = header.number; + parent_hash = header.hash(); + chain.push((header, weight)); + } + + (parent_hash, chain) +} + +// import blocks 1-by-1. If `finalized_base` is supplied, +// it will be answered before the first block in `answers. +async fn import_blocks_into( + virtual_overseer: &mut VirtualOverseer, + backend: &TestBackend, + mut finalized_base: Option<(BlockNumber, Hash)>, + blocks: Vec<(Header, BlockWeight)>, +) { + for (header, weight) in blocks { + let (_, write_rx) = backend.await_next_write(); + + let hash = header.hash(); + virtual_overseer.send(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + ActivatedLeaf { + hash, + number: header.number, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + } + )).into()).await; + + if let Some((f_n, f_h)) = finalized_base.take() { + answer_finalized_block_info(virtual_overseer, f_n, f_h).await; + } + + answer_header_request(virtual_overseer, header.clone()).await; + answer_weight_request(virtual_overseer, hash, weight).await; + + write_rx.await.unwrap(); + } +} + +async fn import_chains_into_empty( + virtual_overseer: &mut VirtualOverseer, + backend: &TestBackend, + finalized_number: BlockNumber, + finalized_hash: Hash, + chains: Vec>, +) { + for (i, chain)in chains.into_iter().enumerate() { + let finalized_base = Some((finalized_number, finalized_hash)).filter(|_| i == 0); + import_blocks_into( + virtual_overseer, + backend, + finalized_base, + chain, + ).await; + } +} + +// Import blocks all at once. 
This assumes that the ancestor is known/finalized +// but none of the other blocks. +// import blocks 1-by-1. If `finalized_base` is supplied, +// it will be answered before the first block. +// +// some pre-blocks may need to be supplied to answer ancestry requests +// that gather batches beyond the beginning of the new chain. +// pre-blocks are those already known by the subsystem, however, +// the subsystem has no way of knowin that until requesting ancestry. +async fn import_all_blocks_into( + virtual_overseer: &mut VirtualOverseer, + backend: &TestBackend, + finalized_base: Option<(BlockNumber, Hash)>, + pre_blocks: Vec
, + blocks: Vec<(Header, BlockWeight)>, +) { + assert!(blocks.len() > 1, "gap only makes sense if importing multiple blocks"); + + let head = blocks.last().unwrap().0.clone(); + let head_hash = head.hash(); + + let (_, write_rx) = backend.await_next_write(); + virtual_overseer.send(OverseerSignal::ActiveLeaves(ActiveLeavesUpdate::start_work( + ActivatedLeaf { + hash: head_hash, + number: head.number, + status: LeafStatus::Fresh, + span: Arc::new(jaeger::Span::Disabled), + } + )).into()).await; + + if let Some((f_n, f_h)) = finalized_base { + answer_finalized_block_info(virtual_overseer, f_n, f_h).await; + } + + // Head is always fetched first. + answer_header_request(virtual_overseer, head).await; + + // Answer header and ancestry requests until the parent of head + // is imported. + { + let find_block_header = |expected_hash| { + pre_blocks.iter().cloned() + .chain(blocks.iter().map(|(h, _)| h.clone())) + .find(|hdr| hdr.hash() == expected_hash) + .unwrap() + }; + + let mut behind_head = 0; + loop { + let nth_ancestor_of_head = |n: usize| { + // blocks: [d, e, f, head] + // pre: [a, b, c] + // + // [a, b, c, d, e, f, head] + // [6, 5, 4, 3, 2, 1, 0] + + let new_ancestry_end = blocks.len() - 1; + if n > new_ancestry_end { + // [6, 5, 4] -> [2, 1, 0] + let n_in_pre = n - blocks.len(); + let pre_blocks_end = pre_blocks.len() - 1; + pre_blocks[pre_blocks_end - n_in_pre].clone() + } else { + let blocks_end = blocks.len() - 1; + blocks[blocks_end - n].0.clone() + } + }; + + match virtual_overseer.recv().await { + AllMessages::ChainApi(ChainApiMessage::Ancestors { + hash: h, + k, + response_channel: tx, + }) => { + let prev_response = nth_ancestor_of_head(behind_head); + assert_eq!(h, prev_response.hash()); + + let _ = tx.send(Ok( + (0..k as usize).map(|n| n + behind_head + 1) + .map(nth_ancestor_of_head) + .map(|h| h.hash()) + .collect() + )); + + for _ in 0..k { + assert_matches!( + virtual_overseer.recv().await, + AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, 
tx)) => { + let header = find_block_header(h); + let _ = tx.send(Ok(Some(header))); + } + ) + } + + behind_head = behind_head + k as usize; + } + AllMessages::ChainApi(ChainApiMessage::BlockHeader(h, tx)) => { + let header = find_block_header(h); + let _ = tx.send(Ok(Some(header))); + + // Assuming that `determine_new_blocks` uses these + // instead of ancestry: 1. + behind_head += 1; + } + AllMessages::ChainApi(ChainApiMessage::BlockWeight(h, tx)) => { + let (_, weight) = blocks.iter().find(|(hdr, _)| hdr.hash() == h).unwrap(); + let _ = tx.send(Ok(Some(*weight))); + + // Last weight has been returned. Time to go. + if h == head_hash { break } + } + _ => panic!("unexpected message"), + } + } + } + write_rx.await.unwrap(); +} + +async fn finalize_block( + virtual_overseer: &mut VirtualOverseer, + backend: &TestBackend, + block_number: BlockNumber, + block_hash: Hash, +) { + let (_, write_tx) = backend.await_next_write(); + + virtual_overseer.send( + OverseerSignal::BlockFinalized(block_hash, block_number).into() + ).await; + + write_tx.await.unwrap(); +} + +fn extract_info_from_chain(i: usize, chain: &[(Header, BlockWeight)]) + -> (BlockNumber, Hash, BlockWeight) +{ + let &(ref header, weight) = &chain[i]; + + (header.number, header.hash(), weight) +} + +fn assert_backend_contains<'a>( + backend: &TestBackend, + headers: impl IntoIterator, +) { + for header in headers { + let hash = header.hash(); + assert!( + backend.load_blocks_by_number(header.number).unwrap().contains(&hash), + "blocks at {} does not contain {}", + header.number, + hash, + ); + assert!( + backend.load_block_entry(&hash).unwrap().is_some(), + "no entry found for {}", + hash, + ); + } +} + +fn assert_backend_contains_chains( + backend: &TestBackend, + chains: Vec>, +) { + for chain in chains { + assert_backend_contains( + backend, + chain.iter().map(|&(ref hdr, _)| hdr) + ) + } +} + +fn assert_leaves( + backend: &TestBackend, + leaves: Vec, +) { + assert_eq!( + 
backend.load_leaves().unwrap().into_hashes_descending().into_iter().collect::>(), + leaves, + ); +} + +async fn assert_leaves_query( + virtual_overseer: &mut VirtualOverseer, + leaves: Vec, +) { + assert!(!leaves.is_empty(), "empty leaves impossible. answer finalized query"); + + let (tx, rx) = oneshot::channel(); + virtual_overseer.send(FromOverseer::Communication { + msg: ChainSelectionMessage::Leaves(tx) + }).await; + + assert_eq!(rx.await.unwrap(), leaves); +} + +async fn assert_finalized_leaves_query( + virtual_overseer: &mut VirtualOverseer, + finalized_number: BlockNumber, + finalized_hash: Hash, +) { + let (tx, rx) = oneshot::channel(); + virtual_overseer.send(FromOverseer::Communication { + msg: ChainSelectionMessage::Leaves(tx) + }).await; + + answer_finalized_block_info(virtual_overseer, finalized_number, finalized_hash).await; + + assert_eq!(rx.await.unwrap(), vec![finalized_hash]); +} + +async fn best_leaf_containing( + virtual_overseer: &mut VirtualOverseer, + required: Hash, +) -> Option { + let (tx, rx) = oneshot::channel(); + virtual_overseer.send(FromOverseer::Communication { + msg: ChainSelectionMessage::BestLeafContaining(required, tx) + }).await; + + rx.await.unwrap() +} + +async fn approve_block( + virtual_overseer: &mut VirtualOverseer, + backend: &TestBackend, + approved: Hash, +) { + let (_, write_rx) = backend.await_next_write(); + virtual_overseer.send(FromOverseer::Communication { + msg: ChainSelectionMessage::Approved(approved) + }).await; + + write_rx.await.unwrap() +} + +#[test] +fn no_op_subsystem_run() { + test_harness(|_, virtual_overseer| async move { virtual_overseer }); +} + +#[test] +fn import_direct_child_of_finalized_on_empty() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + let child = child_header(finalized_number, finalized_hash); + let child_hash = child.hash(); + let child_weight = 1; + let child_number = child.number; + + 
import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + vec![(child.clone(), child_weight)], + ).await; + + assert_eq!(backend.load_first_block_number().unwrap().unwrap(), child_number); + assert_backend_contains(&backend, &[child]); + assert_leaves(&backend, vec![child_hash]); + assert_leaves_query(&mut virtual_overseer, vec![child_hash]).await; + + virtual_overseer + }) +} + +#[test] +fn import_chain_on_finalized_incrementally() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + let (head_hash, chain) = construct_chain_on_base( + vec![1, 2, 3, 4, 5], + finalized_number, + finalized_hash, + |_| {} + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain.clone(), + ).await; + + assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); + assert_backend_contains(&backend, chain.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![head_hash]); + assert_leaves_query(&mut virtual_overseer, vec![head_hash]).await; + + virtual_overseer + }) +} + +#[test] +fn import_two_subtrees_on_finalized() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + let (a_hash, chain_a) = construct_chain_on_base( + vec![1], + finalized_number, + finalized_hash, + |_| {} + ); + + let (b_hash, chain_b) = construct_chain_on_base( + vec![2], + finalized_number, + finalized_hash, + |h| salt_header(h, b"b"), + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + import_blocks_into( + &mut virtual_overseer, + &backend, + None, + chain_b.clone(), + ).await; + + assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + 
assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![b_hash, a_hash]); + assert_leaves_query(&mut virtual_overseer, vec![b_hash, a_hash]).await; + + virtual_overseer + }) +} + +#[test] +fn import_two_subtrees_on_nonzero_finalized() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 100; + let finalized_hash = Hash::repeat_byte(0); + + let (a_hash, chain_a) = construct_chain_on_base( + vec![1], + finalized_number, + finalized_hash, + |_| {} + ); + + let (b_hash, chain_b) = construct_chain_on_base( + vec![2], + finalized_number, + finalized_hash, + |h| salt_header(h, b"b"), + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + import_blocks_into( + &mut virtual_overseer, + &backend, + None, + chain_b.clone(), + ).await; + + assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 101); + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![b_hash, a_hash]); + assert_leaves_query(&mut virtual_overseer, vec![b_hash, a_hash]).await; + + virtual_overseer + }) +} + +#[test] +fn leaves_ordered_by_weight_and_then_number() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // A1 <- B2 + // F <- C1 <- C2 + // + // expected_leaves: [(C2, 3), (A3, 2), (B2, 2)] + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 1, 2], + finalized_number, + finalized_hash, + |_| {} + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + let (b2_hash, chain_b) = construct_chain_on_base( + vec![2], + 1, + a1_hash, + |h| salt_header(h, b"b"), + ); + + let (c2_hash, chain_c) = construct_chain_on_base( + vec![1, 3], + finalized_number, + finalized_hash, + |h| 
salt_header(h, b"c"), + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone(), chain_c.clone()], + ).await; + + assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_c.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![c2_hash, a3_hash, b2_hash]); + assert_leaves_query(&mut virtual_overseer, vec![c2_hash, a3_hash, b2_hash]).await; + virtual_overseer + }); +} + +#[test] +fn subtrees_imported_even_with_gaps() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // A2 <- B3 <- B4 <- B5 + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |_| {} + ); + + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + let (b5_hash, chain_b) = construct_chain_on_base( + vec![4, 4, 5], + 2, + a2_hash, + |h| salt_header(h, b"b"), + ); + + import_all_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + Vec::new(), + chain_a.clone(), + ).await; + + import_all_blocks_into( + &mut virtual_overseer, + &backend, + None, + vec![chain_a[0].0.clone(), chain_a[1].0.clone()], + chain_b.clone(), + ).await; + + assert_eq!(backend.load_first_block_number().unwrap().unwrap(), 1); + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![b5_hash, a3_hash]); + assert_leaves_query(&mut virtual_overseer, vec![b5_hash, a3_hash]).await; + + virtual_overseer + }); +} + +#[test] +fn reversion_removes_viability_of_chain() { + test_harness(|backend, mut 
virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3. + // + // A3 reverts A1 + + let (_a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| if h.number == 3 { add_reversions(h, Some(1)) } + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![]); + assert_finalized_leaves_query( + &mut virtual_overseer, + finalized_number, + finalized_hash, + ).await; + + virtual_overseer + }); +} + +#[test] +fn reversion_removes_viability_and_finds_ancestor_as_leaf() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3. + // + // A3 reverts A2 + + let (_a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| if h.number == 3 { add_reversions(h, Some(2)) } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![a1_hash]); + assert_leaves_query(&mut virtual_overseer, vec![a1_hash]).await; + + virtual_overseer + }); +} + +#[test] +fn ancestor_of_unviable_is_not_leaf_if_has_children() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3. 
+ // A1 <- B2 + // + // A3 reverts A2 + + let (a2_hash, chain_a) = construct_chain_on_base( + vec![1, 2], + finalized_number, + finalized_hash, + |_| {} + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + let (_a3_hash, chain_a_ext) = construct_chain_on_base( + vec![3], + 2, + a2_hash, + |h| add_reversions(h, Some(2)), + ); + + let (b2_hash, chain_b) = construct_chain_on_base( + vec![1], + 1, + a1_hash, + |h| salt_header(h, b"b") + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + import_blocks_into( + &mut virtual_overseer, + &backend, + None, + chain_b.clone(), + ).await; + + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![a2_hash, b2_hash]); + + import_blocks_into( + &mut virtual_overseer, + &backend, + None, + chain_a_ext.clone(), + ).await; + + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_a_ext.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![b2_hash]); + assert_leaves_query(&mut virtual_overseer, vec![b2_hash]).await; + + virtual_overseer + }); +} + +#[test] +fn self_and_future_reversions_are_ignored() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3. + // + // A3 reverts itself and future blocks. ignored. 
+ + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| if h.number == 3 { add_reversions(h, vec![3, 4, 100]) } + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![a3_hash]); + assert_leaves_query(&mut virtual_overseer, vec![a3_hash]).await; + + virtual_overseer + }); +} + +#[test] +fn revert_finalized_is_ignored() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 10; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3. + // + // A3 reverts itself and future blocks. ignored. + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| if h.number == 13 { add_reversions(h, vec![10, 9, 8, 0, 1]) } + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![a3_hash]); + assert_leaves_query(&mut virtual_overseer, vec![a3_hash]).await; + + virtual_overseer + }); +} + +#[test] +fn reversion_affects_viability_of_all_subtrees() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3. + // A2 <- B3 <- B4 + // + // B4 reverts A2. 
+ + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |_| {} + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + let (_b4_hash, chain_b) = construct_chain_on_base( + vec![3, 4], + 2, + a2_hash, + |h| { + salt_header(h, b"b"); + if h.number == 4 { + add_reversions(h, Some(2)); + } + } + ); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + assert_leaves(&backend, vec![a3_hash]); + + import_blocks_into( + &mut virtual_overseer, + &backend, + None, + chain_b.clone(), + ).await; + + assert_backend_contains(&backend, chain_a.iter().map(|&(ref h, _)| h)); + assert_backend_contains(&backend, chain_b.iter().map(|&(ref h, _)| h)); + assert_leaves(&backend, vec![a1_hash]); + assert_leaves_query(&mut virtual_overseer, vec![a1_hash]).await; + + virtual_overseer + }); +} + +#[test] +fn finalize_viable_prunes_subtrees() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // A2 <- X3 + // F <- A1 <- A2 <- A3. + // A1 <- B2 + // F <- C1 <- C2 <- C3 + // C2 <- D3 + // + // Finalize A2. Only A2, A3, and X3 should remain. 
+ + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 10], + finalized_number, + finalized_hash, + |h| salt_header(h, b"a"), + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + let (x3_hash, chain_x) = construct_chain_on_base( + vec![3], + 2, + a2_hash, + |h| salt_header(h, b"x"), + ); + + let (b2_hash, chain_b) = construct_chain_on_base( + vec![6], + 1, + a1_hash, + |h| salt_header(h, b"b"), + ); + + let (c3_hash, chain_c) = construct_chain_on_base( + vec![1, 2, 8], + finalized_number, + finalized_hash, + |h| salt_header(h, b"c"), + ); + let (_, c2_hash, _) = extract_info_from_chain(1, &chain_c); + + let (d3_hash, chain_d) = construct_chain_on_base( + vec![7], + 2, + c2_hash, + |h| salt_header(h, b"d"), + ); + + let all_chains = vec![ + chain_a.clone(), + chain_x.clone(), + chain_b.clone(), + chain_c.clone(), + chain_d.clone(), + ]; + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + all_chains.clone(), + ).await; + + assert_backend_contains_chains( + &backend, + all_chains.clone(), + ); + assert_leaves(&backend, vec![a3_hash, c3_hash, d3_hash, b2_hash, x3_hash]); + + // Finalize block A2. Now lots of blocks should go missing. 
+ finalize_block( + &mut virtual_overseer, + &backend, + 2, + a2_hash, + ).await; + + // A2 <- A3 + // A2 <- X3 + + backend.assert_contains_only(vec![ + (3, a3_hash), + (3, x3_hash), + ]); + + assert_leaves(&backend, vec![a3_hash, x3_hash]); + assert_leaves_query(&mut virtual_overseer, vec![a3_hash, x3_hash]).await; + + assert_eq!( + backend.load_first_block_number().unwrap().unwrap(), + 3, + ); + + assert_eq!( + backend.load_blocks_by_number(3).unwrap(), + vec![a3_hash, x3_hash], + ); + + virtual_overseer + }); +} + +#[test] +fn finalization_does_not_clobber_unviability() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // A3 reverts A2. + // Finalize A1. + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 10], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + if h.number == 3 { + add_reversions(h, Some(2)); + } + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + import_blocks_into( + &mut virtual_overseer, + &backend, + Some((finalized_number, finalized_hash)), + chain_a.clone(), + ).await; + + finalize_block( + &mut virtual_overseer, + &backend, + 1, + a1_hash, + ).await; + + assert_leaves(&backend, vec![]); + assert_finalized_leaves_query( + &mut virtual_overseer, + 1, + a1_hash, + ).await; + backend.assert_contains_only(vec![ + (3, a3_hash), + (2, a2_hash), + ]); + + virtual_overseer + }); +} + +#[test] +fn finalization_erases_unviable() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // A1 <- B2 + // + // A2 reverts A1. + // Finalize A1. 
+ + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + if h.number == 2 { + add_reversions(h, Some(1)); + } + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + let (b2_hash, chain_b) = construct_chain_on_base( + vec![1], + 1, + a1_hash, + |h| salt_header(h, b"b"), + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone()], + ).await; + + assert_leaves(&backend, vec![]); + + finalize_block( + &mut virtual_overseer, + &backend, + 1, + a1_hash, + ).await; + + assert_leaves(&backend, vec![a3_hash, b2_hash]); + assert_leaves_query(&mut virtual_overseer, vec![a3_hash, b2_hash]).await; + + backend.assert_contains_only(vec![ + (3, a3_hash), + (2, a2_hash), + (2, b2_hash), + ]); + + virtual_overseer + }); +} + +#[test] +fn finalize_erases_unviable_but_keeps_later_unviability() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // A1 <- B2 + // + // A2 reverts A1. + // A3 reverts A2. + // Finalize A1. A2 is stil unviable, but B2 is viable. 
+ + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + if h.number == 2 { + add_reversions(h, Some(1)); + } + if h.number == 3 { + add_reversions(h, Some(2)); + } + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + let (b2_hash, chain_b) = construct_chain_on_base( + vec![1], + 1, + a1_hash, + |h| salt_header(h, b"b"), + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone()], + ).await; + + assert_leaves(&backend, vec![]); + + finalize_block( + &mut virtual_overseer, + &backend, + 1, + a1_hash, + ).await; + + assert_leaves(&backend, vec![b2_hash]); + assert_leaves_query(&mut virtual_overseer, vec![b2_hash]).await; + + backend.assert_contains_only(vec![ + (3, a3_hash), + (2, a2_hash), + (2, b2_hash), + ]); + + virtual_overseer + }); +} + +#[test] +fn finalize_erases_unviable_from_one_but_not_all_reverts() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // + // A3 reverts A2 and A1. + // Finalize A1. A2 is stil unviable. 
+ + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + if h.number == 3 { + add_reversions(h, Some(1)); + add_reversions(h, Some(2)); + } + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone()], + ).await; + + assert_leaves(&backend, vec![]); + + finalize_block( + &mut virtual_overseer, + &backend, + 1, + a1_hash, + ).await; + + assert_leaves(&backend, vec![]); + assert_finalized_leaves_query( + &mut virtual_overseer, + 1, + a1_hash, + ).await; + + backend.assert_contains_only(vec![ + (3, a3_hash), + (2, a2_hash), + ]); + + virtual_overseer + }); +} + +#[test] +fn finalize_triggers_viability_search() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // A2 <- B3 + // A2 <- C3 + // A3 reverts A1. + // Finalize A1. A3, B3, and C3 are all viable now. 
+ + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + if h.number == 3 { + add_reversions(h, Some(1)); + } + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + let (b3_hash, chain_b) = construct_chain_on_base( + vec![4], + 2, + a2_hash, + |h| salt_header(h, b"b"), + ); + + let (c3_hash, chain_c) = construct_chain_on_base( + vec![5], + 2, + a2_hash, + |h| salt_header(h, b"c"), + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone(), chain_c.clone()], + ).await; + + assert_leaves(&backend, vec![]); + + finalize_block( + &mut virtual_overseer, + &backend, + 1, + a1_hash, + ).await; + + assert_leaves(&backend, vec![c3_hash, b3_hash, a3_hash]); + assert_leaves_query(&mut virtual_overseer, vec![c3_hash, b3_hash, a3_hash]).await; + + backend.assert_contains_only(vec![ + (3, a3_hash), + (3, b3_hash), + (3, c3_hash), + (2, a2_hash), + ]); + + virtual_overseer + }); +} + +#[test] +fn best_leaf_none_with_empty_db() { + test_harness(|_backend, mut virtual_overseer| async move { + let required = Hash::repeat_byte(1); + let best_leaf = best_leaf_containing(&mut virtual_overseer, required).await; + assert!(best_leaf.is_none()); + + virtual_overseer + }) +} + +#[test] +fn best_leaf_none_with_no_viable_leaves() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 + // + // A2 reverts A1. 
+ + let (a2_hash, chain_a) = construct_chain_on_base( + vec![1, 2], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + if h.number == 2 { + add_reversions(h, Some(1)); + } + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone()], + ).await; + + let best_leaf = best_leaf_containing(&mut virtual_overseer, a2_hash).await; + assert!(best_leaf.is_none()); + + let best_leaf = best_leaf_containing(&mut virtual_overseer, a1_hash).await; + assert!(best_leaf.is_none()); + + virtual_overseer + }) +} + +#[test] +fn best_leaf_none_with_unknown_required() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 + + let (_a2_hash, chain_a) = construct_chain_on_base( + vec![1, 2], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + } + ); + + let unknown_hash = Hash::repeat_byte(0x69); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone()], + ).await; + + let best_leaf = best_leaf_containing(&mut virtual_overseer, unknown_hash).await; + assert!(best_leaf.is_none()); + + virtual_overseer + }) +} + +#[test] +fn best_leaf_none_with_unviable_required() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 + // F <- B1 <- B2 + // + // A2 reverts A1. 
+ + let (a2_hash, chain_a) = construct_chain_on_base( + vec![1, 2], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + if h.number == 2 { + add_reversions(h, Some(1)); + } + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + let (_b2_hash, chain_b) = construct_chain_on_base( + vec![1, 2], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"b"); + } + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone()], + ).await; + + let best_leaf = best_leaf_containing(&mut virtual_overseer, a2_hash).await; + assert!(best_leaf.is_none()); + + let best_leaf = best_leaf_containing(&mut virtual_overseer, a1_hash).await; + assert!(best_leaf.is_none()); + + virtual_overseer + }) +} + +#[test] +fn best_leaf_with_finalized_required() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 + // F <- B1 <- B2 + // + // B2 > A2 + + let (_a2_hash, chain_a) = construct_chain_on_base( + vec![1, 1], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + } + ); + + let (b2_hash, chain_b) = construct_chain_on_base( + vec![1, 2], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"b"); + } + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone()], + ).await; + + let best_leaf = best_leaf_containing(&mut virtual_overseer, finalized_hash).await; + assert_eq!(best_leaf, Some(b2_hash)); + + virtual_overseer + }) +} + +#[test] +fn best_leaf_with_unfinalized_required() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 + // F <- B1 <- B2 + // + // B2 > A2 + + let (a2_hash, chain_a) = construct_chain_on_base( + vec![1, 1], + 
finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + let (_b2_hash, chain_b) = construct_chain_on_base( + vec![1, 2], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"b"); + } + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone()], + ).await; + + let best_leaf = best_leaf_containing(&mut virtual_overseer, a1_hash).await; + assert_eq!(best_leaf, Some(a2_hash)); + + virtual_overseer + }) +} + +#[test] +fn best_leaf_ancestor_of_all_leaves() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + // A1 <- B2 <- B3 + // B2 <- C3 + // + // C3 > B3 > A3 + + let (_a3_hash, chain_a) = construct_chain_on_base( + vec![1, 1, 2], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + + let (_b3_hash, chain_b) = construct_chain_on_base( + vec![2, 3], + 1, + a1_hash, + |h| { + salt_header(h, b"b"); + } + ); + + let (_, b2_hash, _) = extract_info_from_chain(0, &chain_b); + + let (c3_hash, chain_c) = construct_chain_on_base( + vec![4], + 2, + b2_hash, + |h| { + salt_header(h, b"c"); + } + ); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone(), chain_b.clone(), chain_c.clone()], + ).await; + + let best_leaf = best_leaf_containing(&mut virtual_overseer, a1_hash).await; + assert_eq!(best_leaf, Some(c3_hash)); + + virtual_overseer + }) +} + +#[test] +fn approve_message_approves_block_entry() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 
3], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone()], + ).await; + + approve_block(&mut virtual_overseer, &backend, a3_hash).await; + + // a3 is approved, but not a1 or a2. + assert_matches!( + backend.load_block_entry(&a3_hash).unwrap().unwrap().viability.approval, + Approval::Approved + ); + + assert_matches!( + backend.load_block_entry(&a2_hash).unwrap().unwrap().viability.approval, + Approval::Unapproved + ); + + assert_matches!( + backend.load_block_entry(&a1_hash).unwrap().unwrap().viability.approval, + Approval::Unapproved + ); + + virtual_overseer + }) +} + +#[test] +fn approve_nonexistent_has_no_effect() { + test_harness(|backend, mut virtual_overseer| async move { + let finalized_number = 0; + let finalized_hash = Hash::repeat_byte(0); + + // F <- A1 <- A2 <- A3 + + let (a3_hash, chain_a) = construct_chain_on_base( + vec![1, 2, 3], + finalized_number, + finalized_hash, + |h| { + salt_header(h, b"a"); + } + ); + + let (_, a1_hash, _) = extract_info_from_chain(0, &chain_a); + let (_, a2_hash, _) = extract_info_from_chain(1, &chain_a); + + import_chains_into_empty( + &mut virtual_overseer, + &backend, + finalized_number, + finalized_hash, + vec![chain_a.clone()], + ).await; + + let nonexistent = Hash::repeat_byte(1); + approve_block(&mut virtual_overseer, &backend, nonexistent).await; + + // a3 is approved, but not a1 or a2. 
+ assert_matches!( + backend.load_block_entry(&a3_hash).unwrap().unwrap().viability.approval, + Approval::Unapproved + ); + + assert_matches!( + backend.load_block_entry(&a2_hash).unwrap().unwrap().viability.approval, + Approval::Unapproved + ); + + assert_matches!( + backend.load_block_entry(&a1_hash).unwrap().unwrap().viability.approval, + Approval::Unapproved + ); + + virtual_overseer + }) +} diff --git a/node/core/chain-selection/src/tree.rs b/node/core/chain-selection/src/tree.rs new file mode 100644 index 000000000000..a10f0d0c5ad5 --- /dev/null +++ b/node/core/chain-selection/src/tree.rs @@ -0,0 +1,584 @@ +// Copyright 2021 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Implements the tree-view over the data backend which we use to determine +//! viable leaves. +//! +//! The metadata is structured as a tree, with the root implicitly being the +//! finalized block, which is not stored as part of the tree. +//! +//! Each direct descendant of the finalized block acts as its own sub-tree, +//! and as the finalized block advances, orphaned sub-trees are entirely pruned. 
+
+use polkadot_primitives::v1::{BlockNumber, Hash};
+use polkadot_node_primitives::BlockWeight;
+
+
+use std::collections::HashMap;
+
+use super::{
+	LOG_TARGET,
+	Approval, BlockEntry, Error, LeafEntry, ViabilityCriteria,
+	Timestamp,
+};
+use crate::backend::{Backend, OverlayedBackend};
+
+// A viability update to be applied to a block.
+struct ViabilityUpdate(Option<Hash>);
+
+impl ViabilityUpdate {
+	// Apply the viability update to a single block, yielding the updated
+	// block entry along with a vector of children and the updates to apply
+	// to them.
+	fn apply(self, mut entry: BlockEntry) -> (
+		BlockEntry,
+		Vec<(Hash, ViabilityUpdate)>
+	) {
+		// 1. When an ancestor has changed from unviable to viable,
+		// we erase the `earliest_unviable_ancestor` of all descendants
+		// until encountering an explicitly unviable descendant D.
+		//
+		// We then update the `earliest_unviable_ancestor` for all
+		// descendants of D to be equal to D.
+		//
+		// 2. When an ancestor A has changed from viable to unviable,
+		// we update the `earliest_unviable_ancestor` for all blocks
+		// to A.
+		//
+		// The following algorithm covers both cases.
+		//
+		// Furthermore, if there has been any change in viability,
+		// it is necessary to visit every single descendant of the root
+		// block.
+		//
+		// If a block B was unviable and is now viable, then every descendant
+		// has an `earliest_unviable_ancestor` which must be updated either
+		// to nothing or to the new earliest unviable ancestor.
+		//
+		// If a block B was viable and is now unviable, then every descendant
+		// has an `earliest_unviable_ancestor` which needs to be set to B.
+
+		let maybe_earliest_unviable = self.0;
+		let next_earliest_unviable = {
+			if maybe_earliest_unviable.is_none() && !entry.viability.is_explicitly_viable() {
+				Some(entry.block_hash)
+			} else {
+				maybe_earliest_unviable
+			}
+		};
+		entry.viability.earliest_unviable_ancestor = maybe_earliest_unviable;
+
+		let recurse = entry.children.iter()
+			.cloned()
+			.map(move |c| (c, ViabilityUpdate(next_earliest_unviable)))
+			.collect();
+
+		(entry, recurse)
+	}
+}
+
+// Propagate viability update to descendants of the given block. This writes
+// the `base` entry as well as all descendants. If the parent of the block
+// entry is not viable, this will not affect any descendants.
+//
+// If the block entry provided is self-unviable, then it's assumed that an
+// unviability update needs to be propagated to descendants.
+//
+// If the block entry provided is self-viable, then it's assumed that a
+// viability update needs to be propagated to descendants.
+fn propagate_viability_update(
+	backend: &mut OverlayedBackend<impl Backend>,
+	base: BlockEntry,
+) -> Result<(), Error> {
+	enum BlockEntryRef {
+		Explicit(BlockEntry),
+		Hash(Hash),
+	}
+
+	if !base.viability.is_parent_viable() {
+		// If the parent of the block is still unviable,
+		// then the `earliest_viable_ancestor` will not change
+		// regardless of the change in the block here.
+		//
+		// Furthermore, in such cases, the set of viable leaves
+		// does not change at all.
+		backend.write_block_entry(base);
+		return Ok(())
+	}
+
+	let mut viable_leaves = backend.load_leaves()?;
+
+	// A mapping of Block Hash -> number
+	// Where the hash is the hash of a viable block which has
+	// at least 1 unviable child.
+	//
+	// The number is the number of known unviable children which is known
+	// as the pivot count.
+	let mut viability_pivots = HashMap::new();
+
+	// If the base block is itself explicitly unviable,
+	// this will change to a `Some(base_hash)` after the first
+	// invocation.
+ let viability_update = ViabilityUpdate(None); + + // Recursively apply update to tree. + // + // As we go, we remove any blocks from the leaves which are no longer viable + // leaves. We also add blocks to the leaves-set which are obviously viable leaves. + // And we build up a frontier of blocks which may either be viable leaves or + // the ancestors of one. + let mut tree_frontier = vec![(BlockEntryRef::Explicit(base), viability_update)]; + while let Some((entry_ref, update)) = tree_frontier.pop() { + let entry = match entry_ref { + BlockEntryRef::Explicit(entry) => entry, + BlockEntryRef::Hash(hash) => match backend.load_block_entry(&hash)? { + None => { + tracing::warn!( + target: LOG_TARGET, + block_hash = ?hash, + "Missing expected block entry" + ); + + continue; + } + Some(entry) => entry, + } + }; + + let (new_entry, children) = update.apply(entry); + + if new_entry.viability.is_viable() { + // A block which is viable has a parent which is obviously not + // in the viable leaves set. + viable_leaves.remove(&new_entry.parent_hash); + + // Furthermore, if the block is viable and has no children, + // it is viable by definition. + if new_entry.children.is_empty() { + viable_leaves.insert(new_entry.leaf_entry()); + } + } else { + // A block which is not viable is certainly not a viable leaf. + viable_leaves.remove(&new_entry.block_hash); + + // When the parent is viable but the entry itself is not, that means + // that the parent is a viability pivot. As we visit the children + // of a viability pivot, we build up an exhaustive pivot count. + if new_entry.viability.is_parent_viable() { + *viability_pivots.entry(new_entry.parent_hash).or_insert(0) += 1; + } + } + + backend.write_block_entry(new_entry); + + tree_frontier.extend( + children.into_iter().map(|(h, update)| (BlockEntryRef::Hash(h), update)) + ); + } + + // Revisit the viability pivots now that we've traversed the entire subtree. + // After this point, the viable leaves set is fully updated. 
A proof follows. + // + // If the base has become unviable, then we've iterated into all descendants, + // made them unviable and removed them from the set. We know that the parent is + // viable as this function is a no-op otherwise, so we need to see if the parent + // has other children or not. + // + // If the base has become viable, then we've iterated into all descendants, + // and found all blocks which are viable and have no children. We've already added + // those blocks to the leaf set, but what we haven't detected + // is blocks which are viable and have children, but all of the children are + // unviable. + // + // The solution of viability pivots addresses both of these: + // + // When the base has become unviable, the parent's viability is unchanged and therefore + // any leaves descending from parent but not base are still in the viable leaves set. + // If the parent has only one child which is the base, the parent is now a viable leaf. + // We've already visited the base in recursive search so the set of pivots should + // contain only a single entry `(parent, 1)`. qed. + // + // When the base has become viable, we've already iterated into every descendant + // of the base and thus have collected a set of pivots whose corresponding pivot + // counts have already been exhaustively computed from their children. qed. + for (pivot, pivot_count) in viability_pivots { + match backend.load_block_entry(&pivot)? { + None => { + // This means the block is finalized. We might reach this + // code path when the base is a child of the finalized block + // and has become unviable. + // + // Each such child is the root of its own tree + // which, as an invariant, does not depend on the viability + // of the finalized block. So no siblings need to be inspected + // and we can ignore it safely. + // + // Furthermore, if the set of viable leaves is empty, the + // finalized block is implicitly the viable leaf. 
+
+				continue
+			}
+			Some(entry) => {
+				if entry.children.len() == pivot_count {
+					viable_leaves.insert(entry.leaf_entry());
+				}
+			}
+		}
+	}
+
+	backend.write_leaves(viable_leaves);
+
+	Ok(())
+}
+
+/// Imports a new block and applies any reversions to ancestors.
+pub(crate) fn import_block(
+	backend: &mut OverlayedBackend<impl Backend>,
+	block_hash: Hash,
+	block_number: BlockNumber,
+	parent_hash: Hash,
+	reversion_logs: Vec<BlockNumber>,
+	weight: BlockWeight,
+) -> Result<(), Error> {
+	add_block(backend, block_hash, block_number, parent_hash, weight)?;
+	apply_reversions(
+		backend,
+		block_hash,
+		block_number,
+		reversion_logs,
+	)?;
+
+	Ok(())
+}
+
+// Load the given ancestor's block entry, in descending order from the `block_hash`.
+// The ancestor_number must be at least one block less than the `block_number`.
+//
+// The returned entry will be `None` if the range is invalid or any block in the path had
+// no entry present. If any block entry was missing, it can safely be assumed to
+// be finalized.
+fn load_ancestor(
+	backend: &mut OverlayedBackend<impl Backend>,
+	block_hash: Hash,
+	block_number: BlockNumber,
+	ancestor_number: BlockNumber,
+) -> Result<Option<BlockEntry>, Error> {
+	if block_number <= ancestor_number { return Ok(None) }
+
+	let mut current_hash = block_hash;
+	let mut current_entry = None;
+
+	let segment_length = (block_number - ancestor_number) + 1;
+	for _ in 0..segment_length {
+		match backend.load_block_entry(&current_hash)? {
+			None => return Ok(None),
+			Some(entry) => {
+				let parent_hash = entry.parent_hash;
+				current_entry = Some(entry);
+				current_hash = parent_hash;
+			}
+		}
+	}
+
+	// Current entry should always be `Some` here.
+	Ok(current_entry)
+}
+
+// Add a new block to the tree, which is assumed to be unreverted and unapproved,
+// but not stagnant. It inherits viability from its parent, if any.
+//
+// This updates the parent entry, if any, and updates the viable leaves set accordingly.
+
+// This also schedules a stagnation-check update and adds the block to the blocks-by-number
+// mapping.
+fn add_block(
+	backend: &mut OverlayedBackend<impl Backend>,
+	block_hash: Hash,
+	block_number: BlockNumber,
+	parent_hash: Hash,
+	weight: BlockWeight,
+) -> Result<(), Error> {
+	let mut leaves = backend.load_leaves()?;
+	let parent_entry = backend.load_block_entry(&parent_hash)?;
+
+	let inherited_viability = parent_entry.as_ref()
+		.and_then(|parent| parent.non_viable_ancestor_for_child());
+
+	// 1. Add the block to the DB assuming it's not reverted.
+	backend.write_block_entry(
+		BlockEntry {
+			block_hash,
+			block_number,
+			parent_hash,
+			children: Vec::new(),
+			viability: ViabilityCriteria {
+				earliest_unviable_ancestor: inherited_viability,
+				explicitly_reverted: false,
+				approval: Approval::Unapproved,
+			},
+			weight,
+		}
+	);
+
+	// 2. Update leaves if inherited viability is fine.
+	if inherited_viability.is_none() {
+		leaves.remove(&parent_hash);
+		leaves.insert(LeafEntry { block_hash, block_number, weight });
+		backend.write_leaves(leaves);
+	}
+
+	// 3. Update and write the parent
+	if let Some(mut parent_entry) = parent_entry {
+		parent_entry.children.push(block_hash);
+		backend.write_block_entry(parent_entry);
+	}
+
+	// 4. Add to blocks-by-number.
+	let mut blocks_by_number = backend.load_blocks_by_number(block_number)?;
+	blocks_by_number.push(block_hash);
+	backend.write_blocks_by_number(block_number, blocks_by_number);
+
+	// 5. Add stagnation timeout.
+	let stagnant_at = crate::stagnant_timeout_from_now();
+	let mut stagnant_at_list = backend.load_stagnant_at(stagnant_at)?;
+	stagnant_at_list.push(block_hash);
+	backend.write_stagnant_at(stagnant_at, stagnant_at_list);
+
+	Ok(())
+}
+
+// Assuming that a block is already imported, accepts the number of the block
+// as well as a list of reversions triggered by the block in ascending order.
+
+fn apply_reversions(
+	backend: &mut OverlayedBackend<impl Backend>,
+	block_hash: Hash,
+	block_number: BlockNumber,
+	reversions: Vec<BlockNumber>,
+) -> Result<(), Error> {
+	// Note: since revert numbers are in ascending order, the expensive propagation
+	// of unviability is only heavy on the first log.
+	for revert_number in reversions {
+		let mut ancestor_entry = match load_ancestor(
+			backend,
+			block_hash,
+			block_number,
+			revert_number,
+		)? {
+			None => {
+				tracing::warn!(
+					target: LOG_TARGET,
+					?block_hash,
+					block_number,
+					revert_target = revert_number,
+					"The hammer has dropped. \
+					A block has indicated that its finalized ancestor be reverted. \
+					Please inform an adult.",
+				);
+
+				continue
+			}
+			Some(ancestor_entry) => {
+				tracing::info!(
+					target: LOG_TARGET,
+					?block_hash,
+					block_number,
+					revert_target = revert_number,
+					revert_hash = ?ancestor_entry.block_hash,
+					"A block has signaled that its ancestor be reverted due to a bad parachain block.",
+				);
+
+				ancestor_entry
+			}
+		};
+
+		ancestor_entry.viability.explicitly_reverted = true;
+		propagate_viability_update(backend, ancestor_entry)?;
+	}
+
+	Ok(())
+}
+
+/// Finalize a block with the given number and hash.
+///
+/// This will prune all sub-trees not descending from the given block,
+/// all block entries at or before the given height,
+/// and will update the viability of all sub-trees descending from the given
+/// block if the finalized block was not viable.
+///
+/// This is assumed to start with a fresh backend, and will produce
+/// an overlay over the backend with all the changes applied.
+pub(super) fn finalize_block<'a, B: Backend + 'a>( + backend: &'a B, + finalized_hash: Hash, + finalized_number: BlockNumber, +) -> Result, Error> { + let earliest_stored_number = backend.load_first_block_number()?; + let mut backend = OverlayedBackend::new(backend); + + let earliest_stored_number = match earliest_stored_number { + None => { + // This implies that there are no unfinalized blocks and hence nothing + // to update. + return Ok(backend); + } + Some(e) => e, + }; + + let mut viable_leaves = backend.load_leaves()?; + + // Walk all numbers up to the finalized number and remove those entries. + for number in earliest_stored_number..finalized_number { + let blocks_at = backend.load_blocks_by_number(number)?; + backend.delete_blocks_by_number(number); + + for block in blocks_at { + viable_leaves.remove(&block); + backend.delete_block_entry(&block); + } + } + + // Remove all blocks at the finalized height, with the exception of the finalized block, + // and their descendants, recursively. + { + let blocks_at_finalized_height = backend.load_blocks_by_number(finalized_number)?; + backend.delete_blocks_by_number(finalized_number); + + let mut frontier: Vec<_> = blocks_at_finalized_height + .into_iter() + .filter(|h| h != &finalized_hash) + .map(|h| (h, finalized_number)) + .collect(); + + while let Some((dead_hash, dead_number)) = frontier.pop() { + let entry = backend.load_block_entry(&dead_hash)?; + backend.delete_block_entry(&dead_hash); + viable_leaves.remove(&dead_hash); + + // This does a few extra `clone`s but is unlikely to be + // a bottleneck. Code complexity is very low as a result. + let mut blocks_at_height = backend.load_blocks_by_number(dead_number)?; + blocks_at_height.retain(|h| h != &dead_hash); + backend.write_blocks_by_number(dead_number, blocks_at_height); + + // Add all children to the frontier. 
+ let next_height = dead_number + 1; + frontier.extend( + entry.into_iter().flat_map(|e| e.children).map(|h| (h, next_height)) + ); + } + } + + // Visit and remove the finalized block, fetching its children. + let children_of_finalized = { + let finalized_entry = backend.load_block_entry(&finalized_hash)?; + backend.delete_block_entry(&finalized_hash); + viable_leaves.remove(&finalized_hash); + + finalized_entry.into_iter().flat_map(|e| e.children) + }; + + backend.write_leaves(viable_leaves); + + // Update the viability of each child. + for child in children_of_finalized { + if let Some(mut child) = backend.load_block_entry(&child)? { + // Finalized blocks are always viable. + child.viability.earliest_unviable_ancestor = None; + + propagate_viability_update(&mut backend, child)?; + } else { + tracing::debug!( + target: LOG_TARGET, + ?finalized_hash, + finalized_number, + child_hash = ?child, + "Missing child of finalized block", + ); + + // No need to do anything, but this is an inconsistent state. + } + } + + Ok(backend) +} + +/// Mark a block as approved and update the viability of itself and its +/// descendants accordingly. +pub(super) fn approve_block( + backend: &mut OverlayedBackend, + approved_hash: Hash, +) -> Result<(), Error> { + if let Some(mut entry) = backend.load_block_entry(&approved_hash)? { + let was_viable = entry.viability.is_viable(); + entry.viability.approval = Approval::Approved; + let is_viable = entry.viability.is_viable(); + + // Approval can change the viability in only one direction. + // If the viability has changed, then we propagate that to children + // and recalculate the viable leaf set. + if !was_viable && is_viable { + propagate_viability_update(backend, entry)?; + } else { + backend.write_block_entry(entry); + } + + } else { + tracing::debug!( + target: LOG_TARGET, + block_hash = ?approved_hash, + "Missing entry for freshly-approved block. 
Ignoring" + ); + } + + Ok(()) +} + +/// Check whether any blocks up to the given timestamp are stagnant and update +/// accordingly. +/// +/// This accepts a fresh backend and returns an overlay on top of it representing +/// all changes made. +// TODO https://github.com/paritytech/polkadot/issues/3293:: remove allow +#[allow(unused)] +pub(super) fn detect_stagnant<'a, B: 'a + Backend>( + backend: &'a B, + up_to: Timestamp, +) -> Result, Error> { + let stagnant_up_to = backend.load_stagnant_at_up_to(up_to)?; + let mut backend = OverlayedBackend::new(backend); + + // As this is in ascending order, only the earliest stagnant + // blocks will involve heavy viability propagations. + for (timestamp, maybe_stagnant) in stagnant_up_to { + backend.delete_stagnant_at(timestamp); + + for block_hash in maybe_stagnant { + if let Some(mut entry) = backend.load_block_entry(&block_hash)? { + let was_viable = entry.viability.is_viable(); + if let Approval::Unapproved = entry.viability.approval { + entry.viability.approval = Approval::Stagnant; + } + let is_viable = entry.viability.is_viable(); + + if was_viable && !is_viable { + propagate_viability_update(&mut backend, entry)?; + } else { + backend.write_block_entry(entry); + } + } + } + } + + Ok(backend) +} diff --git a/node/network/bridge/src/tests.rs b/node/network/bridge/src/tests.rs index 8785238b3506..48296fb94f05 100644 --- a/node/network/bridge/src/tests.rs +++ b/node/network/bridge/src/tests.rs @@ -1279,6 +1279,7 @@ fn spread_event_to_subsystems_is_up_to_date() { AllMessages::GossipSupport(_) => unreachable!("Not interested in network events"), AllMessages::DisputeCoordinator(_) => unreachable!("Not interested in network events"), AllMessages::DisputeParticipation(_) => unreachable!("Not interetsed in network events"), + AllMessages::ChainSelection(_) => unreachable!("Not interested in network events"), // Add variants here as needed, `{ cnt += 1; }` for those that need to be // notified, `unreachable!()` for those that 
should not. } diff --git a/node/overseer/src/lib.rs b/node/overseer/src/lib.rs index 13461a060742..5f2422fc73df 100644 --- a/node/overseer/src/lib.rs +++ b/node/overseer/src/lib.rs @@ -631,6 +631,7 @@ impl ChannelsOut { }, AllMessages::DisputeCoordinator(_) => Ok(()), AllMessages::DisputeParticipation(_) => Ok(()), + AllMessages::ChainSelection(_) => Ok(()), }; if res.is_err() { @@ -735,6 +736,7 @@ impl ChannelsOut { }, AllMessages::DisputeCoordinator(_) => Ok(()), AllMessages::DisputeParticipation(_) => Ok(()), + AllMessages::ChainSelection(_) => Ok(()), }; if res.is_err() { @@ -2068,6 +2070,7 @@ where }, AllMessages::DisputeCoordinator(_) => {} AllMessages::DisputeParticipation(_) => {} + AllMessages::ChainSelection(_) => {} } Ok(()) diff --git a/node/subsystem-util/src/determine_new_blocks.rs b/node/subsystem-util/src/determine_new_blocks.rs index b275689d68f1..adfc614beef9 100644 --- a/node/subsystem-util/src/determine_new_blocks.rs +++ b/node/subsystem-util/src/determine_new_blocks.rs @@ -17,8 +17,7 @@ //! A utility for fetching all unknown blocks based on a new chain-head hash. 
use polkadot_node_subsystem::{ - messages::ChainApiMessage, - SubsystemSender, SubsystemError, SubsystemResult, + messages::ChainApiMessage, SubsystemSender, }; use polkadot_primitives::v1::{Hash, Header, BlockNumber}; use futures::prelude::*; @@ -41,9 +40,7 @@ pub async fn determine_new_blocks( head: Hash, header: &Header, lower_bound_number: BlockNumber, -) -> SubsystemResult> - where SubsystemError: From -{ +) -> Result, E> { const ANCESTRY_STEP: usize = 4; let min_block_needed = lower_bound_number + 1; @@ -173,7 +170,7 @@ mod tests { self.blocks.insert(hash); } - fn is_known(&self, hash: &Hash) -> Result { + fn is_known(&self, hash: &Hash) -> Result { Ok(self.blocks.contains(hash)) } } diff --git a/node/subsystem/src/lib.rs b/node/subsystem/src/lib.rs index 871b6f2c80c7..d070ff4d6cf0 100644 --- a/node/subsystem/src/lib.rs +++ b/node/subsystem/src/lib.rs @@ -175,6 +175,11 @@ pub enum FromOverseer { }, } +impl From for FromOverseer { + fn from(signal: OverseerSignal) -> Self { + FromOverseer::Signal(signal) + } +} /// An error type that describes faults that may happen /// diff --git a/node/subsystem/src/messages.rs b/node/subsystem/src/messages.rs index f7a6c884990a..f6171a8a3baf 100644 --- a/node/subsystem/src/messages.rs +++ b/node/subsystem/src/messages.rs @@ -506,6 +506,32 @@ impl ChainApiMessage { } } +/// Chain selection subsystem messages +#[derive(Debug)] +pub enum ChainSelectionMessage { + /// Signal to the chain selection subsystem that a specific block has been approved. + Approved(Hash), + /// Request the leaves in descending order by score. + Leaves(oneshot::Sender>), + /// Request the best leaf containing the given block in its ancestry. Return `None` if + /// there is no such leaf. + BestLeafContaining(Hash, oneshot::Sender>), +} + +impl ChainSelectionMessage { + /// If the current variant contains the relay parent hash, return it. 
+ pub fn relay_parent(&self) -> Option { + // None of the messages, even the ones containing specific + // block hashes, can be considered to have those blocks as + // a relay parent. + match *self { + ChainSelectionMessage::Approved(_) => None, + ChainSelectionMessage::Leaves(_) => None, + ChainSelectionMessage::BestLeafContaining(..) => None, + } + } +} + /// A sender for the result of a runtime API request. pub type RuntimeApiSender = oneshot::Sender>; @@ -832,6 +858,9 @@ pub enum AllMessages { /// Message for the dispute participation subsystem. #[skip] DisputeParticipation(DisputeParticipationMessage), + /// Message for the chain selection subsystem. + #[skip] + ChainSelection(ChainSelectionMessage), } impl From> for AvailabilityDistributionMessage { diff --git a/roadmap/implementers-guide/src/types/overseer-protocol.md b/roadmap/implementers-guide/src/types/overseer-protocol.md index 77e8bd862653..70e214a79492 100644 --- a/roadmap/implementers-guide/src/types/overseer-protocol.md +++ b/roadmap/implementers-guide/src/types/overseer-protocol.md @@ -70,6 +70,7 @@ enum AllMessages { GossipSupport(GossipSupportMessage), DisputeCoordinator(DisputeCoordinatorMessage), DisputeParticipation(DisputeParticipationMessage), + ChainSelection(ChainSelectionMessage), } ```