diff --git a/Cargo.lock b/Cargo.lock index 334202f74a41..c63503129e01 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5038,6 +5038,22 @@ dependencies = [ "tracing-futures", ] +[[package]] +name = "polkadot-node-core-approval-voting" +version = "0.1.0" +dependencies = [ + "bitvec", + "futures 0.3.8", + "parity-scale-codec", + "polkadot-node-primitives", + "polkadot-node-subsystem", + "polkadot-overseer", + "polkadot-primitives", + "sc-client-api", + "sp-blockchain", + "sp-consensus-slots", +] + [[package]] name = "polkadot-node-core-av-store" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 57708ded628e..debe7e7a3cca 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -43,6 +43,7 @@ members = [ "xcm/xcm-builder", "xcm/xcm-executor", "node/collation-generation", + "node/core/approval-voting", "node/core/av-store", "node/core/backing", "node/core/bitfield-signing", diff --git a/node/core/approval-voting/Cargo.toml b/node/core/approval-voting/Cargo.toml new file mode 100644 index 000000000000..b6c361b53835 --- /dev/null +++ b/node/core/approval-voting/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "polkadot-node-core-approval-voting" +version = "0.1.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +futures = "0.3.8" +parity-scale-codec = { version = "1.3.5", default-features = false, features = ["bit-vec", "derive"] } + +polkadot-subsystem = { package = "polkadot-node-subsystem", path = "../../subsystem" } +polkadot-overseer = { path = "../../overseer" } +polkadot-primitives = { path = "../../../primitives" } +polkadot-node-primitives = { path = "../../primitives" } +bitvec = "0.17.4" + +sc-client-api = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-consensus-slots = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } +sp-blockchain = { git = "https://github.com/paritytech/substrate", branch = "master", default-features = false } + 
+[dev-dependencies] \ No newline at end of file diff --git a/node/core/approval-voting/src/aux_schema/mod.rs b/node/core/approval-voting/src/aux_schema/mod.rs new file mode 100644 index 000000000000..fd55644a1918 --- /dev/null +++ b/node/core/approval-voting/src/aux_schema/mod.rs @@ -0,0 +1,531 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Auxiliary DB schema, accessors, and writers for on-disk persisted approval storage +//! data. +//! +//! We persist data to disk although it is not intended to be used across runs of the +//! program. This is because under medium to long periods of finality stalling, for whatever +//! reason that may be, the amount of data we'd need to keep would be potentially too large +//! for memory. +//! +//! With tens or hundreds of parachains, hundreds of validators, and parablocks +//! in every relay chain block, there can be a humongous amount of information to reference +//! at any given time. +//! +//! As such, we provide a function from this module to clear the database on start-up. +//! In the future, we may use a temporary DB which doesn't need to be wiped, but for the +//! time being we share the same DB with the rest of Substrate. 
+ +// TODO https://github.com/paritytech/polkadot/issues/1975: remove this +#![allow(unused)] + +use sc_client_api::backend::AuxStore; +use polkadot_node_primitives::approval::{DelayTranche, RelayVRF}; +use polkadot_primitives::v1::{ + ValidatorIndex, GroupIndex, CandidateReceipt, SessionIndex, CoreIndex, + BlockNumber, Hash, CandidateHash, +}; +use sp_consensus_slots::SlotNumber; +use parity_scale_codec::{Encode, Decode}; + +use std::collections::{BTreeMap, HashMap}; +use std::collections::hash_map::Entry; +use bitvec::{vec::BitVec, order::Lsb0 as BitOrderLsb0}; + +use super::Tick; + +#[cfg(test)] +mod tests; + +const STORED_BLOCKS_KEY: &[u8] = b"Approvals_StoredBlocks"; + +/// Metadata regarding a specific tranche of assignments for a specific candidate. +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +pub(crate) struct TrancheEntry { + tranche: DelayTranche, + // Assigned validators, and the instant we received their assignment, rounded + // to the nearest tick. + assignments: Vec<(ValidatorIndex, Tick)>, +} + +/// Metadata regarding approval of a particular candidate within the context of some +/// particular block. +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +pub(crate) struct ApprovalEntry { + tranches: Vec, + backing_group: GroupIndex, + // When the next wakeup for this entry should occur. This is either to + // check a no-show or to check if we need to broadcast an assignment. + next_wakeup: Tick, + our_assignment: Option, + // `n_validators` bits. + assignments: BitVec, + approved: bool, +} + +/// Metadata regarding approval of a particular candidate. +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +pub(crate) struct CandidateEntry { + candidate: CandidateReceipt, + session: SessionIndex, + // Assignments are based on blocks, so we need to track assignments separately + // based on the block we are looking at. 
+ block_assignments: BTreeMap, + approvals: BitVec, +} + +/// Metadata regarding approval of a particular block, by way of approval of the +/// candidates contained within it. +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +pub(crate) struct BlockEntry { + block_hash: Hash, + session: SessionIndex, + slot: SlotNumber, + relay_vrf_story: RelayVRF, + // The candidates included as-of this block and the index of the core they are + // leaving. Sorted ascending by core index. + candidates: Vec<(CoreIndex, CandidateHash)>, + // A bitfield where the i'th bit corresponds to the i'th candidate in `candidates`. + // The i'th bit is `true` iff the candidate has been approved in the context of this + // block. The block can be considered approved if the bitfield has all bits set to `true`. + approved_bitfield: BitVec, + children: Vec, +} + +/// A range from earliest..last block number stored within the DB. +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +pub(crate) struct StoredBlockRange(BlockNumber, BlockNumber); + +// TODO https://github.com/paritytech/polkadot/issues/1975: probably in lib.rs +#[derive(Debug, Clone, Encode, Decode, PartialEq)] +pub(crate) struct OurAssignment { } + +/// Canonicalize some particular block, pruning everything before it and +/// pruning any competing branches at the same height. +pub(crate) fn canonicalize( + store: &impl AuxStore, + canon_number: BlockNumber, + canon_hash: Hash, +) + -> sp_blockchain::Result<()> +{ + let range = match load_stored_blocks(store)? { + None => return Ok(()), + Some(range) => if range.0 >= canon_number { + return Ok(()) + } else { + range + }, + }; + + let mut deleted_height_keys = Vec::new(); + let mut deleted_block_keys = Vec::new(); + + // Storing all candidates in memory is potentially heavy, but should be fine + // as long as finality doesn't stall for a long while. We could optimize this + // by keeping only the metadata about which blocks reference each candidate. 
+ let mut visited_candidates = HashMap::new(); + + // All the block heights we visited but didn't necessarily delete everything from. + let mut visited_heights = HashMap::new(); + + let visit_and_remove_block_entry = | + block_hash: Hash, + deleted_block_keys: &mut Vec<_>, + visited_candidates: &mut HashMap, + | -> sp_blockchain::Result> { + let block_entry = match load_block_entry(store, &block_hash)? { + None => return Ok(Vec::new()), + Some(b) => b, + }; + + deleted_block_keys.push(block_entry_key(&block_hash)); + for &(_, ref candidate_hash) in &block_entry.candidates { + let candidate = match visited_candidates.entry(*candidate_hash) { + Entry::Occupied(e) => e.into_mut(), + Entry::Vacant(e) => { + e.insert(match load_candidate_entry(store, candidate_hash)? { + None => continue, // Should not happen except for corrupt DB + Some(c) => c, + }) + } + }; + + candidate.block_assignments.remove(&block_hash); + } + + Ok(block_entry.children) + }; + + // First visit everything before the height. + for i in range.0..canon_number { + let at_height = load_blocks_at_height(store, i)?; + deleted_height_keys.push(blocks_at_height_key(i)); + + for b in at_height { + let _ = visit_and_remove_block_entry( + b, + &mut deleted_block_keys, + &mut visited_candidates, + )?; + } + } + + // Then visit everything at the height. + let pruned_branches = { + let at_height = load_blocks_at_height(store, canon_number)?; + deleted_height_keys.push(blocks_at_height_key(canon_number)); + + // Note that while there may be branches descending from blocks at earlier heights, + // we have already covered them by removing everything at earlier heights. + let mut pruned_branches = Vec::new(); + + for b in at_height { + let children = visit_and_remove_block_entry( + b, + &mut deleted_block_keys, + &mut visited_candidates, + )?; + + if b != canon_hash { + pruned_branches.extend(children); + } + } + + pruned_branches + }; + + // Follow all children of non-canonicalized blocks. 
+ { + let mut frontier: Vec<_> = pruned_branches.into_iter().map(|h| (canon_number + 1, h)).collect(); + while let Some((height, next_child)) = frontier.pop() { + let children = visit_and_remove_block_entry( + next_child, + &mut deleted_block_keys, + &mut visited_candidates, + )?; + + // extend the frontier of branches to include the given height. + frontier.extend(children.into_iter().map(|h| (height + 1, h))); + + // visit the at-height key for this deleted block's height. + let at_height = match visited_heights.entry(height) { + Entry::Occupied(e) => e.into_mut(), + Entry::Vacant(e) => e.insert(load_blocks_at_height(store, height)?), + }; + + if let Some(i) = at_height.iter().position(|x| x == &next_child) { + at_height.remove(i); + } + } + } + + // Update all `CandidateEntry`s, deleting all those which now have empty `block_assignments`. + let (written_candidates, deleted_candidates) = { + let mut written = Vec::new(); + let mut deleted = Vec::new(); + + for (candidate_hash, candidate) in visited_candidates { + if candidate.block_assignments.is_empty() { + deleted.push(candidate_entry_key(&candidate_hash)); + } else { + written.push((candidate_entry_key(&candidate_hash), candidate.encode())); + } + } + + (written, deleted) + }; + + // Update all blocks-at-height keys, deleting all those which are now empty. + let written_at_height = { + visited_heights.into_iter().filter_map(|(h, at)| { + if at.is_empty() { + deleted_height_keys.push(blocks_at_height_key(h)); + None + } else { + Some((blocks_at_height_key(h), at.encode())) + } + }).collect::<Vec<_>>() + }; + + // due to the fork pruning, this range actually might go too far above where our actual highest block is, + // if a relatively short fork is canonicalized. + let new_range = StoredBlockRange( + canon_number + 1, + std::cmp::max(range.1, canon_number + 2), + ).encode(); + + // Because aux-store requires &&[u8], we have to collect. 
+ + let inserted_keys: Vec<_> = std::iter::once((&STORED_BLOCKS_KEY[..], &new_range[..])) + .chain(written_candidates.iter().map(|&(ref k, ref v)| (&k[..], &v[..]))) + .chain(written_at_height.iter().map(|&(ref k, ref v)| (&k[..], &v[..]))) + .collect(); + + let deleted_keys: Vec<_> = deleted_block_keys.iter().map(|k| &k[..]) + .chain(deleted_height_keys.iter().map(|k| &k[..])) + .chain(deleted_candidates.iter().map(|k| &k[..])) + .collect(); + + // Update the values on-disk. + store.insert_aux( + inserted_keys.iter(), + deleted_keys.iter(), + )?; + + Ok(()) +} + +/// Clear the aux store of everything. +pub(crate) fn clear(store: &impl AuxStore) + -> sp_blockchain::Result<()> +{ + let range = match load_stored_blocks(store)? { + None => return Ok(()), + Some(range) => range, + }; + + let mut visited_height_keys = Vec::new(); + let mut visited_block_keys = Vec::new(); + let mut visited_candidate_keys = Vec::new(); + + for i in range.0..range.1 { + let at_height = load_blocks_at_height(store, i)?; + + visited_height_keys.push(blocks_at_height_key(i)); + + for block_hash in at_height { + let block_entry = match load_block_entry(store, &block_hash)? { + None => continue, + Some(e) => e, + }; + + visited_block_keys.push(block_entry_key(&block_hash)); + + for &(_, candidate_hash) in &block_entry.candidates { + visited_candidate_keys.push(candidate_entry_key(&candidate_hash)); + } + } + } + + // unfortunately demands a `collect` because aux store wants `&&[u8]` for some reason. + let visited_keys_borrowed = visited_height_keys.iter().map(|x| &x[..]) + .chain(visited_block_keys.iter().map(|x| &x[..])) + .chain(visited_candidate_keys.iter().map(|x| &x[..])) + .chain(std::iter::once(&STORED_BLOCKS_KEY[..])) + .collect::>(); + + store.insert_aux(&[], &visited_keys_borrowed)?; + + Ok(()) +} + +fn load_decode(store: &impl AuxStore, key: &[u8]) + -> sp_blockchain::Result> +{ + match store.get_aux(key)? 
{ + None => Ok(None), + Some(raw) => D::decode(&mut &raw[..]) + .map(Some) + .map_err(|e| sp_blockchain::Error::Storage( + format!("Failed to decode item in approvals DB: {:?}", e) + )), + } +} + +/// Information about a new candidate necessary to instantiate the requisite +/// candidate and approval entries. +#[derive(Clone)] +pub(crate) struct NewCandidateInfo { + candidate: CandidateReceipt, + backing_group: GroupIndex, + our_assignment: Option, +} + +/// Record a new block entry. +/// +/// This will update the blocks-at-height mapping, the stored block range, if necessary, +/// and add block and candidate entries. It will also add approval entries to existing +/// candidate entries and add this as a child of any block entry corresponding to the +/// parent hash. +/// +/// Has no effect if there is already an entry for the block or `candidate_info` returns +/// `None` for any of the candidates referenced by the block entry. +pub(crate) fn add_block_entry( + store: &impl AuxStore, + parent_hash: Hash, + number: BlockNumber, + entry: BlockEntry, + n_validators: usize, + candidate_info: impl Fn(&CandidateHash) -> Option, +) -> sp_blockchain::Result<()> { + let session = entry.session; + + let new_block_range = { + let new_range = match load_stored_blocks(store)? { + None => Some(StoredBlockRange(number, number + 1)), + Some(range) => if range.1 <= number { + Some(StoredBlockRange(range.0, number + 1)) + } else { + None + } + }; + + new_range.map(|n| (STORED_BLOCKS_KEY, n.encode())) + }; + + let updated_blocks_at = { + let mut blocks_at_height = load_blocks_at_height(store, number)?; + if blocks_at_height.contains(&entry.block_hash) { + // seems we already have a block entry for this block. nothing to do here. 
+ return Ok(()) + } + + blocks_at_height.push(entry.block_hash); + (blocks_at_height_key(number), blocks_at_height.encode()) + }; + + let candidate_entry_updates = { + let mut updated_entries = Vec::with_capacity(entry.candidates.len()); + for &(_, ref candidate_hash) in &entry.candidates { + let NewCandidateInfo { + candidate, + backing_group, + our_assignment, + } = match candidate_info(candidate_hash) { + None => return Ok(()), + Some(info) => info, + }; + + let mut candidate_entry = load_candidate_entry(store, &candidate_hash)? + .unwrap_or_else(move || CandidateEntry { + candidate, + session, + block_assignments: BTreeMap::new(), + approvals: bitvec::bitvec![BitOrderLsb0, u8; 0; n_validators], + }); + + candidate_entry.block_assignments.insert( + entry.block_hash, + ApprovalEntry { + tranches: Vec::new(), + backing_group, + next_wakeup: 0, + our_assignment, + assignments: bitvec::bitvec![BitOrderLsb0, u8; 0; n_validators], + approved: false, + } + ); + + updated_entries.push( + (candidate_entry_key(&candidate_hash), candidate_entry.encode()) + ); + } + + updated_entries + }; + + let updated_parent = { + load_block_entry(store, &parent_hash)?.map(|mut e| { + e.children.push(entry.block_hash); + (block_entry_key(&parent_hash), e.encode()) + }) + }; + + let write_block_entry = (block_entry_key(&entry.block_hash), entry.encode()); + + // write: + // - new block range + // - updated blocks-at item + // - fresh and updated candidate entries + // - the parent block entry. + // - the block entry itself + + // Unfortunately have to collect because aux-store demands &(&[u8], &[u8]). 
+ let all_keys_and_values: Vec<_> = new_block_range.as_ref().into_iter() + .map(|&(ref k, ref v)| (&k[..], &v[..])) + .chain(std::iter::once((&updated_blocks_at.0[..], &updated_blocks_at.1[..]))) + .chain(candidate_entry_updates.iter().map(|&(ref k, ref v)| (&k[..], &v[..]))) + .chain(std::iter::once((&write_block_entry.0[..], &write_block_entry.1[..]))) + .chain(updated_parent.as_ref().into_iter().map(|&(ref k, ref v)| (&k[..], &v[..]))) + .collect(); + + store.insert_aux(&all_keys_and_values, &[])?; + + Ok(()) +} + +/// Load the stored-blocks key from the state. +pub(crate) fn load_stored_blocks(store: &impl AuxStore) + -> sp_blockchain::Result> +{ + load_decode(store, STORED_BLOCKS_KEY) +} + +/// Load a blocks-at-height entry for a given block number. +pub(crate) fn load_blocks_at_height(store: &impl AuxStore, block_number: BlockNumber) + -> sp_blockchain::Result> { + load_decode(store, &blocks_at_height_key(block_number)) + .map(|x| x.unwrap_or_default()) +} + +/// Load a block entry from the aux store. +pub(crate) fn load_block_entry(store: &impl AuxStore, block_hash: &Hash) + -> sp_blockchain::Result> +{ + load_decode(store, &block_entry_key(block_hash)) +} + +/// Load a candidate entry from the aux store. +pub(crate) fn load_candidate_entry(store: &impl AuxStore, candidate_hash: &CandidateHash) + -> sp_blockchain::Result> +{ + load_decode(store, &candidate_entry_key(candidate_hash)) +} + +/// The key a given block entry is stored under. +fn block_entry_key(block_hash: &Hash) -> [u8; 46] { + const BLOCK_ENTRY_PREFIX: [u8; 14] = *b"Approvals_blck"; + + let mut key = [0u8; 14 + 32]; + key[0..14].copy_from_slice(&BLOCK_ENTRY_PREFIX); + key[14..][..32].copy_from_slice(block_hash.as_ref()); + + key +} + +/// The key a given candidate entry is stored under. 
+fn candidate_entry_key(candidate_hash: &CandidateHash) -> [u8; 46] { + const CANDIDATE_ENTRY_PREFIX: [u8; 14] = *b"Approvals_cand"; + + let mut key = [0u8; 14 + 32]; + key[0..14].copy_from_slice(&CANDIDATE_ENTRY_PREFIX); + key[14..][..32].copy_from_slice(candidate_hash.0.as_ref()); + + key +} + +/// The key a set of block hashes corresponding to a block number is stored under. +fn blocks_at_height_key(block_number: BlockNumber) -> [u8; 16] { + const BLOCKS_AT_HEIGHT_PREFIX: [u8; 12] = *b"Approvals_at"; + + let mut key = [0u8; 12 + 4]; + key[0..12].copy_from_slice(&BLOCKS_AT_HEIGHT_PREFIX); + block_number.using_encoded(|s| key[12..16].copy_from_slice(s)); + + key +} diff --git a/node/core/approval-voting/src/aux_schema/tests.rs b/node/core/approval-voting/src/aux_schema/tests.rs new file mode 100644 index 000000000000..97fd27d597ef --- /dev/null +++ b/node/core/approval-voting/src/aux_schema/tests.rs @@ -0,0 +1,517 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Tests for the aux-schema of approval voting. 
+ +use super::*; +use std::cell::RefCell; +use polkadot_primitives::v1::Id as ParaId; + +#[derive(Default)] +struct TestStore { + inner: RefCell, Vec>>, +} + +impl AuxStore for TestStore { + fn insert_aux<'a, 'b: 'a, 'c: 'a, I, D>(&self, insertions: I, deletions: D) -> sp_blockchain::Result<()> + where I: IntoIterator, D: IntoIterator + { + let mut store = self.inner.borrow_mut(); + + // insertions before deletions. + for (k, v) in insertions { + store.insert(k.to_vec(), v.to_vec()); + } + + for k in deletions { + store.remove(&k[..]); + } + + Ok(()) + } + + fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { + Ok(self.inner.borrow().get(key).map(|v| v.clone())) + } +} + +impl TestStore { + fn write_stored_blocks(&self, range: StoredBlockRange) { + self.inner.borrow_mut().insert( + STORED_BLOCKS_KEY.to_vec(), + range.encode(), + ); + } + + fn write_blocks_at_height(&self, height: BlockNumber, blocks: &[Hash]) { + self.inner.borrow_mut().insert( + blocks_at_height_key(height).to_vec(), + blocks.encode(), + ); + } + + fn write_block_entry(&self, block_hash: &Hash, entry: &BlockEntry) { + self.inner.borrow_mut().insert( + block_entry_key(block_hash).to_vec(), + entry.encode(), + ); + } + + fn write_candidate_entry(&self, candidate_hash: &CandidateHash, entry: &CandidateEntry) { + self.inner.borrow_mut().insert( + candidate_entry_key(candidate_hash).to_vec(), + entry.encode(), + ); + } +} + +fn make_bitvec(len: usize) -> BitVec { + bitvec::bitvec![BitOrderLsb0, u8; 0; len] +} + +fn make_block_entry( + block_hash: Hash, + candidates: Vec<(CoreIndex, CandidateHash)>, +) -> BlockEntry { + BlockEntry { + block_hash, + session: 1, + slot: 1, + relay_vrf_story: RelayVRF([0u8; 32]), + approved_bitfield: make_bitvec(candidates.len()), + candidates, + children: Vec::new(), + } +} + +fn make_candidate(para_id: ParaId, relay_parent: Hash) -> CandidateReceipt { + let mut c = CandidateReceipt::default(); + + c.descriptor.para_id = para_id; + c.descriptor.relay_parent = 
relay_parent; + + c +} + +#[test] +fn read_write() { + let store = TestStore::default(); + + let hash_a = Hash::repeat_byte(1); + let hash_b = Hash::repeat_byte(2); + let candidate_hash = CandidateHash(Hash::repeat_byte(3)); + + let range = StoredBlockRange(10, 20); + let at_height = vec![hash_a, hash_b]; + + let block_entry = make_block_entry( + hash_a, + vec![(CoreIndex(0), candidate_hash)], + ); + + let candidate_entry = CandidateEntry { + candidate: Default::default(), + session: 5, + block_assignments: vec![ + (hash_a, ApprovalEntry { + tranches: Vec::new(), + backing_group: GroupIndex(1), + next_wakeup: 1000, + our_assignment: None, + assignments: Default::default(), + approved: false, + }) + ].into_iter().collect(), + approvals: Default::default(), + }; + + store.write_stored_blocks(range.clone()); + store.write_blocks_at_height(1, &at_height); + store.write_block_entry(&hash_a, &block_entry); + store.write_candidate_entry(&candidate_hash, &candidate_entry); + + assert_eq!(load_stored_blocks(&store).unwrap(), Some(range)); + assert_eq!(load_blocks_at_height(&store, 1).unwrap(), at_height); + assert_eq!(load_block_entry(&store, &hash_a).unwrap(), Some(block_entry)); + assert_eq!(load_candidate_entry(&store, &candidate_hash).unwrap(), Some(candidate_entry)); + + let delete_keys = vec![ + STORED_BLOCKS_KEY.to_vec(), + blocks_at_height_key(1).to_vec(), + block_entry_key(&hash_a).to_vec(), + candidate_entry_key(&candidate_hash).to_vec(), + ]; + + let delete_keys: Vec<_> = delete_keys.iter().map(|k| &k[..]).collect(); + store.insert_aux(&[], &delete_keys); + + assert!(load_stored_blocks(&store).unwrap().is_none()); + assert!(load_blocks_at_height(&store, 1).unwrap().is_empty()); + assert!(load_block_entry(&store, &hash_a).unwrap().is_none()); + assert!(load_candidate_entry(&store, &candidate_hash).unwrap().is_none()); +} + +#[test] +fn add_block_entry_works() { + let store = TestStore::default(); + + let parent_hash = Hash::repeat_byte(1); + let block_hash_a = 
Hash::repeat_byte(2); + let block_hash_b = Hash::repeat_byte(69); + + let candidate_hash_a = CandidateHash(Hash::repeat_byte(3)); + let candidate_hash_b = CandidateHash(Hash::repeat_byte(4)); + + let block_entry_a = make_block_entry( + block_hash_a, + vec![(CoreIndex(0), candidate_hash_a)], + ); + + let block_entry_b = make_block_entry( + block_hash_b, + vec![(CoreIndex(0), candidate_hash_a), (CoreIndex(1), candidate_hash_b)], + ); + + let n_validators = 10; + let block_number = 10; + + let mut new_candidate_info = HashMap::new(); + new_candidate_info.insert(candidate_hash_a, NewCandidateInfo { + candidate: make_candidate(1.into(), parent_hash), + backing_group: GroupIndex(0), + our_assignment: None, + }); + + add_block_entry( + &store, + parent_hash, + block_number, + block_entry_a.clone(), + n_validators, + |h| new_candidate_info.get(h).map(|x| x.clone()), + ).unwrap(); + + new_candidate_info.insert(candidate_hash_b, NewCandidateInfo { + candidate: make_candidate(2.into(), parent_hash), + backing_group: GroupIndex(1), + our_assignment: None, + }); + + add_block_entry( + &store, + parent_hash, + block_number, + block_entry_b.clone(), + n_validators, + |h| new_candidate_info.get(h).map(|x| x.clone()), + ).unwrap(); + + assert_eq!(load_block_entry(&store, &block_hash_a).unwrap(), Some(block_entry_a)); + assert_eq!(load_block_entry(&store, &block_hash_b).unwrap(), Some(block_entry_b)); + + let candidate_entry_a = load_candidate_entry(&store, &candidate_hash_a).unwrap().unwrap(); + assert_eq!(candidate_entry_a.block_assignments.keys().collect::>(), vec![&block_hash_a, &block_hash_b]); + + let candidate_entry_b = load_candidate_entry(&store, &candidate_hash_b).unwrap().unwrap(); + assert_eq!(candidate_entry_b.block_assignments.keys().collect::>(), vec![&block_hash_b]); +} + +#[test] +fn add_block_entry_adds_child() { + let store = TestStore::default(); + + let parent_hash = Hash::repeat_byte(1); + let block_hash_a = Hash::repeat_byte(2); + let block_hash_b = 
Hash::repeat_byte(69); + + let mut block_entry_a = make_block_entry( + block_hash_a, + Vec::new(), + ); + + let block_entry_b = make_block_entry( + block_hash_b, + Vec::new(), + ); + + let n_validators = 10; + + add_block_entry( + &store, + parent_hash, + 1, + block_entry_a.clone(), + n_validators, + |_| None, + ).unwrap(); + + add_block_entry( + &store, + block_hash_a, + 2, + block_entry_b.clone(), + n_validators, + |_| None, + ).unwrap(); + + block_entry_a.children.push(block_hash_b); + + assert_eq!(load_block_entry(&store, &block_hash_a).unwrap(), Some(block_entry_a)); + assert_eq!(load_block_entry(&store, &block_hash_b).unwrap(), Some(block_entry_b)); +} + +#[test] +fn clear_works() { + let store = TestStore::default(); + + let hash_a = Hash::repeat_byte(1); + let hash_b = Hash::repeat_byte(2); + let candidate_hash = CandidateHash(Hash::repeat_byte(3)); + + let range = StoredBlockRange(0, 5); + let at_height = vec![hash_a, hash_b]; + + let block_entry = make_block_entry( + hash_a, + vec![(CoreIndex(0), candidate_hash)], + ); + + let candidate_entry = CandidateEntry { + candidate: Default::default(), + session: 5, + block_assignments: vec![ + (hash_a, ApprovalEntry { + tranches: Vec::new(), + backing_group: GroupIndex(1), + next_wakeup: 1000, + our_assignment: None, + assignments: Default::default(), + approved: false, + }) + ].into_iter().collect(), + approvals: Default::default(), + }; + + store.write_stored_blocks(range.clone()); + store.write_blocks_at_height(1, &at_height); + store.write_block_entry(&hash_a, &block_entry); + store.write_candidate_entry(&candidate_hash, &candidate_entry); + + assert_eq!(load_stored_blocks(&store).unwrap(), Some(range)); + assert_eq!(load_blocks_at_height(&store, 1).unwrap(), at_height); + assert_eq!(load_block_entry(&store, &hash_a).unwrap(), Some(block_entry)); + assert_eq!(load_candidate_entry(&store, &candidate_hash).unwrap(), Some(candidate_entry)); + + clear(&store).unwrap(); + + 
assert!(load_stored_blocks(&store).unwrap().is_none()); + assert!(load_blocks_at_height(&store, 1).unwrap().is_empty()); + assert!(load_block_entry(&store, &hash_a).unwrap().is_none()); + assert!(load_candidate_entry(&store, &candidate_hash).unwrap().is_none()); +} + + +#[test] +fn canonicalize_works() { + let store = TestStore::default(); + + // -> B1 -> C1 -> D1 + // A -> B2 -> C2 -> D2 + // + // We'll canonicalize C1. Everything except D1 should disappear. + // + // Candidates: + // Cand1 in B2 + // Cand2 in C2 + // Cand3 in C2 and D1 + // Cand4 in D1 + // Cand5 in D2 + // Only Cand3 and Cand4 should remain after canonicalize. + + let n_validators = 10; + + store.write_stored_blocks(StoredBlockRange(1, 5)); + + let genesis = Hash::repeat_byte(0); + + let block_hash_a = Hash::repeat_byte(1); + let block_hash_b1 = Hash::repeat_byte(2); + let block_hash_b2 = Hash::repeat_byte(3); + let block_hash_c1 = Hash::repeat_byte(4); + let block_hash_c2 = Hash::repeat_byte(5); + let block_hash_d1 = Hash::repeat_byte(6); + let block_hash_d2 = Hash::repeat_byte(7); + + let cand_hash_1 = CandidateHash(Hash::repeat_byte(10)); + let cand_hash_2 = CandidateHash(Hash::repeat_byte(11)); + let cand_hash_3 = CandidateHash(Hash::repeat_byte(12)); + let cand_hash_4 = CandidateHash(Hash::repeat_byte(13)); + let cand_hash_5 = CandidateHash(Hash::repeat_byte(15)); + + let block_entry_a = make_block_entry(block_hash_a, Vec::new()); + let block_entry_b1 = make_block_entry(block_hash_b1, Vec::new()); + let block_entry_b2 = make_block_entry(block_hash_b2, vec![(CoreIndex(0), cand_hash_1)]); + let block_entry_c1 = make_block_entry(block_hash_c1, Vec::new()); + let block_entry_c2 = make_block_entry( + block_hash_c2, + vec![(CoreIndex(0), cand_hash_2), (CoreIndex(1), cand_hash_3)], + ); + let block_entry_d1 = make_block_entry( + block_hash_d1, + vec![(CoreIndex(0), cand_hash_3), (CoreIndex(1), cand_hash_4)], + ); + let block_entry_d2 = make_block_entry(block_hash_d2, vec![(CoreIndex(0), 
cand_hash_5)]); + + + let candidate_info = { + let mut candidate_info = HashMap::new(); + candidate_info.insert(cand_hash_1, NewCandidateInfo { + candidate: make_candidate(1.into(), genesis), + backing_group: GroupIndex(1), + our_assignment: None, + }); + + candidate_info.insert(cand_hash_2, NewCandidateInfo { + candidate: make_candidate(2.into(), block_hash_a), + backing_group: GroupIndex(2), + our_assignment: None, + }); + + candidate_info.insert(cand_hash_3, NewCandidateInfo { + candidate: make_candidate(3.into(), block_hash_a), + backing_group: GroupIndex(3), + our_assignment: None, + }); + + candidate_info.insert(cand_hash_4, NewCandidateInfo { + candidate: make_candidate(4.into(), block_hash_b1), + backing_group: GroupIndex(4), + our_assignment: None, + }); + + candidate_info.insert(cand_hash_5, NewCandidateInfo { + candidate: make_candidate(5.into(), block_hash_c1), + backing_group: GroupIndex(5), + our_assignment: None, + }); + + candidate_info + }; + + // now insert all the blocks. 
+ let blocks = vec![ + (genesis, 1, block_entry_a.clone()), + (block_hash_a, 2, block_entry_b1.clone()), + (block_hash_a, 2, block_entry_b2.clone()), + (block_hash_b1, 3, block_entry_c1.clone()), + (block_hash_b2, 3, block_entry_c2.clone()), + (block_hash_c1, 4, block_entry_d1.clone()), + (block_hash_c2, 4, block_entry_d2.clone()), + ]; + + for (parent_hash, number, block_entry) in blocks { + add_block_entry( + &store, + parent_hash, + number, + block_entry, + n_validators, + |h| candidate_info.get(h).map(|x| x.clone()), + ).unwrap(); + } + + let check_candidates_in_store = |expected: Vec<(CandidateHash, Option>)>| { + for (c_hash, in_blocks) in expected { + let (entry, in_blocks) = match in_blocks { + None => { + assert!(load_candidate_entry(&store, &c_hash).unwrap().is_none()); + continue + } + Some(i) => ( + load_candidate_entry(&store, &c_hash).unwrap().unwrap(), + i, + ), + }; + + assert_eq!(entry.block_assignments.len(), in_blocks.len()); + + for x in in_blocks { + assert!(entry.block_assignments.contains_key(&x)); + } + } + }; + + let check_blocks_in_store = |expected: Vec<(Hash, Option>)>| { + for (hash, with_candidates) in expected { + let (entry, with_candidates) = match with_candidates { + None => { + assert!(load_block_entry(&store, &hash).unwrap().is_none()); + continue + } + Some(i) => ( + load_block_entry(&store, &hash).unwrap().unwrap(), + i, + ), + }; + + assert_eq!(entry.candidates.len(), with_candidates.len()); + + for x in with_candidates { + assert!(entry.candidates.iter().position(|&(_, ref c)| c == &x).is_some()); + } + } + }; + + check_candidates_in_store(vec![ + (cand_hash_1, Some(vec![block_hash_b2])), + (cand_hash_2, Some(vec![block_hash_c2])), + (cand_hash_3, Some(vec![block_hash_c2, block_hash_d1])), + (cand_hash_4, Some(vec![block_hash_d1])), + (cand_hash_5, Some(vec![block_hash_d2])), + ]); + + check_blocks_in_store(vec![ + (block_hash_a, Some(vec![])), + (block_hash_b1, Some(vec![])), + (block_hash_b2, Some(vec![cand_hash_1])), + 
(block_hash_c1, Some(vec![])), + (block_hash_c2, Some(vec![cand_hash_2, cand_hash_3])), + (block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])), + (block_hash_d2, Some(vec![cand_hash_5])), + ]); + + canonicalize(&store, 3, block_hash_c1).unwrap(); + + assert_eq!(load_stored_blocks(&store).unwrap().unwrap(), StoredBlockRange(4, 5)); + + check_candidates_in_store(vec![ + (cand_hash_1, None), + (cand_hash_2, None), + (cand_hash_3, Some(vec![block_hash_d1])), + (cand_hash_4, Some(vec![block_hash_d1])), + (cand_hash_5, None), + ]); + + check_blocks_in_store(vec![ + (block_hash_a, None), + (block_hash_b1, None), + (block_hash_b2, None), + (block_hash_c1, None), + (block_hash_c2, None), + (block_hash_d1, Some(vec![cand_hash_3, cand_hash_4])), + (block_hash_d2, None), + ]); +} diff --git a/node/core/approval-voting/src/lib.rs b/node/core/approval-voting/src/lib.rs new file mode 100644 index 000000000000..270afee617a7 --- /dev/null +++ b/node/core/approval-voting/src/lib.rs @@ -0,0 +1,27 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! The Approval Voting Subsystem. +//! +//! This subsystem is responsible for determining candidates to do approval checks +//! on, performing those approval checks, and tracking the assignments and approvals +//! of others. It uses this information to determine when candidates and blocks have +//! 
been sufficiently approved to finalize. + +mod aux_schema; + +/// A base unit of time, starting from the unix epoch, split into half-second intervals. +type Tick = u64; diff --git a/node/primitives/src/approval.rs b/node/primitives/src/approval.rs index 32b4e5af7051..2783cb660202 100644 --- a/node/primitives/src/approval.rs +++ b/node/primitives/src/approval.rs @@ -35,7 +35,7 @@ pub const RELAY_VRF_DELAY_CONTEXT: &str = "A&V TRANCHE"; /// random bytes derived from the VRF submitted within the block by the /// block author as a credential and used as input to approval assignment criteria. -#[derive(Debug, Clone, Encode, Decode)] +#[derive(Debug, Clone, Encode, Decode, PartialEq)] pub struct RelayVRF(pub [u8; 32]); /// Different kinds of input data or criteria that can prove a validator's assignment diff --git a/roadmap/implementers-guide/src/node/approval/approval-voting.md b/roadmap/implementers-guide/src/node/approval/approval-voting.md index 7f96f9672162..1c673f0324ef 100644 --- a/roadmap/implementers-guide/src/node/approval/approval-voting.md +++ b/roadmap/implementers-guide/src/node/approval/approval-voting.md @@ -82,7 +82,6 @@ struct BlockEntry { // The i'th bit is `true` iff the candidate has been approved in the context of // this block. The block can be considered approved has all bits set to 1 approved_bitfield: Bitfield, - rotation_offset: GroupIndex, children: Vec, } @@ -143,7 +142,7 @@ Main loop: #### `OverseerSignal::BlockFinalized` -On receiving an `OverseerSignal::BlockFinalized(h)`, we fetch the block number `b` of that block from the ChainApi subsystem. We update our `StoredBlockRange` to begin at `b+1`. Additionally, we remove all block entries and candidates referenced by them up to and including `b`. Lastly, we prune out all descendents of `h` transitively: when we remove a `BlockEntry` with number `b` that is not equal to `h`, we recursively delete all the `BlockEntry`s referenced as children. 
We remove the `block_assignments` entry for the block hash and if `block_assignments` is now empty, remove the `CandidateEntry`. +On receiving an `OverseerSignal::BlockFinalized(h)`, we fetch the block number `b` of that block from the ChainApi subsystem. We update our `StoredBlockRange` to begin at `b+1`. Additionally, we remove all block entries and candidates referenced by them up to and including `b`. Lastly, we prune out all descendants of `h` transitively: when we remove a `BlockEntry` with number `b` that is not equal to `h`, we recursively delete all the `BlockEntry`s referenced as children. We remove the `block_assignments` entry for the block hash and if `block_assignments` is now empty, remove the `CandidateEntry`. We also update each of the `BlockNumber -> Vec<Hash>` keys in the database to reflect the blocks at that height, clearing if empty. #### `OverseerSignal::ActiveLeavesUpdate`