diff --git a/Cargo.lock b/Cargo.lock index c852d480edd5d..793f1179918de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3969,6 +3969,7 @@ dependencies = [ "sr-primitives 2.0.0", "sr-staking-primitives 2.0.0", "sr-std 2.0.0", + "sr-version 2.0.0", "srml-session 2.0.0", "srml-support 2.0.0", "srml-system 2.0.0", @@ -3976,6 +3977,7 @@ dependencies = [ "substrate-consensus-babe-primitives 2.0.0", "substrate-inherents 2.0.0", "substrate-primitives 2.0.0", + "substrate-test-runtime 2.0.0", ] [[package]] diff --git a/core/client/src/client.rs b/core/client/src/client.rs index acba5fa824aab..5203c72303551 100644 --- a/core/client/src/client.rs +++ b/core/client/src/client.rs @@ -1482,6 +1482,9 @@ impl CallRuntimeAt for Client where } } +/// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport +/// objects. Otherwise, importing blocks directly into the client would be bypassing +/// important verification work. impl<'a, B, E, Block, RA> consensus::BlockImport for &'a Client where B: backend::Backend, E: CallExecutor + Clone + Send + Sync, @@ -1491,6 +1494,13 @@ impl<'a, B, E, Block, RA> consensus::BlockImport for &'a Client, @@ -1899,8 +1909,9 @@ where /// Utility methods for the client. pub mod utils { use super::*; - use crate::{backend::Backend, blockchain, error}; + use crate::{blockchain, error}; use primitives::H256; + use std::borrow::Borrow; /// Returns a function for checking block ancestry, the returned function will /// return `true` if the given hash (second parameter) is a descendent of the @@ -1908,16 +1919,17 @@ pub mod utils { /// represent the current block `hash` and its `parent hash`, if given the /// function that's returned will assume that `hash` isn't part of the local DB /// yet, and all searches in the DB will instead reference the parent. 
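+ ///
+ /// # Example
+ ///
+ /// A minimal usage sketch, assuming a `client` implementing `ChainHeaderBackend`
+ /// and two `H256` hashes of blocks already stored in the local DB:
+ ///
+ /// ```ignore
+ /// // No "current" hint: both blocks are assumed to already be in the DB.
+ /// let is_descendent_of = is_descendent_of(&*client, None::<(H256, H256)>);
+ /// if is_descendent_of(&base, &head)? {
+ ///     // `head` is a descendent of `base`.
+ /// }
+ /// ```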
- pub fn is_descendent_of<'a, B, E, Block: BlockT, RA>( - client: &'a Client, - current: Option<(&'a H256, &'a H256)>, + pub fn is_descendent_of<'a, Block: BlockT, T, H: Borrow + 'a>( + client: &'a T, + current: Option<(H, H)>, ) -> impl Fn(&H256, &H256) -> Result + 'a - where B: Backend, - E: CallExecutor + Send + Sync, + where T: ChainHeaderBackend, { move |base, hash| { if base == hash { return Ok(false); } + let current = current.as_ref().map(|(c, p)| (c.borrow(), p.borrow())); + let mut hash = hash; if let Some((current_hash, current_parent_hash)) = current { if base == current_hash { return Ok(false); } @@ -1931,7 +1943,7 @@ pub mod utils { } let tree_route = blockchain::tree_route( - |id| client.header(&id)?.ok_or_else(|| Error::UnknownBlock(format!("{:?}", id))), + |id| client.header(id)?.ok_or_else(|| Error::UnknownBlock(format!("{:?}", id))), BlockId::Hash(*hash), BlockId::Hash(*base), )?; diff --git a/core/consensus/aura/src/lib.rs b/core/consensus/aura/src/lib.rs index 3c60128dd39e2..a61eb6da7214e 100644 --- a/core/consensus/aura/src/lib.rs +++ b/core/consensus/aura/src/lib.rs @@ -218,8 +218,8 @@ impl slots::SimpleSlotWorker for AuraWorker Result { - authorities(self.client.as_ref(), &BlockId::Hash(*block)) + fn epoch_data(&self, header: &B::Header, _slot_number: u64) -> Result { + authorities(self.client.as_ref(), &BlockId::Hash(header.hash())) } fn authorities_len(&self, epoch_data: &Self::EpochData) -> usize { @@ -741,7 +741,7 @@ mod tests { } } - fn make_verifier(&self, client: PeersClient, _cfg: &ProtocolConfig) + fn make_verifier(&self, client: PeersClient, _cfg: &ProtocolConfig, _peer_data: &()) -> Self::Verifier { match client { diff --git a/core/consensus/babe/primitives/src/digest.rs b/core/consensus/babe/primitives/src/digest.rs index 427c4fec57e7d..3f275ecdb6341 100644 --- a/core/consensus/babe/primitives/src/digest.rs +++ b/core/consensus/babe/primitives/src/digest.rs @@ -17,12 +17,10 @@ //! Private implementation details of BABE digests. #[cfg(feature = "std")] -use super::AuthoritySignature; -#[cfg(feature = "std")] -use super::{BABE_ENGINE_ID, Epoch}; +use super::{BABE_ENGINE_ID, AuthoritySignature}; #[cfg(not(feature = "std"))] use super::{VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH}; -use super::{AuthorityIndex, BabeBlockWeight, SlotNumber}; +use super::{AuthorityId, AuthorityIndex, SlotNumber, BabeAuthorityWeight}; #[cfg(feature = "std")] use sr_primitives::{DigestItem, generic::OpaqueDigestItemId}; #[cfg(feature = "std")] @@ -35,6 +33,8 @@ use schnorrkel::{ SignatureError, errors::MultiSignatureStage, vrf::{VRFProof, VRFOutput, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH} }; +use rstd::vec::Vec; + /// A BABE pre-runtime digest. This contains all data required to validate a /// block and for the BABE runtime module. Slots can be assigned to a primary @@ -52,8 +52,6 @@ pub enum BabePreDigest { authority_index: super::AuthorityIndex, /// Slot number slot_number: SlotNumber, - /// Chain weight (measured in number of Primary blocks) - weight: BabeBlockWeight, }, /// A secondary deterministic slot assignment. Secondary { @@ -61,8 +59,6 @@ pub enum BabePreDigest { authority_index: super::AuthorityIndex, /// Slot number slot_number: SlotNumber, - /// Chain weight (measured in number of Primary blocks) - weight: BabeBlockWeight, }, } @@ -84,11 +80,12 @@ impl BabePreDigest { } } - /// Returns the weight of the pre digest. - pub fn weight(&self) -> BabeBlockWeight { + /// Returns the weight _added_ by this digest, not the cumulative weight + /// of the chain. 
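+ ///
+ /// A sketch of the intended accounting (the `parent_weight` value here is
+ /// illustrative): primary blocks add `1`, secondary blocks add `0`, so the
+ /// cumulative weight of a chain is the number of primary blocks in it.
+ ///
+ /// ```ignore
+ /// let parent_weight: BabeBlockWeight = 10;
+ /// let this_block_weight = parent_weight + pre_digest.added_weight();
+ /// ```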
+ pub fn added_weight(&self) -> crate::BabeBlockWeight { match self { - BabePreDigest::Primary { weight, .. } => *weight, - BabePreDigest::Secondary { weight, .. } => *weight, + BabePreDigest::Primary { .. } => 1, + BabePreDigest::Secondary { .. } => 0, } } } @@ -100,26 +97,29 @@ pub const BABE_VRF_PREFIX: &'static [u8] = b"substrate-babe-vrf"; #[derive(Copy, Clone, Encode, Decode)] pub enum RawBabePreDigest { /// A primary VRF-based slot assignment. + #[codec(index = "1")] Primary { /// Authority index authority_index: AuthorityIndex, /// Slot number slot_number: SlotNumber, - /// Chain weight (measured in number of Primary blocks) - weight: BabeBlockWeight, /// VRF output vrf_output: [u8; VRF_OUTPUT_LENGTH], /// VRF proof vrf_proof: [u8; VRF_PROOF_LENGTH], }, /// A secondary deterministic slot assignment. + #[codec(index = "2")] Secondary { /// Authority index + /// + /// This is not strictly-speaking necessary, since the secondary slots + /// are assigned based on slot number and epoch randomness. But including + /// it makes things easier for higher-level users of the chain data to + /// be aware of the author of a secondary-slot block. authority_index: AuthorityIndex, /// Slot number slot_number: SlotNumber, - /// Chain weight (measured in number of Primary blocks) - weight: BabeBlockWeight, }, } @@ -142,25 +142,21 @@ impl Encode for BabePreDigest { vrf_proof, authority_index, slot_number, - weight, } => { RawBabePreDigest::Primary { vrf_output: *vrf_output.as_bytes(), vrf_proof: vrf_proof.to_bytes(), authority_index: *authority_index, slot_number: *slot_number, - weight: *weight, } }, BabePreDigest::Secondary { authority_index, slot_number, - weight, } => { RawBabePreDigest::Secondary { authority_index: *authority_index, slot_number: *slot_number, - weight: *weight, } }, }; @@ -176,7 +172,7 @@ impl codec::EncodeLike for BabePreDigest {} impl Decode for BabePreDigest { fn decode(i: &mut R) -> Result { let pre_digest = match Decode::decode(i)? { - RawBabePreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number, weight } => { + RawBabePreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { // Verify (at compile time) that the sizes in babe_primitives are correct let _: [u8; super::VRF_OUTPUT_LENGTH] = vrf_output; let _: [u8; super::VRF_PROOF_LENGTH] = vrf_proof; @@ -186,11 +182,10 @@ impl Decode for BabePreDigest { vrf_output: VRFOutput::from_bytes(&vrf_output).map_err(convert_error)?, authority_index, slot_number, - weight, } }, - RawBabePreDigest::Secondary { authority_index, slot_number, weight } => { - BabePreDigest::Secondary { authority_index, slot_number, weight } + RawBabePreDigest::Secondary { authority_index, slot_number } => { + BabePreDigest::Secondary { authority_index, slot_number } }, }; @@ -198,6 +193,18 @@ impl Decode for BabePreDigest { } } +/// Information about the next epoch. This is broadcast in the first block +/// of the epoch. +#[derive(Decode, Encode, Default, PartialEq, Eq, Clone)] +#[cfg_attr(any(feature = "std", test), derive(Debug))] +pub struct NextEpochDescriptor { + /// The authorities. + pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + + /// The value of randomness to use for the slot-assignment. + pub randomness: [u8; VRF_OUTPUT_LENGTH], +} + /// A digest item which is usable with BABE consensus. #[cfg(feature = "std")] pub trait CompatibleDigestItem: Sized { @@ -214,7 +221,7 @@ pub trait CompatibleDigestItem: Sized { fn as_babe_seal(&self) -> Option; /// If this item is a BABE epoch, return it. 
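+ ///
+ /// A usage sketch, assuming `header` is a block header whose digest may
+ /// carry a BABE consensus log:
+ ///
+ /// ```ignore
+ /// let next_epoch = header.digest().logs().iter()
+ ///     .filter_map(CompatibleDigestItem::as_next_epoch_descriptor)
+ ///     .next();
+ /// ```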
- fn as_babe_epoch(&self) -> Option; + fn as_next_epoch_descriptor(&self) -> Option; } #[cfg(feature = "std")] @@ -237,8 +244,12 @@ impl CompatibleDigestItem for DigestItem where self.try_to(OpaqueDigestItemId::Seal(&BABE_ENGINE_ID)) } - fn as_babe_epoch(&self) -> Option { + fn as_next_epoch_descriptor(&self) -> Option { self.try_to(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)) + .and_then(|x: super::ConsensusLog| match x { + super::ConsensusLog::NextEpochData(n) => Some(n), + _ => None, + }) } } diff --git a/core/consensus/babe/primitives/src/lib.rs b/core/consensus/babe/primitives/src/lib.rs index 09ac2f20123ac..1293b7c8baa00 100644 --- a/core/consensus/babe/primitives/src/lib.rs +++ b/core/consensus/babe/primitives/src/lib.rs @@ -28,7 +28,7 @@ use substrate_client::decl_runtime_apis; #[cfg(feature = "std")] pub use digest::{BabePreDigest, CompatibleDigestItem}; -pub use digest::{BABE_VRF_PREFIX, RawBabePreDigest}; +pub use digest::{BABE_VRF_PREFIX, RawBabePreDigest, NextEpochDescriptor}; mod app { use app_crypto::{app_crypto, key_types::BABE, sr25519}; @@ -59,6 +59,11 @@ pub const VRF_PROOF_LENGTH: usize = 64; /// The length of the public key pub const PUBLIC_KEY_LENGTH: usize = 32; +/// How many blocks to wait before running the median algorithm for relative time +/// This will not vary from chain to chain as it is not dependent on slot duration +/// or epoch length. +pub const MEDIAN_ALGORITHM_CARDINALITY: usize = 1200; // arbitrary suggestion by w3f-research. + /// The index of an authority. pub type AuthorityIndex = u32; @@ -80,33 +85,50 @@ pub struct Epoch { /// The epoch index pub epoch_index: u64, /// The starting slot of the epoch, - pub start_slot: u64, + pub start_slot: SlotNumber, /// The duration of this epoch pub duration: SlotNumber, /// The authorities and their weights pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, /// Randomness for this epoch pub randomness: [u8; VRF_OUTPUT_LENGTH], - /// Whether secondary slot assignments should be used during the epoch. - pub secondary_slots: bool, +} + +impl Epoch { + /// "increment" the epoch, with given descriptor for the next. + pub fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { + Epoch { + epoch_index: self.epoch_index + 1, + start_slot: self.start_slot + self.duration, + duration: self.duration, + authorities: descriptor.authorities, + randomness: descriptor.randomness, + } + } + + /// Produce the "end slot" of the epoch. This is NOT inclusive to the epoch, + // i.e. the slots covered by the epoch are `self.start_slot .. self.end_slot()`. + pub fn end_slot(&self) -> SlotNumber { + self.start_slot + self.duration + } } /// An consensus log item for BABE. #[derive(Decode, Encode, Clone, PartialEq, Eq)] pub enum ConsensusLog { - /// The epoch has changed. This provides information about the - /// epoch _after_ next: what slot number it will start at, who are the authorities (and their weights) - /// and the next epoch randomness. The information for the _next_ epoch should already - /// be available. + /// The epoch has changed. This provides information about the _next_ + /// epoch - information about the _current_ epoch (i.e. the one we've just + /// entered) should already be available earlier in the chain. #[codec(index = "1")] - NextEpochData(Epoch), + NextEpochData(NextEpochDescriptor), /// Disable the authority with given index. #[codec(index = "2")] OnDisabled(AuthorityIndex), } /// Configuration data used by the BABE consensus engine. 
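+ ///
+ /// A sketch of a genesis configuration. Every value below is illustrative,
+ /// including the hypothetical `alice_id`; real values come from the chain
+ /// spec.
+ ///
+ /// ```ignore
+ /// let config = BabeConfiguration {
+ ///     slot_duration: 3000,                  // milliseconds per slot
+ ///     epoch_length: 200,                    // slots per epoch
+ ///     c: (1, 4),                            // P(slot has a primary) = 1/4
+ ///     genesis_authorities: vec![(alice_id, 1)],
+ ///     randomness: [0; VRF_OUTPUT_LENGTH],
+ ///     secondary_slots: true,
+ /// };
+ /// ```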
-#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug, Encode, Decode)] +#[derive(Clone, PartialEq, Eq, Encode, Decode)] +#[cfg_attr(any(feature = "std", test), derive(Debug))] pub struct BabeConfiguration { /// The slot duration in milliseconds for BABE. Currently, only /// the value provided by this type at genesis will be used. @@ -114,35 +136,35 @@ pub struct BabeConfiguration { /// Dynamic slot duration may be supported in the future. pub slot_duration: u64, + /// The duration of epochs in slots. + pub epoch_length: SlotNumber, + /// A constant value that is used in the threshold calculation formula. - /// Expressed as a fraction where the first member of the tuple is the - /// numerator and the second is the denominator. The fraction should + /// Expressed as a rational where the first member of the tuple is the + /// numerator and the second is the denominator. The rational should /// represent a value between 0 and 1. /// In the threshold formula calculation, `1 - c` represents the probability /// of a slot being empty. pub c: (u64, u64), - /// The minimum number of blocks that must be received before running the - /// median algorithm to compute the offset between the on-chain time and the - /// local time. Currently, only the value provided by this type at genesis - /// will be used, but this is subject to change. - /// - /// Blocks less than `self.median_required_blocks` must be generated by an - /// *initial validator* ― that is, a node that was a validator at genesis. - pub median_required_blocks: u64, + /// The authorities for the genesis epoch. + pub genesis_authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + + /// The randomness for the genesis epoch. + pub randomness: [u8; VRF_OUTPUT_LENGTH], + + /// Whether this chain should run with secondary slots, which are assigned + /// in round-robin manner. + pub secondary_slots: bool, } #[cfg(feature = "std")] impl slots::SlotData for BabeConfiguration { - /// Return the slot duration in milliseconds for BABE. Currently, only - /// the value provided by this type at genesis will be used. - /// - /// Dynamic slot duration may be supported in the future. fn slot_duration(&self) -> u64 { self.slot_duration } - const SLOT_KEY: &'static [u8] = b"babe_bootstrap_data"; + const SLOT_KEY: &'static [u8] = b"babe_configuration"; } decl_runtime_apis! { @@ -152,9 +174,6 @@ decl_runtime_apis! { /// only the value provided by this type at genesis will be used. /// /// Dynamic configuration may be supported in the future. - fn startup_data() -> BabeConfiguration; - - /// Get the current epoch data for Babe. 
- fn epoch() -> Epoch; + fn configuration() -> BabeConfiguration; } } diff --git a/core/consensus/babe/src/aux_schema.rs b/core/consensus/babe/src/aux_schema.rs index ac90b4ce52dc4..67f61050fa31a 100644 --- a/core/consensus/babe/src/aux_schema.rs +++ b/core/consensus/babe/src/aux_schema.rs @@ -22,11 +22,16 @@ use codec::{Decode, Encode}; use client::backend::AuxStore; use client::error::{Result as ClientResult, Error as ClientError}; use sr_primitives::traits::Block as BlockT; +use babe_primitives::BabeBlockWeight; -use super::{EpochChanges, SharedEpochChanges}; +use super::{epoch_changes::EpochChangesFor, SharedEpochChanges}; const BABE_EPOCH_CHANGES: &[u8] = b"babe_epoch_changes"; +fn block_weight_key(block_hash: H) -> Vec { + (b"block_weight", block_hash).encode() +} + fn load_decode(backend: &B, key: &[u8]) -> ClientResult> where B: AuxStore, @@ -45,7 +50,7 @@ fn load_decode(backend: &B, key: &[u8]) -> ClientResult> pub(crate) fn load_epoch_changes( backend: &B, ) -> ClientResult> { - let epoch_changes = load_decode::<_, EpochChanges>(backend, BABE_EPOCH_CHANGES)? + let epoch_changes = load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES)? .map(Into::into) .unwrap_or_else(|| { info!(target: "babe", @@ -59,7 +64,7 @@ pub(crate) fn load_epoch_changes( /// Update the epoch changes on disk after a change. pub(crate) fn write_epoch_changes( - epoch_changes: &EpochChanges, + epoch_changes: &EpochChangesFor, write_aux: F, ) -> R where F: FnOnce(&[(&'static [u8], &[u8])]) -> R, @@ -69,3 +74,28 @@ pub(crate) fn write_epoch_changes( &[(BABE_EPOCH_CHANGES, encoded_epoch_changes.as_slice())], ) } + +/// Write the cumulative chain-weight of a block ot aux storage. +pub(crate) fn write_block_weight( + block_hash: H, + block_weight: &BabeBlockWeight, + write_aux: F, +) -> R where + F: FnOnce(&[(Vec, &[u8])]) -> R, +{ + + let key = block_weight_key(block_hash); + block_weight.using_encoded(|s| + write_aux( + &[(key, s)], + ) + ) +} + +/// Load the cumulative chain-weight associated with a block. +pub(crate) fn load_block_weight( + backend: &B, + block_hash: H, +) -> ClientResult> { + load_decode(backend, block_weight_key(block_hash).as_slice()) +} diff --git a/core/consensus/babe/src/epoch_changes.rs b/core/consensus/babe/src/epoch_changes.rs new file mode 100644 index 0000000000000..0dbad245e644e --- /dev/null +++ b/core/consensus/babe/src/epoch_changes.rs @@ -0,0 +1,637 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Handling epoch changes in BABE. +//! +//! This exposes the `SharedEpochChanges`, which is a wrapper around a +//! persistent DAG superimposed over the forks of the blockchain. 
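+//!
+//! A rough sketch of how the tree is queried during authoring and
+//! verification (the `client`, `config` and block details are assumed to be
+//! supplied by the caller):
+//!
+//! ```ignore
+//! let maybe_epoch = epoch_changes.lock().epoch_for_child_of(
+//!     descendent_query(&*client),
+//!     &parent_hash,
+//!     parent_number,
+//!     slot_number,
+//!     |slot| config.genesis_epoch(slot),
+//! )?; // `Option<ViableEpoch>` for the block being authored or verified
+//! ```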
+ +use std::sync::Arc; +use babe_primitives::{Epoch, SlotNumber, NextEpochDescriptor}; +use fork_tree::ForkTree; +use parking_lot::{Mutex, MutexGuard}; +use sr_primitives::traits::{Block as BlockT, NumberFor, One, Zero}; +use codec::{Encode, Decode}; +use client::error::Error as ClientError; +use client::utils as client_utils; +use client::blockchain::HeaderBackend; +use primitives::H256; +use std::ops::Add; + +/// A builder for `is_descendent_of` functions. +pub trait IsDescendentOfBuilder { + /// The error returned by the function. + type Error: std::error::Error; + /// A function that can tell you if the second parameter is a descendent of + /// the first. + type IsDescendentOf: Fn(&Hash, &Hash) -> Result; + + /// Build an `is_descendent_of` function. + /// + /// The `current` parameter can be `Some` with the details a fresh block whose + /// details aren't yet stored, but its parent is. + /// + /// The format of `current` when `Some` is `(current, current_parent)`. + fn build_is_descendent_of(&self, current: Option<(Hash, Hash)>) + -> Self::IsDescendentOf; +} + +/// Produce a descendent query object given the client. +pub(crate) fn descendent_query(client: &H) -> HeaderBackendDescendentBuilder<&H, Block> { + HeaderBackendDescendentBuilder(client, std::marker::PhantomData) +} + +/// Wrapper to get around unconstrained type errors when implementing +/// `IsDescendentOfBuilder` for header backends. +pub(crate) struct HeaderBackendDescendentBuilder(H, std::marker::PhantomData); + +// TODO: relying on Hash = H256 is awful. +// https://github.com/paritytech/substrate/issues/3624 +impl<'a, H, Block> IsDescendentOfBuilder + for HeaderBackendDescendentBuilder<&'a H, Block> where + H: HeaderBackend, + Block: BlockT, +{ + type Error = ClientError; + type IsDescendentOf = Box Result + 'a>; + + fn build_is_descendent_of(&self, current: Option<(H256, H256)>) + -> Self::IsDescendentOf + { + Box::new(client_utils::is_descendent_of(self.0, current)) + } +} + +/// An unimported genesis epoch. +pub struct UnimportedGenesis(Epoch); + +/// The viable epoch under which a block can be verified. +/// +/// If this is the first non-genesis block in the chain, then it will +/// hold an `UnimportedGenesis` epoch. +pub enum ViableEpoch { + Genesis(UnimportedGenesis), + Regular(Epoch), +} + +impl From for ViableEpoch { + fn from(epoch: Epoch) -> ViableEpoch { + ViableEpoch::Regular(epoch) + } +} + +impl AsRef for ViableEpoch { + fn as_ref(&self) -> &Epoch { + match *self { + ViableEpoch::Genesis(UnimportedGenesis(ref e)) => e, + ViableEpoch::Regular(ref e) => e, + } + } +} + +impl ViableEpoch { + /// Extract the underlying epoch, disregarding the fact that a genesis + /// epoch may be unimported. + pub fn into_inner(self) -> Epoch { + match self { + ViableEpoch::Genesis(UnimportedGenesis(e)) => e, + ViableEpoch::Regular(e) => e, + } + } + + /// Increment the epoch, yielding an `IncrementedEpoch` to be imported + /// into the fork-tree. + pub fn increment(&self, next_descriptor: NextEpochDescriptor) -> IncrementedEpoch { + let next = self.as_ref().increment(next_descriptor); + let to_persist = match *self { + ViableEpoch::Genesis(UnimportedGenesis(ref epoch_0)) => + PersistedEpoch::Genesis(epoch_0.clone(), next), + ViableEpoch::Regular(_) => PersistedEpoch::Regular(next), + }; + + IncrementedEpoch(to_persist) + } +} + +/// The datatype encoded on disk. +// This really shouldn't be public, but the encode/decode derives force it to be. 
+#[derive(Clone, Encode, Decode)] +pub enum PersistedEpoch { + // epoch_0, epoch_1, + Genesis(Epoch, Epoch), + // epoch_n + Regular(Epoch), +} + +/// A fresh, incremented epoch to import into the underlying fork-tree. +/// +/// Create this with `ViableEpoch::increment`. +#[must_use = "Freshly-incremented epoch must be imported with `EpochChanges::import`"] +pub struct IncrementedEpoch(PersistedEpoch); + +impl AsRef for IncrementedEpoch { + fn as_ref(&self) -> &Epoch { + match self.0 { + PersistedEpoch::Genesis(_, ref epoch_1) => epoch_1, + PersistedEpoch::Regular(ref epoch_n) => epoch_n, + } + } +} + +/// Tree of all epoch changes across all *seen* forks. Data stored in tree is +/// the hash and block number of the block signaling the epoch change, and the +/// epoch that was signalled at that block. +/// +/// BABE special-cases the first epoch, epoch_0, by saying that it starts at +/// slot number of the first block in the chain. When bootstrapping a chain, +/// there can be multiple competing block #1s, so we have to ensure that the overlayed +/// DAG doesn't get confused. +/// +/// The first block of every epoch should be producing a descriptor for the next +/// epoch - this is checked in higher-level code. So the first block of epoch_0 contains +/// a descriptor for epoch_1. We special-case these and bundle them together in the +/// same DAG entry, pinned to a specific block #1. +/// +/// Further epochs (epoch_2, ..., epoch_n) each get their own entry. +#[derive(Clone, Encode, Decode)] +pub struct EpochChanges { + inner: ForkTree, +} + +// create a fake header hash which hasn't been included in the chain. +fn fake_head_hash + AsMut<[u8]> + Clone>(parent_hash: &H) -> H { + let mut h = parent_hash.clone(); + // dirty trick: flip the first bit of the parent hash to create a hash + // which has not been in the chain before (assuming a strong hash function). + h.as_mut()[0] ^= 0b10000000; + h +} + +impl EpochChanges where + Hash: PartialEq + AsRef<[u8]> + AsMut<[u8]> + Copy, + Number: Ord + One + Zero + Add + Copy, +{ + /// Create a new epoch-change tracker. + fn new() -> Self { + EpochChanges { inner: ForkTree::new() } + } + + /// Prune out finalized epochs, except for the ancestor of the finalized block. + pub fn prune_finalized>( + &mut self, + descendent_of_builder: D, + _hash: &Hash, + _number: Number, + ) -> Result<(), fork_tree::Error> { + let _is_descendent_of = descendent_of_builder + .build_is_descendent_of(None); + + // TODO: + // https://github.com/paritytech/substrate/issues/3651 + // + // prune any epochs which could not be _live_ as of the children of the + // finalized block. + // i.e. re-root the fork tree to the oldest ancestor of (hash, number) + // where epoch.end_slot() >= slot(hash) + + Ok(()) + } + + /// Finds the epoch for a child of the given block, assuming the given slot number. + /// + /// If the returned epoch is an `UnimportedGenesis` epoch, it should be imported into the + /// tree. + pub fn epoch_for_child_of, G>( + &self, + descendent_of_builder: D, + parent_hash: &Hash, + parent_number: Number, + slot_number: SlotNumber, + make_genesis: G, + ) -> Result, fork_tree::Error> + where G: FnOnce(SlotNumber) -> Epoch + { + // find_node_where will give you the node in the fork-tree which is an ancestor + // of the `parent_hash` by default. if the last epoch was signalled at the parent_hash, + // then it won't be returned. we need to create a new fake chain head hash which + // "descends" from our parent-hash. 
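+ // (sketch: assuming a strong hash function, a parent hash whose first
+ // byte is 0x00 yields a fake head whose first byte is 0x80, a value
+ // overwhelmingly unlikely to collide with any real block hash.)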
+ let fake_head_hash = fake_head_hash(parent_hash); + + let is_descendent_of = descendent_of_builder + .build_is_descendent_of(Some((fake_head_hash, *parent_hash))); + + if parent_number == Zero::zero() { + // need to insert the genesis epoch. + let genesis_epoch = make_genesis(slot_number); + return Ok(Some(ViableEpoch::Genesis(UnimportedGenesis(genesis_epoch)))); + } + + // We want to find the deepest node in the tree which is an ancestor + // of our block and where the start slot of the epoch was before the + // slot of our block. The genesis special-case doesn't need to look + // at epoch_1 -- all we're doing here is figuring out which node + // we need. + let predicate = |epoch: &PersistedEpoch| match *epoch { + PersistedEpoch::Genesis(ref epoch_0, _) => + epoch_0.start_slot <= slot_number, + PersistedEpoch::Regular(ref epoch_n) => + epoch_n.start_slot <= slot_number, + }; + + self.inner.find_node_where( + &fake_head_hash, + &(parent_number + One::one()), + &is_descendent_of, + &predicate, + ) + .map(|n| n.map(|node| ViableEpoch::Regular(match node.data { + // Ok, we found our node. + // and here we figure out which of the internal epochs + // of a genesis node to use based on their start slot. + PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => + if epoch_1.start_slot <= slot_number { + epoch_1.clone() + } else { + epoch_0.clone() + }, + PersistedEpoch::Regular(ref epoch_n) => epoch_n.clone(), + }))) + } + + /// Import a new epoch-change, signalled at the given block. + /// + /// This assumes that the given block is prospective (i.e. has not been + /// imported yet), but its parent has. This is why the parent hash needs + /// to be provided. + pub fn import>( + &mut self, + descendent_of_builder: D, + hash: Hash, + number: Number, + parent_hash: Hash, + epoch: IncrementedEpoch, + ) -> Result<(), fork_tree::Error> { + let is_descendent_of = descendent_of_builder + .build_is_descendent_of(Some((hash, parent_hash))); + + let res = self.inner.import( + hash, + number, + epoch.0, + &is_descendent_of, + ); + + match res { + Ok(_) | Err(fork_tree::Error::Duplicate) => Ok(()), + Err(e) => Err(e), + } + } +} + +/// Type alias to produce the epoch-changes tree from a block type. +pub type EpochChangesFor = EpochChanges<::Hash, NumberFor>; + +/// A shared epoch changes tree. +#[derive(Clone)] +pub struct SharedEpochChanges { + inner: Arc>>, +} + +impl SharedEpochChanges { + /// Create a new instance of the `SharedEpochChanges`. 
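+ ///
+ /// A usage sketch: one shared instance is created and cloned between the
+ /// block-import and authoring sides, with `lock` guarding mutation.
+ ///
+ /// ```ignore
+ /// let epoch_changes = SharedEpochChanges::<Block>::new();
+ /// let for_block_import = epoch_changes.clone();
+ /// let guard = epoch_changes.lock(); // MutexGuard over EpochChangesFor<Block>
+ /// ```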
+ pub fn new() -> Self { + SharedEpochChanges { + inner: Arc::new(Mutex::new(EpochChanges::<_, _>::new())) + } + } + + /// Lock the shared epoch changes, + pub fn lock(&self) -> MutexGuard> { + self.inner.lock() + } +} + +impl From> for SharedEpochChanges { + fn from(epoch_changes: EpochChangesFor) -> Self { + SharedEpochChanges { + inner: Arc::new(Mutex::new(epoch_changes)) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[derive(Debug, PartialEq)] + pub struct TestError; + + impl std::fmt::Display for TestError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "TestError") + } + } + + impl std::error::Error for TestError {} + + impl<'a, F: 'a , H: 'a + PartialEq + std::fmt::Debug> IsDescendentOfBuilder for &'a F + where F: Fn(&H, &H) -> Result + { + type Error = TestError; + type IsDescendentOf = Box Result + 'a>; + + fn build_is_descendent_of(&self, current: Option<(H, H)>) + -> Self::IsDescendentOf + { + let f = *self; + Box::new(move |base, head| { + let mut head = head; + + if let Some((ref c_head, ref c_parent)) = current { + if head == c_head { + if base == c_parent { + return Ok(true); + } else { + head = c_parent; + } + } + } + + f(base, head) + }) + } + } + + type Hash = [u8; 1]; + + #[test] + fn genesis_epoch_is_created_but_not_imported() { + // + // A - B + // \ + // — C + // + let is_descendent_of = |base: &Hash, block: &Hash| -> Result { + match (base, *block) { + (b"A", b) => Ok(b == *b"B" || b == *b"C" || b == *b"D"), + (b"B", b) | (b"C", b) => Ok(b == *b"D"), + (b"0", _) => Ok(true), + _ => Ok(false), + } + }; + + let make_genesis = |slot| Epoch { + epoch_index: 0, + start_slot: slot, + duration: 100, + authorities: Vec::new(), + randomness: [0; 32], + }; + + let epoch_changes = EpochChanges::new(); + let genesis_epoch = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"0", + 0, + 10101, + &make_genesis, + ).unwrap().unwrap(); + + match genesis_epoch { + ViableEpoch::Genesis(_) => {}, + _ => panic!("should be unimported genesis"), + }; + assert_eq!(genesis_epoch.as_ref(), &make_genesis(10101)); + + let genesis_epoch_2 = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"0", + 0, + 10102, + &make_genesis, + ).unwrap().unwrap(); + + match genesis_epoch_2 { + ViableEpoch::Genesis(_) => {}, + _ => panic!("should be unimported genesis"), + }; + assert_eq!(genesis_epoch_2.as_ref(), &make_genesis(10102)); + } + + #[test] + fn epoch_changes_between_blocks() { + // + // A - B + // \ + // — C + // + let is_descendent_of = |base: &Hash, block: &Hash| -> Result { + match (base, *block) { + (b"A", b) => Ok(b == *b"B" || b == *b"C" || b == *b"D"), + (b"B", b) | (b"C", b) => Ok(b == *b"D"), + (b"0", _) => Ok(true), + _ => Ok(false), + } + }; + + let make_genesis = |slot| Epoch { + epoch_index: 0, + start_slot: slot, + duration: 100, + authorities: Vec::new(), + randomness: [0; 32], + }; + + let mut epoch_changes = EpochChanges::new(); + let genesis_epoch = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"0", + 0, + 100, + &make_genesis, + ).unwrap().unwrap(); + + assert_eq!(genesis_epoch.as_ref(), &make_genesis(100)); + + let import_epoch_1 = genesis_epoch.increment(NextEpochDescriptor { + authorities: Vec::new(), + randomness: [1; 32], + }); + let epoch_1 = import_epoch_1.as_ref().clone(); + + epoch_changes.import( + &is_descendent_of, + *b"A", + 1, + *b"0", + import_epoch_1, + ).unwrap(); + let genesis_epoch = genesis_epoch.into_inner(); + + assert!(is_descendent_of(b"0", b"A").unwrap()); + + let end_slot = 
genesis_epoch.end_slot(); + assert_eq!(end_slot, epoch_1.start_slot); + + { + // x is still within the genesis epoch. + let x = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"A", + 1, + end_slot - 1, + &make_genesis, + ).unwrap().unwrap().into_inner(); + + assert_eq!(x, genesis_epoch); + } + + { + // x is now at the next epoch, because the block is now at the + // start slot of epoch 1. + let x = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"A", + 1, + end_slot, + &make_genesis, + ).unwrap().unwrap().into_inner(); + + assert_eq!(x, epoch_1); + } + + { + // x is now at the next epoch, because the block is now after + // start slot of epoch 1. + let x = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"A", + 1, + epoch_1.end_slot() - 1, + &make_genesis, + ).unwrap().unwrap().into_inner(); + + assert_eq!(x, epoch_1); + } + } + + #[test] + fn two_block_ones_dont_conflict() { + // X - Y + // / + // 0 - A - B + // + let is_descendent_of = |base: &Hash, block: &Hash| -> Result { + match (base, *block) { + (b"A", b) => Ok(b == *b"B"), + (b"X", b) => Ok(b == *b"Y"), + (b"0", _) => Ok(true), + _ => Ok(false), + } + }; + + let duration = 100; + + let make_genesis = |slot| Epoch { + epoch_index: 0, + start_slot: slot, + duration, + authorities: Vec::new(), + randomness: [0; 32], + }; + + let mut epoch_changes = EpochChanges::new(); + let next_descriptor = NextEpochDescriptor { + authorities: Vec::new(), + randomness: [0; 32], + }; + + // insert genesis epoch for A + { + let genesis_epoch_a = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"0", + 0, + 100, + &make_genesis, + ).unwrap().unwrap(); + + epoch_changes.import( + &is_descendent_of, + *b"A", + 1, + *b"0", + genesis_epoch_a.increment(next_descriptor.clone()), + ).unwrap(); + + } + + // insert genesis epoch for X + { + let genesis_epoch_x = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"0", + 0, + 1000, + &make_genesis, + ).unwrap().unwrap(); + + epoch_changes.import( + &is_descendent_of, + *b"X", + 1, + *b"0", + genesis_epoch_x.increment(next_descriptor.clone()), + ).unwrap(); + } + + // now check that the genesis epochs for our respective block 1s + // respect the chain structure. + { + let epoch_for_a_child = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"A", + 1, + 101, + &make_genesis, + ).unwrap().unwrap(); + + assert_eq!(epoch_for_a_child.into_inner(), make_genesis(100)); + + let epoch_for_x_child = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"X", + 1, + 1001, + &make_genesis, + ).unwrap().unwrap(); + + assert_eq!(epoch_for_x_child.into_inner(), make_genesis(1000)); + + let epoch_for_x_child_before_genesis = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"X", + 1, + 101, + &make_genesis, + ).unwrap(); + + // even though there is a genesis epoch at that slot, it's not in + // this chain. + assert!(epoch_for_x_child_before_genesis.is_none()); + } + } +} diff --git a/core/consensus/babe/src/lib.rs b/core/consensus/babe/src/lib.rs index 7816f81d47a4d..58e6c77cc3e27 100644 --- a/core/consensus/babe/src/lib.rs +++ b/core/consensus/babe/src/lib.rs @@ -56,7 +56,8 @@ //! An in-depth description and analysis of the protocol can be found here: //! 
-#![forbid(unsafe_code, missing_docs)] +#![forbid(unsafe_code)] +#![warn(missing_docs)] pub use babe_primitives::*; pub use consensus_common::SyncOracle; use std::{collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}}; @@ -67,12 +68,12 @@ use consensus_common::import_queue::{ }; use sr_primitives::{generic::{BlockId, OpaqueDigestItemId}, Justification}; use sr_primitives::traits::{ - Block as BlockT, Header, DigestItemFor, NumberFor, ProvideRuntimeApi, + Block as BlockT, Header, DigestItemFor, ProvideRuntimeApi, Zero, }; use keystore::KeyStorePtr; -use codec::{Decode, Encode}; -use parking_lot::{Mutex, MutexGuard}; +use codec::Encode; +use parking_lot::Mutex; use primitives::{blake2_256, Blake2Hasher, H256, Pair, Public, U256}; use merlin::Transcript; use inherents::{InherentDataProviders, InherentData}; @@ -96,43 +97,62 @@ use srml_babe::{ timestamp::{TimestampInherentData, InherentType as TimestampInherent} }; use consensus_common::SelectChain; -use consensus_common::import_queue::{Verifier, BasicQueue}; +use consensus_common::import_queue::{Verifier, BasicQueue, CacheKeyId}; use client::{ block_builder::api::BlockBuilder as BlockBuilderApi, blockchain::{self, HeaderBackend, ProvideCache}, BlockchainEvents, CallExecutor, Client, - runtime_api::ApiExt, error::Result as ClientResult, backend::{AuxStore, Backend}, + error::Result as ClientResult, backend::{AuxStore, Backend}, ProvideUncles, - utils::is_descendent_of, - well_known_cache_keys::{self, Id as CacheKeyId}, }; -use fork_tree::ForkTree; use slots::{CheckedHeader, check_equivocation}; use futures::prelude::*; -use futures01::Stream as _; -use log::{error, warn, debug, info, trace}; +use log::{warn, debug, info, trace}; use slots::{SlotWorker, SlotData, SlotInfo, SlotCompatible}; +use epoch_changes::descendent_query; mod aux_schema; +mod epoch_changes; #[cfg(test)] mod tests; -pub use babe_primitives::{AuthorityId, AuthorityPair, AuthoritySignature}; +pub use babe_primitives::{ + AuthorityId, AuthorityPair, AuthoritySignature, Epoch, NextEpochDescriptor, +}; +pub use epoch_changes::{EpochChanges, SharedEpochChanges}; + +macro_rules! babe_err { + ($($i: expr),+) => { + { + debug!(target: "babe", $($i),+); + format!($($i),+) + } + }; +} + +macro_rules! babe_info { + ($($i: expr),+) => { + { + info!(target: "babe", $($i),+); + format!($($i),+) + } + }; +} /// A slot duration. Create with `get_or_compute`. // FIXME: Once Rust has higher-kinded types, the duplication between this // and `super::babe::Config` can be eliminated. // https://github.com/paritytech/substrate/issues/2434 +#[derive(Clone)] pub struct Config(slots::SlotDuration); impl Config { /// Either fetch the slot duration from disk or compute it from the genesis /// state. - pub fn get_or_compute(client: &C) -> ClientResult - where + pub fn get_or_compute(client: &C) -> ClientResult where C: AuxStore + ProvideRuntimeApi, C::Api: BabeApi, { trace!(target: "babe", "Getting slot duration"); - match slots::SlotDuration::get_or_compute(client, |a, b| a.startup_data(b)).map(Self) { + match slots::SlotDuration::get_or_compute(client, |a, b| a.configuration(b)).map(Self) { Ok(s) => Ok(s), Err(s) => { warn!(target: "babe", "Failed to get slot duration"); @@ -141,37 +161,29 @@ impl Config { } } - /// Get the slot duration in milliseconds. - pub fn get(&self) -> u64 { - self.0.slot_duration - } - - /// Retrieve the threshold calculation constant `c`. - pub fn c(&self) -> (u64, u64) { - self.0.c + /// Create the genesis epoch (epoch #0). 
This is defined to start at the slot of + /// the first block, so that has to be provided. + pub fn genesis_epoch(&self, slot_number: SlotNumber) -> Epoch { + Epoch { + epoch_index: 0, + start_slot: slot_number, + duration: self.epoch_length, + authorities: self.genesis_authorities.clone(), + randomness: self.randomness.clone(), + } } } -impl SlotCompatible for BabeLink { - fn extract_timestamp_and_slot( - &self, - data: &InherentData, - ) -> Result<(TimestampInherent, u64, std::time::Duration), consensus_common::Error> { - trace!(target: "babe", "extract timestamp"); - data.timestamp_inherent_data() - .and_then(|t| data.babe_inherent_data().map(|a| (t, a))) - .map_err(Into::into) - .map_err(consensus_common::Error::InherentData) - .map(|(x, y)| (x, y, self.0.lock().0.take().unwrap_or_default())) +impl std::ops::Deref for Config { + type Target = BabeConfiguration; + + fn deref(&self) -> &BabeConfiguration { + &*self.0 } } /// Parameters for BABE. -pub struct BabeParams { - /// The configuration for BABE. Includes the slot duration, threshold, and - /// other parameters. - pub config: Config, - +pub struct BabeParams { /// The keystore that manages the keys of the node. pub keystore: KeyStorePtr, @@ -181,12 +193,14 @@ pub struct BabeParams { /// The SelectChain Strategy pub select_chain: SC, - /// A block importer - pub block_import: I, - - /// The environment + /// The environment we are producing blocks for. pub env: E, + /// The underlying block-import object to supply our produced blocks to. + /// This must be a `BabeBlockImport` or a wrapper of it, otherwise + /// critical consensus logic will be omitted. + pub block_import: I, + /// A sync oracle pub sync_oracle: SO, @@ -197,80 +211,104 @@ pub struct BabeParams { pub force_authoring: bool, /// The source of timestamps for relative slots - pub time_source: BabeLink, + pub babe_link: BabeLink, } /// Start the babe worker. The returned future should be run in a tokio runtime. 
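+ ///
+ /// A wiring sketch. The `client`, `select_chain`, `proposer_factory`,
+ /// `block_import`, `network` and `runtime` values are assumptions supplied
+ /// by the surrounding node service; `babe_link` should come from the same
+ /// setup that produced the `BabeBlockImport`.
+ ///
+ /// ```ignore
+ /// let babe = start_babe(BabeParams {
+ ///     keystore,
+ ///     client: client.clone(),
+ ///     select_chain,
+ ///     env: proposer_factory,
+ ///     block_import,
+ ///     sync_oracle: network.clone(),
+ ///     inherent_data_providers,
+ ///     force_authoring: false,
+ ///     babe_link,
+ /// })?;
+ /// runtime.spawn(babe);
+ /// ```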
-pub fn start_babe(BabeParams { - config, - client, +pub fn start_babe(BabeParams { keystore, + client, select_chain, - block_import, env, + block_import, sync_oracle, inherent_data_providers, force_authoring, - time_source, -}: BabeParams) -> Result< + babe_link, +}: BabeParams) -> Result< impl futures01::Future, consensus_common::Error, > where - B: BlockT, - C: ProvideRuntimeApi + ProvideCache + ProvideUncles + Send + Sync + 'static, + B: BlockT, + C: ProvideRuntimeApi + ProvideCache + ProvideUncles + BlockchainEvents + + HeaderBackend + Send + Sync + 'static, C::Api: BabeApi, SC: SelectChain + 'static, E: Environment + Send + Sync, E::Proposer: Proposer, >::Create: Unpin + Send + 'static, - H: Header, - I: BlockImport + Send + Sync + 'static, + I: BlockImport + Send + Sync + 'static, Error: std::error::Error + Send + From<::consensus_common::Error> + From + 'static, SO: SyncOracle + Send + Sync + Clone, { + let config = babe_link.config; let worker = BabeWorker { client: client.clone(), block_import: Arc::new(Mutex::new(block_import)), env, sync_oracle: sync_oracle.clone(), force_authoring, - c: config.c(), keystore, + epoch_changes: babe_link.epoch_changes.clone(), + config: config.clone(), }; - register_babe_inherent_data_provider(&inherent_data_providers, config.0.slot_duration())?; + + register_babe_inherent_data_provider(&inherent_data_providers, config.slot_duration())?; uncles::register_uncles_inherent_data_provider( client.clone(), select_chain.clone(), &inherent_data_providers, )?; - Ok(slots::start_slot_worker( + + let epoch_changes = babe_link.epoch_changes.clone(); + let pruning_task = client.finality_notification_stream() + .for_each(move |notification| { + // TODO: supply is-descendent-of and maybe write to disk _now_ + // as opposed to waiting for the next epoch? 
+ let res = epoch_changes.lock().prune_finalized( + descendent_query(&*client), + ¬ification.hash, + *notification.header.number(), + ); + + if let Err(e) = res { + babe_err!("Could not prune expired epoch changes: {:?}", e); + } + + future::ready(()) + }); + + babe_info!("Starting BABE Authorship worker"); + let slot_worker = slots::start_slot_worker( config.0, select_chain, worker, sync_oracle, inherent_data_providers, - time_source, - ).map(|()| Ok::<(), ()>(())).compat()) + babe_link.time_source, + ).map(|_| ()); + + Ok(future::select(slot_worker, pruning_task).map(|_| Ok::<(), ()>(())).compat()) } -struct BabeWorker { +struct BabeWorker { client: Arc, block_import: Arc>, env: E, sync_oracle: SO, force_authoring: bool, - c: (u64, u64), keystore: KeyStorePtr, + epoch_changes: SharedEpochChanges, + config: Config, } -impl slots::SimpleSlotWorker for BabeWorker where - B: BlockT, - C: ProvideRuntimeApi + ProvideCache, +impl slots::SimpleSlotWorker for BabeWorker where + B: BlockT, + C: ProvideRuntimeApi + ProvideCache + HeaderBackend, C::Api: BabeApi, E: Environment, E::Proposer: Proposer, >::Create: Unpin + Send + 'static, - H: Header, I: BlockImport + Send + Sync + 'static, SO: SyncOracle + Send + Clone, Error: std::error::Error + Send + From<::consensus_common::Error> + From + 'static, @@ -289,8 +327,16 @@ impl slots::SimpleSlotWorker for BabeWorker Result { - epoch_from_runtime(self.client.as_ref(), &BlockId::Hash(*block)) + fn epoch_data(&self, parent: &B::Header, slot_number: u64) -> Result { + self.epoch_changes.lock().epoch_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + slot_number, + |slot| self.config.genesis_epoch(slot) + ) + .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? + .map(|e| e.into_inner()) .ok_or(consensus_common::Error::InvalidAuthoritiesSet) } @@ -300,22 +346,23 @@ impl slots::SimpleSlotWorker for BabeWorker Option { - let parent_weight = { - let pre_digest = find_pre_digest::(&header).ok()?; - pre_digest.weight() - }; - - claim_slot( + debug!(target: "babe", "Attempting to claim slot {}", slot_number); + let s = claim_slot( slot_number, - parent_weight, epoch_data, - self.c, + &*self.config, &self.keystore, - ) + ); + + if let Some(_) = s { + debug!(target: "babe", "Claimed slot {}", slot_number); + } + + s } fn pre_digest_data(&self, _slot_number: u64, claim: &Self::Claim) -> Vec> { @@ -336,9 +383,6 @@ impl slots::SimpleSlotWorker for BabeWorker as CompatibleDigestItem>::babe_seal(signature); - // When we building our own blocks we always author on top of the - // current best according to `SelectChain`, therefore our own block - // proposal should always become the new best. BlockImportParams { origin: BlockOrigin::Own, header, @@ -346,8 +390,11 @@ impl slots::SimpleSlotWorker for BabeWorker slots::SimpleSlotWorker for BabeWorker SlotWorker for BabeWorker where - B: BlockT, - C: ProvideRuntimeApi + ProvideCache + Send + Sync, +impl SlotWorker for BabeWorker where + B: BlockT, + C: ProvideRuntimeApi + ProvideCache + HeaderBackend + Send + Sync, C::Api: BabeApi, E: Environment + Send + Sync, E::Proposer: Proposer, >::Create: Unpin + Send + 'static, - H: Header, I: BlockImport + Send + Sync + 'static, SO: SyncOracle + Send + Sync + Clone, Error: std::error::Error + Send + From<::consensus_common::Error> + From + 'static, @@ -386,14 +432,6 @@ impl SlotWorker for BabeWorker where } } -macro_rules! 
babe_err { - ($($i: expr),+) => { - { debug!(target: "babe", $($i),+) - ; format!($($i),+) - } - }; -} - /// Extract the BABE pre digest from the given header. Pre-runtime digests are /// mandatory, the function will return `Err` if none is found. fn find_pre_digest(header: &B::Header) -> Result @@ -405,7 +443,6 @@ fn find_pre_digest(header: &B::Header) -> Result(header: &B::Header) -> Result(header: &B::Header) -> Result, String> +fn find_next_epoch_digest(header: &B::Header) + -> Result, String> where DigestItemFor: CompatibleDigestItem, { let mut epoch_digest: Option<_> = None; @@ -439,6 +477,26 @@ fn find_next_epoch_digest(header: &B::Header) -> Result Ok(epoch_digest) } +struct VerificationParams<'a, B: 'a + BlockT> { + /// the header being verified. + header: B::Header, + /// the pre-digest of the header being verified. this is optional - if prior + /// verification code had to read it, it can be included here to avoid duplicate + /// work. + pre_digest: Option, + /// the slot number of the current time. + slot_now: SlotNumber, + /// epoch descriptor of the epoch this block _should_ be under, if it's valid. + epoch: &'a Epoch, + /// genesis config of this BABE chain. + config: &'a Config, +} + +struct VerifiedHeaderInfo { + pre_digest: DigestItemFor, + seal: DigestItemFor, +} + /// Check a header has been signed by the right key. If the slot is too far in /// the future, an error will be returned. If successful, returns the pre-header /// and the digest item containing the seal. @@ -450,23 +508,23 @@ fn find_next_epoch_digest(header: &B::Header) -> Result /// /// The given header can either be from a primary or secondary slot assignment, /// with each having different validation logic. -// FIXME #1018 needs misbehavior types. The `transaction_pool` parameter will be -// used to submit such misbehavior reports. 
-fn check_header( - mut header: B::Header, - parent_header: B::Header, - slot_now: u64, - authorities: &[(AuthorityId, BabeAuthorityWeight)], +fn check_header( + params: VerificationParams, client: &C, - randomness: [u8; 32], - epoch_index: u64, - secondary_slots: bool, - c: (u64, u64), - _transaction_pool: Option<&T>, -) -> Result, DigestItemFor)>, String> where +) -> Result>, String> where DigestItemFor: CompatibleDigestItem, - T: Send + Sync + 'static, { + let VerificationParams { + mut header, + pre_digest, + slot_now, + epoch, + config, + } = params; + + let authorities = &epoch.authorities; + let pre_digest = pre_digest.map(Ok).unwrap_or_else(|| find_pre_digest::(&header))?; + trace!(target: "babe", "Checking header"); let seal = match header.digest_mut().pop() { Some(x) => x, @@ -481,8 +539,6 @@ fn check_header( // and that's what we sign let pre_hash = header.hash(); - let pre_digest = find_pre_digest::(&header)?; - if pre_digest.slot_number() > slot_now { header.digest_mut().push(seal); return Ok(CheckedHeader::Deferred(header, pre_digest.slot_number())); @@ -492,40 +548,30 @@ fn check_header( return Err(babe_err!("Slot author not found")); } - let parent_weight = { - let parent_pre_digest = find_pre_digest::(&parent_header)?; - parent_pre_digest.weight() - }; - match &pre_digest { - BabePreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number, weight } => { + BabePreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { debug!(target: "babe", "Verifying Primary block"); - let digest = (vrf_output, vrf_proof, *authority_index, *slot_number, *weight); + let digest = (vrf_output, vrf_proof, *authority_index, *slot_number); check_primary_header::( pre_hash, digest, sig, - parent_weight, - authorities, - randomness, - epoch_index, - c, + &epoch, + config.c, )?; }, - BabePreDigest::Secondary { authority_index, slot_number, weight } if secondary_slots => { + BabePreDigest::Secondary { authority_index, slot_number } if config.secondary_slots => { debug!(target: "babe", "Verifying Secondary block"); - let digest = (*authority_index, *slot_number, *weight); + let digest = (*authority_index, *slot_number); check_secondary_header::( pre_hash, digest, sig, - parent_weight, - &authorities, - randomness, + &epoch, )?; }, _ => { @@ -544,17 +590,20 @@ fn check_header( &header, author, ).map_err(|e| e.to_string())? { - info!( + babe_info!( "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", author, pre_digest.slot_number(), equivocation_proof.fst_header().hash(), - equivocation_proof.snd_header().hash(), + equivocation_proof.snd_header().hash() ); } - let pre_digest = CompatibleDigestItem::babe_pre_digest(pre_digest); - Ok(CheckedHeader::Checked(header, (pre_digest, seal))) + let info = VerifiedHeaderInfo { + pre_digest: CompatibleDigestItem::babe_pre_digest(pre_digest), + seal, + }; + Ok(CheckedHeader::Checked(header, info)) } /// Check a primary slot proposal header. We validate that the given header is @@ -563,29 +612,23 @@ fn check_header( /// its parent since it is a primary block. 
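+ ///
+ /// Conceptually (a sketch; `calculate_primary_threshold` is the
+ /// authoritative computation): with `c` the slot-fullness constant and
+ /// `theta` the author's share of the total authority weight, the VRF
+ /// output, interpreted as a 128-bit integer, must be below
+ /// `(1 - (1 - c)^theta) * 2^128` for the primary claim to be valid.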
fn check_primary_header( pre_hash: B::Hash, - pre_digest: (&VRFOutput, &VRFProof, AuthorityIndex, SlotNumber, BabeBlockWeight), + pre_digest: (&VRFOutput, &VRFProof, AuthorityIndex, SlotNumber), signature: AuthoritySignature, - parent_weight: BabeBlockWeight, - authorities: &[(AuthorityId, BabeAuthorityWeight)], - randomness: [u8; 32], - epoch_index: u64, + epoch: &Epoch, c: (u64, u64), ) -> Result<(), String> where DigestItemFor: CompatibleDigestItem, { - let (vrf_output, vrf_proof, authority_index, slot_number, weight) = pre_digest; - if weight != parent_weight + 1 { - return Err("Invalid weight: should increase with Primary block.".into()); - } + let (vrf_output, vrf_proof, authority_index, slot_number) = pre_digest; - let author = &authorities[authority_index as usize].0; + let author = &epoch.authorities[authority_index as usize].0; if AuthorityPair::verify(&signature, pre_hash, &author) { let (inout, _) = { let transcript = make_transcript( - &randomness, + &epoch.randomness, slot_number, - epoch_index, + epoch.epoch_index, ); schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { @@ -595,7 +638,12 @@ fn check_primary_header( })? }; - let threshold = calculate_primary_threshold(c, authorities, authority_index as usize); + let threshold = calculate_primary_threshold( + c, + &epoch.authorities, + authority_index as usize, + ); + if !check_primary_threshold(&inout, threshold) { return Err(babe_err!("VRF verification of block by author {:?} failed: \ threshold {} exceeded", author, threshold)); @@ -613,27 +661,21 @@ fn check_primary_header( /// compared to its parent since it is a secondary block. fn check_secondary_header( pre_hash: B::Hash, - pre_digest: (AuthorityIndex, SlotNumber, BabeBlockWeight), + pre_digest: (AuthorityIndex, SlotNumber), signature: AuthoritySignature, - parent_weight: BabeBlockWeight, - authorities: &[(AuthorityId, BabeAuthorityWeight)], - randomness: [u8; 32], + epoch: &Epoch, ) -> Result<(), String> { - let (authority_index, slot_number, weight) = pre_digest; - - if weight != parent_weight { - return Err("Invalid weight: Should stay the same with secondary block.".into()); - } + let (authority_index, slot_number) = pre_digest; // check the signature is valid under the expected authority and // chain state. let expected_author = secondary_slot_author( slot_number, - authorities, - randomness, + &epoch.authorities, + epoch.randomness, ).ok_or_else(|| "No secondary author expected.".to_string())?; - let author = &authorities[authority_index as usize].0; + let author = &epoch.authorities[authority_index as usize].0; if expected_author != author { let msg = format!("Invalid author: Expected secondary author: {:?}, got: {:?}.", @@ -651,21 +693,42 @@ fn check_secondary_header( } } +#[derive(Default, Clone)] +struct TimeSource(Arc, Vec<(Instant, u64)>)>>); + +impl SlotCompatible for TimeSource { + fn extract_timestamp_and_slot( + &self, + data: &InherentData, + ) -> Result<(TimestampInherent, u64, std::time::Duration), consensus_common::Error> { + trace!(target: "babe", "extract timestamp"); + data.timestamp_inherent_data() + .and_then(|t| data.babe_inherent_data().map(|a| (t, a))) + .map_err(Into::into) + .map_err(consensus_common::Error::InherentData) + .map(|(x, y)| (x, y, self.0.lock().0.take().unwrap_or_default())) + } +} + /// State that must be shared between the import queue and the authoring logic. 
-#[derive(Default, Clone, Debug)] -pub struct BabeLink(Arc, Vec<(Instant, u64)>)>>); +#[derive(Clone)] +pub struct BabeLink { + time_source: TimeSource, + epoch_changes: SharedEpochChanges, + config: Config, +} /// A verifier for Babe blocks. -pub struct BabeVerifier { +pub struct BabeVerifier { client: Arc>, api: Arc, inherent_data_providers: inherents::InherentDataProviders, config: Config, - time_source: BabeLink, - transaction_pool: Option>, + epoch_changes: SharedEpochChanges, + time_source: TimeSource, } -impl BabeVerifier { +impl BabeVerifier { fn check_inherents( &self, block: Block, @@ -704,7 +767,7 @@ fn median_algorithm( if num_timestamps as u64 >= median_required_blocks && median_required_blocks > 0 { let mut new_list: Vec<_> = time_source.1.iter().map(|&(t, sl)| { let offset: u128 = u128::from(slot_duration) - .checked_mul(1_000_000u128) // self.config.get() returns *milliseconds* + .checked_mul(1_000_000u128) // self.config.slot_duration returns milliseconds .and_then(|x| { x.checked_mul(u128::from(slot_number).saturating_sub(u128::from(sl))) }) @@ -735,14 +798,13 @@ fn median_algorithm( } } -impl Verifier for BabeVerifier where +impl Verifier for BabeVerifier where Block: BlockT, B: Backend + 'static, E: CallExecutor + 'static + Clone + Send + Sync, RA: Send + Sync, PRA: ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, PRA::Api: BlockBuilderApi + BabeApi, - T: Send + Sync + 'static, { fn verify( &mut self, @@ -772,59 +834,38 @@ impl Verifier for BabeVerifier( - header.clone(), - parent_header.clone(), - slot_now + 1, - &authorities, - &self.api, - randomness, - epoch_index, - secondary_slots, - self.config.c(), - self.transaction_pool.as_ref().map(|x| &**x), - ); - - // if we have failed to check header using (presumably) current epoch AND we're probably in the next epoch - // => check using next epoch - // (this is only possible on the light client at epoch#0) - if epoch_index == 0 && checked_header.is_err() { - if let Some(Epoch { authorities, randomness, epoch_index, .. }) = maybe_next_epoch { - let checked_header_next = check_header::( - header, - parent_header, - slot_now + 1, - &authorities, - &self.api, - randomness, - epoch_index, - secondary_slots, - self.config.c(), - self.transaction_pool.as_ref().map(|x| &**x), - ); + let pre_digest = find_pre_digest::(&header)?; + let epoch = { + let epoch_changes = self.epoch_changes.lock(); + epoch_changes.epoch_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_header.number().clone(), + pre_digest.slot_number(), + |slot| self.config.genesis_epoch(slot), + ) + .map_err(|e| format!("{:?}", e))? + .ok_or_else(|| format!("Could not fetch epoch at {:?}", parent_hash))? + }; - match checked_header_next { - Ok(checked_header_next) => checked_header = Ok(checked_header_next), - Err(_) => (), - } - } - } + // We add one to the current slot to allow for some small drift. 
+ // FIXME #1019 in the future, alter this queue to allow deferring of headers + let v_params = VerificationParams { + header, + pre_digest: Some(pre_digest.clone()), + slot_now: slot_now + 1, + epoch: epoch.as_ref(), + config: &self.config, + }; + let checked_header = check_header::(v_params, &self.api)?; - let checked_header = checked_header?; match checked_header { - CheckedHeader::Checked(pre_header, (pre_digest, seal)) => { - let babe_pre_digest = pre_digest.as_babe_pre_digest() + CheckedHeader::Checked(pre_header, verified_info) => { + let babe_pre_digest = verified_info.pre_digest.as_babe_pre_digest() .expect("check_header always returns a pre-digest digest item; qed"); let slot_number = babe_pre_digest.slot_number(); @@ -852,42 +893,18 @@ impl Verifier for BabeVerifier ?pre_header); - // The fork choice rule is that we pick the heaviest chain (i.e. - // more primary blocks), if there's a tie we go with the longest - // chain. - let new_best = { - let (last_best, last_best_number) = { - let info = self.client.info().chain; - (info.best_hash, info.best_number) - }; - - let best_header = self.client.header(&BlockId::Hash(last_best)) - .map_err(|_| "Failed fetching best header")? - .expect("parent_header must be imported; qed"); - - let best_weight = find_pre_digest::(&best_header) - .map(|babe_pre_digest| babe_pre_digest.weight())?; - - let new_weight = babe_pre_digest.weight(); - - if new_weight > best_weight { - true - } else if new_weight == best_weight { - *pre_header.number() > last_best_number - } else { - false - } - }; - let block_import_params = BlockImportParams { origin, header: pre_header, - post_digests: vec![seal], + post_digests: vec![verified_info.seal], body, finalized: false, justification, auxiliary: Vec::new(), - fork_choice: ForkChoiceStrategy::Custom(new_best), + // TODO: block-import handles fork choice and this shouldn't even have the + // option to specify one. + // https://github.com/paritytech/substrate/issues/3623 + fork_choice: ForkChoiceStrategy::LongestChain, }; Ok((block_import_params, Default::default())) @@ -903,77 +920,6 @@ impl Verifier for BabeVerifier (Epoch, Option) { - match self { - MaybeSpanEpoch::Genesis(epoch0, epoch1) => (epoch0, Some(epoch1)), - MaybeSpanEpoch::Regular(epoch) => (epoch, None), - } - } - - #[cfg(test)] - pub fn into_regular(self) -> Option { - match self { - MaybeSpanEpoch::Regular(epoch) => Some(epoch), - _ => None, - } - } -} - -/// Extract current epoch data from cache and fallback to querying the runtime -/// if the cache isn't populated. -fn epoch(client: &C, at: &BlockId) -> Result where - B: BlockT, - C: ProvideRuntimeApi + ProvideCache, - C::Api: BabeApi, -{ - epoch_from_cache(client, at) - .or_else(|| epoch_from_runtime(client, at).map(MaybeSpanEpoch::Regular)) - .ok_or(consensus_common::Error::InvalidAuthoritiesSet) -} - -/// Extract current epoch data from cache. -fn epoch_from_cache(client: &C, at: &BlockId) -> Option where - B: BlockT, - C: ProvideCache, -{ - // the epoch that is BABE-valid at the block is not the epoch that is cache-valid at the block - // we need to go back for maximum two steps - client.cache() - .and_then(|cache| cache - .get_at(&well_known_cache_keys::EPOCH, at) - .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok())) -} - -/// Extract current epoch from runtime. 
-fn epoch_from_runtime(client: &C, at: &BlockId) -> Option where - B: BlockT, - C: ProvideRuntimeApi, - C::Api: BabeApi, -{ - if client.runtime_api().has_api::>(at).unwrap_or(false) { - let s = BabeApi::epoch(&*client.runtime_api(), at).ok()?; - if s.authorities.is_empty() { - error!("No authorities!"); - None - } else { - Some(s) - } - } else { - error!("bad api!"); - None - } -} - /// The BABE import queue type. pub type BabeImportQueue = BasicQueue; @@ -1050,17 +996,15 @@ fn calculate_primary_threshold( /// claim a secondary slot. fn claim_slot( slot_number: SlotNumber, - parent_weight: BabeBlockWeight, epoch: &Epoch, - c: (u64, u64), + config: &BabeConfiguration, keystore: &KeyStorePtr, ) -> Option<(BabePreDigest, AuthorityPair)> { - claim_primary_slot(slot_number, parent_weight, epoch, c, keystore) + claim_primary_slot(slot_number, epoch, config.c, keystore) .or_else(|| { - if epoch.secondary_slots { + if config.secondary_slots { claim_secondary_slot( slot_number, - parent_weight, &epoch.authorities, keystore, epoch.randomness, @@ -1077,7 +1021,6 @@ fn claim_slot( /// so it returns `Some(_)`. Otherwise, it returns `None`. fn claim_primary_slot( slot_number: SlotNumber, - parent_weight: BabeBlockWeight, epoch: &Epoch, c: (u64, u64), keystore: &KeyStorePtr, @@ -1107,7 +1050,6 @@ fn claim_primary_slot( vrf_output: s.0.to_output(), vrf_proof: s.1, authority_index: authority_index as u32, - weight: parent_weight + 1, } }); @@ -1149,7 +1091,6 @@ fn secondary_slot_author( /// to propose. fn claim_secondary_slot( slot_number: SlotNumber, - parent_weight: BabeBlockWeight, authorities: &[(AuthorityId, BabeAuthorityWeight)], keystore: &KeyStorePtr, randomness: [u8; 32], @@ -1176,7 +1117,6 @@ fn claim_secondary_slot( let pre_digest = BabePreDigest::Secondary { slot_number, authority_index: authority_index as u32, - weight: parent_weight, }; return Some((pre_digest, pair)); @@ -1186,76 +1126,6 @@ fn claim_secondary_slot( None } -fn initialize_authorities_cache(client: &C) -> Result<(), ConsensusError> where - B: BlockT, - C: ProvideRuntimeApi + ProvideCache, - C::Api: BabeApi, -{ - // no cache => no initialization - let cache = match client.cache() { - Some(cache) => cache, - None => return Ok(()), - }; - - // check if we already have initialized the cache - let genesis_id = BlockId::Number(Zero::zero()); - let genesis_epoch: Option = cache - .get_at(&well_known_cache_keys::EPOCH, &genesis_id) - .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()); - if genesis_epoch.is_some() { - return Ok(()); - } - - let map_err = |error| consensus_common::Error::from(consensus_common::Error::ClientImport( - format!( - "Error initializing authorities cache: {}", - error, - ))); - - let epoch0 = epoch_from_runtime(client, &genesis_id).ok_or(consensus_common::Error::InvalidAuthoritiesSet)?; - let mut epoch1 = epoch0.clone(); - epoch1.epoch_index = 1; - - let genesis_epoch = MaybeSpanEpoch::Genesis(epoch0, epoch1); - cache.initialize(&well_known_cache_keys::EPOCH, genesis_epoch.encode()) - .map_err(map_err) -} - -/// Tree of all epoch changes across all *seen* forks. Data stored in tree is -/// the hash and block number of the block signaling the epoch change, and the -/// epoch that was signalled at that block. -type EpochChanges = ForkTree< - ::Hash, - NumberFor, - Epoch, ->; - -/// A shared epoch changes tree. 
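For reference, the primary-slot eligibility that `claim_primary_slot` above gates on boils down to comparing the VRF output against a per-authority threshold derived from `c` and relative weight. A simplified sketch (lossy `f64` arithmetic assumed here; the crate's real `calculate_primary_threshold` is more careful):

// p = 1 - (1 - c)^theta, where theta is our share of the total authority
// weight; the slot is claimable iff the VRF output, read as a 128-bit
// integer, falls below p * 2^128.
fn primary_threshold(c: (u64, u64), own_weight: u64, total_weight: u64) -> u128 {
    let c = c.0 as f64 / c.1 as f64;
    let theta = own_weight as f64 / total_weight as f64;
    let p = 1f64 - (1f64 - c).powf(theta);
    (p * u128::max_value() as f64) as u128
}

fn is_primary_winner(vrf_output: &[u8; 32], threshold: u128) -> bool {
    let mut truncated = [0u8; 16];
    truncated.copy_from_slice(&vrf_output[..16]);
    u128::from_le_bytes(truncated) < threshold
}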
-#[derive(Clone)] -struct SharedEpochChanges { - inner: Arc>>, -} - -impl SharedEpochChanges { - fn new() -> Self { - SharedEpochChanges { - inner: Arc::new(Mutex::new(EpochChanges::::new())) - } - } - - fn lock(&self) -> MutexGuard> { - self.inner.lock() - } -} - -impl From> for SharedEpochChanges { - fn from(epoch_changes: EpochChanges) -> Self { - SharedEpochChanges { - inner: Arc::new(Mutex::new(epoch_changes)) - } - } -} - /// A block-import handler for BABE. /// /// This scans each imported block for epoch change signals. The signals are @@ -1269,6 +1139,7 @@ pub struct BabeBlockImport { client: Arc>, api: Arc, epoch_changes: SharedEpochChanges, + config: Config, } impl Clone for BabeBlockImport { @@ -1278,6 +1149,7 @@ impl Clone for BabeBlockImport BabeBlockImport { api: Arc, epoch_changes: SharedEpochChanges, block_import: I, + config: Config, ) -> Self { BabeBlockImport { client, api, inner: block_import, epoch_changes, + config, } } } @@ -1313,7 +1187,7 @@ impl BlockImport for BabeBlockImport, - mut new_cache: HashMap>, + new_cache: HashMap>, ) -> Result { let hash = block.post_header().hash(); let number = block.header.number().clone(); @@ -1326,63 +1200,79 @@ impl BlockImport for BabeBlockImport return Err(ConsensusError::ClientImport(e.to_string()).into()), } - let slot_number = { - let pre_digest = find_pre_digest::(&block.header) - .expect("valid babe headers must contain a predigest; \ - header has been already verified; qed"); - pre_digest.slot_number() - }; - - // returns a function for checking whether a block is a descendent of another - // consistent with querying client directly after importing the block. - let parent_hash = *block.header.parent_hash(); - let is_descendent_of = is_descendent_of(&self.client, Some((&hash, &parent_hash))); + let pre_digest = find_pre_digest::(&block.header) + .expect("valid babe headers must contain a predigest; \ + header has been already verified; qed"); + let slot_number = pre_digest.slot_number(); - // check if there's any epoch change expected to happen at this slot let mut epoch_changes = self.epoch_changes.lock(); - let enacted_epoch = epoch_changes.find_node_where( - &hash, - &number, - &is_descendent_of, - &|epoch| epoch.start_slot <= slot_number, - ).map_err(|e| ConsensusError::from(ConsensusError::ClientImport(e.to_string())))?; - - let check_roots = || -> Result { - // this can only happen when the chain starts, since there's no - // epoch change at genesis. afterwards every time we expect an epoch - // change it means we will import another one. - for (root, _, _) in epoch_changes.roots() { - let is_descendent_of = is_descendent_of(root, &hash) - .map_err(|e| { - ConsensusError::from(ConsensusError::ClientImport(e.to_string())) - })?; - - if is_descendent_of { - return Ok(false); - } - } - Ok(true) + // check if there's any epoch change expected to happen at this slot. + // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true + // if this is the first block in its chain for that epoch. + // + // also provides the total weight of the chain, including the imported block. + let (epoch, first_in_epoch, parent_weight) = { + let parent_hash = *block.header.parent_hash(); + let parent_header = self.client.header(&BlockId::Hash(parent_hash)) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? + .ok_or_else(|| ConsensusError::ChainLookup(babe_err!( + "Parent ({}) of {} unavailable. 
Cannot import", + parent_hash, + hash + )))?; + + let parent_slot = find_pre_digest::(&parent_header) + .map(|d| d.slot_number()) + .expect("parent is non-genesis; valid BABE headers contain a pre-digest; \ + header has already been verified; qed"); + + let parent_weight = if *parent_header.number() == Zero::zero() { + 0 + } else { + aux_schema::load_block_weight(&*self.client, parent_hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .ok_or_else(|| ConsensusError::ClientImport( + babe_err!("Parent block of {} has no associated weight", hash) + ))? + }; + + let epoch = epoch_changes.epoch_for_child_of( + descendent_query(&*self.client), + &parent_hash, + *parent_header.number(), + slot_number, + |slot| self.config.genesis_epoch(slot), + ) + .map_err(|e: fork_tree::Error| ConsensusError::ChainLookup( + babe_err!("Could not look up epoch: {:?}", e) + ))? + .ok_or_else(|| ConsensusError::ClientImport( + babe_err!("Block {} is not valid under any epoch.", hash) + ))?; + + let first_in_epoch = parent_slot < epoch.as_ref().start_slot; + (epoch, first_in_epoch, parent_weight) }; - let expected_epoch_change = enacted_epoch.is_some(); + let total_weight = parent_weight + pre_digest.added_weight(); + + // search for this all the time so we can reject unexpected announcements. let next_epoch_digest = find_next_epoch_digest::(&block.header) .map_err(|e| ConsensusError::from(ConsensusError::ClientImport(e.to_string())))?; - match (expected_epoch_change, next_epoch_digest.is_some()) { + match (first_in_epoch, next_epoch_digest.is_some()) { (true, true) => {}, (false, false) => {}, (true, false) => { return Err( ConsensusError::ClientImport( - "Expected epoch change to happen by this block".into(), + babe_err!("Expected epoch change to happen at {:?}, s{}", hash, slot_number), ) ); }, (false, true) => { - if !check_roots()? 
{ - return Err(ConsensusError::ClientImport("Unexpected epoch change".into())); - } + return Err(ConsensusError::ClientImport("Unexpected epoch change".into())); }, } @@ -1390,37 +1280,31 @@ impl BlockImport for BabeBlockImport= start slot {}).", + epoch.as_ref().epoch_index, hash, slot_number, epoch.as_ref().start_slot); + babe_info!("Next epoch starts at slot {}", next_epoch.as_ref().start_slot); + // track the epoch change in the fork tree - epoch_changes.import( + let res = epoch_changes.import( + descendent_query(&*self.client), hash, number, + *block.header.parent_hash(), next_epoch, - &is_descendent_of, - ).map_err(|e| ConsensusError::from(ConsensusError::ClientImport(e.to_string())))?; + ); + + + if let Err(e) = res { + let err = ConsensusError::ClientImport(format!("{:?}", e)); + babe_err!("Failed to launch next epoch: {:?}", e); + *epoch_changes = old_epoch_changes.expect("set `Some` above and not taken; qed"); + return Err(err); + } crate::aux_schema::write_epoch_changes::( &*epoch_changes, @@ -1430,6 +1314,44 @@ impl BlockImport for BabeBlockImport last_best_weight { + true + } else if total_weight == last_best_weight { + number > last_best_number + } else { + false + }) + }; + let import_result = self.inner.import_block(block, new_cache); // revert to the original epoch changes in case there's an error @@ -1452,81 +1374,80 @@ impl BlockImport for BabeBlockImport, I, RA, PRA>( + config: Config, + wrapped_block_import: I, + client: Arc>, + api: Arc, +) -> ClientResult<(BabeBlockImport, BabeLink)> where + B: Backend, + E: CallExecutor, +{ + let epoch_changes = aux_schema::load_epoch_changes(&*client)?; + let link = BabeLink { + epoch_changes: epoch_changes.clone(), + time_source: Default::default(), + config: config.clone(), + }; + + let import = BabeBlockImport::new( + client, + api, + epoch_changes, + wrapped_block_import, + config, + ); + + Ok((import, link)) +} + +/// Start an import queue for the BABE consensus algorithm. +/// +/// This method consumes the `BabeLink` created by `block_import` (the same data also has to be +/// passed to the block authoring logic) and returns only the import queue. -pub fn import_queue, I, RA, PRA, T>( - config: Config, +/// +/// The block import object provided must be the `BabeBlockImport` or a wrapper +/// of it, otherwise crucial import logic will be omitted.
+pub fn import_queue, I, RA, PRA>( + babe_link: BabeLink, block_import: I, justification_import: Option>, finality_proof_import: Option>, client: Arc>, api: Arc, inherent_data_providers: InherentDataProviders, - transaction_pool: Option>, -) -> ClientResult<( - BabeImportQueue, - BabeLink, - BabeBlockImport, - impl futures01::Future, -)> where +) -> ClientResult> where B: Backend + 'static, - I: BlockImport + Clone + Send + Sync + 'static, - I::Error: Into, + I: BlockImport + Send + Sync + 'static, E: CallExecutor + Clone + Send + Sync + 'static, RA: Send + Sync + 'static, PRA: ProvideRuntimeApi + ProvideCache + Send + Sync + AuxStore + 'static, PRA::Api: BlockBuilderApi + BabeApi, - T: Send + Sync + 'static, { - register_babe_inherent_data_provider(&inherent_data_providers, config.get())?; - initialize_authorities_cache(&*api)?; + register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration)?; let verifier = BabeVerifier { client: client.clone(), - api: api.clone(), + api, inherent_data_providers, - time_source: Default::default(), - config, - transaction_pool, + config: babe_link.config, + epoch_changes: babe_link.epoch_changes, + time_source: babe_link.time_source, }; - let epoch_changes = aux_schema::load_epoch_changes(&*client)?; - - let block_import = BabeBlockImport::new( - client.clone(), - api, - epoch_changes.clone(), - block_import, - ); - - let pruning_task = client.finality_notification_stream() - .map(|v| Ok::<_, ()>(v)).compat() - .for_each(move |notification| { - let is_descendent_of = is_descendent_of(&client, None); - epoch_changes.lock().prune( - ¬ification.hash, - *notification.header.number(), - &is_descendent_of, - ).map_err(|e| { - debug!(target: "babe", "Error pruning epoch changes fork tree: {:?}", e) - })?; - - Ok(()) - }); - - let timestamp_core = verifier.time_source.clone(); - let queue = BasicQueue::new( + Ok(BasicQueue::new( verifier, - Box::new(block_import.clone()), + Box::new(block_import), justification_import, finality_proof_import, - ); - - Ok((queue, timestamp_core, block_import, pruning_task)) + )) } /// BABE test helpers. Utility methods for manually authoring blocks. 
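Taken together, `block_import` and `import_queue` are meant to be wired in a fixed order: create the block import first, then hand its `BabeLink` to the queue (and later to the authoring worker). A condensed sketch of that wiring, mirroring the node service changes further down in this diff:

// 1. wrap the inner (e.g. GRANDPA) block import; this loads the persisted
//    epoch-change tree and yields the shared BabeLink.
let (babe_block_import, babe_link) = babe::block_import(
    babe::Config::get_or_compute(&*client)?,
    inner_block_import,
    client.clone(),
    client.clone(),
)?;

// 2. the verifier inside the queue must share the same link (epoch tree and
//    time source), otherwise crucial import logic is bypassed.
let import_queue = babe::import_queue(
    babe_link.clone(),
    babe_block_import.clone(),
    None, // justification import
    None, // finality-proof import
    client.clone(),
    client,
    inherent_data_providers.clone(),
)?;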
@@ -1540,26 +1461,25 @@ pub mod test_helpers { slot_number: u64, parent: &B::Header, client: &C, - c: (u64, u64), keystore: &KeyStorePtr, + link: &BabeLink, ) -> Option where - B: BlockT, - C: ProvideRuntimeApi + ProvideCache, + B: BlockT, + C: ProvideRuntimeApi + ProvideCache + HeaderBackend, C::Api: BabeApi, { - let epoch = match epoch(client, &BlockId::Hash(parent.hash())).unwrap() { - MaybeSpanEpoch::Regular(epoch) => epoch, - _ => unreachable!("it is always Regular epoch on full nodes"), - }; - - let weight = find_pre_digest::(parent).ok() - .map(|d| d.weight())?; + let epoch = link.epoch_changes.lock().epoch_for_child_of( + descendent_query(client), + &parent.hash(), + parent.number().clone(), + slot_number, + |slot| link.config.genesis_epoch(slot), + ).unwrap().unwrap(); super::claim_slot( slot_number, - weight, - &epoch, - c, + epoch.as_ref(), + &link.config, keystore, ).map(|(digest, _)| digest) } diff --git a/core/consensus/babe/src/tests.rs b/core/consensus/babe/src/tests.rs index 7274cc1230418..70c7738dec8e7 100644 --- a/core/consensus/babe/src/tests.rs +++ b/core/consensus/babe/src/tests.rs @@ -21,19 +21,22 @@ #![allow(deprecated)] use super::*; -use babe_primitives::AuthorityPair; +use babe_primitives::{AuthorityPair, SlotNumber}; use client::block_builder::BlockBuilder; use consensus_common::NoNetwork as DummyOracle; +use consensus_common::import_queue::{ + BoxBlockImport, BoxJustificationImport, BoxFinalityProofImport, +}; use network::test::*; use network::test::{Block as TestBlock, PeersClient}; +use network::config::BoxFinalityProofRequestBuilder; use sr_primitives::{generic::DigestItem, traits::{Block as BlockT, DigestFor}}; use network::config::ProtocolConfig; use tokio::runtime::current_thread; -use keyring::sr25519::Keyring; use client::BlockchainEvents; use test_client; use log::debug; -use std::{time::Duration, borrow::Borrow, cell::RefCell}; +use std::{time::Duration, cell::RefCell}; type Item = DigestItem; @@ -46,8 +49,28 @@ type TestClient = client::Client< test_client::runtime::RuntimeApi, >; -struct DummyFactory(Arc); -struct DummyProposer(u64, Arc); +#[derive(Copy, Clone, PartialEq)] +enum Stage { + PreSeal, + PostSeal, +} + +type Mutator = Arc; + +#[derive(Clone)] +struct DummyFactory { + client: Arc, + epoch_changes: crate::SharedEpochChanges, + config: Config, + mutator: Mutator, +} + +struct DummyProposer { + factory: DummyFactory, + parent_hash: Hash, + parent_number: u64, + parent_slot: SlotNumber, +} impl Environment for DummyFactory { type Proposer = DummyProposer; @@ -56,7 +79,69 @@ impl Environment for DummyFactory { fn init(&mut self, parent_header: &::Header) -> Result { - Ok(DummyProposer(parent_header.number + 1, self.0.clone())) + + let parent_slot = crate::find_pre_digest::(parent_header) + .expect("parent header has a pre-digest") + .slot_number(); + + Ok(DummyProposer { + factory: self.clone(), + parent_hash: parent_header.hash(), + parent_number: *parent_header.number(), + parent_slot, + }) + } +} + +impl DummyProposer { + fn propose_with(&mut self, pre_digests: DigestFor) + -> future::Ready> + { + let block_builder = self.factory.client.new_block_at( + &BlockId::Hash(self.parent_hash), + pre_digests, + ).unwrap(); + let mut block = match block_builder.bake().map_err(|e| e.into()) { + Ok(b) => b, + Err(e) => return future::ready(Err(e)), + }; + + let this_slot = crate::find_pre_digest::(block.header()) + .expect("baked block has valid pre-digest") + .slot_number(); + + // figure out if we should add a consensus digest, since the test 
runtime + // doesn't. + let epoch_changes = self.factory.epoch_changes.lock(); + let epoch = epoch_changes.epoch_for_child_of( + descendent_query(&*self.factory.client), + &self.parent_hash, + self.parent_number, + this_slot, + |slot| self.factory.config.genesis_epoch(slot), + ) + .expect("client has data to find epoch") + .expect("can compute epoch for baked block") + .into_inner(); + + let first_in_epoch = self.parent_slot < epoch.start_slot; + if first_in_epoch { + // push a `Consensus` digest signalling next change. + // we just reuse the same randomness and authorities as the prior + // epoch. this will break when we add light client support, since + // that will re-check the randomness logic off-chain. + let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { + authorities: epoch.authorities.clone(), + randomness: epoch.randomness.clone(), + }).encode(); + let digest = DigestItem::Consensus(BABE_ENGINE_ID, digest_data); + block.header.digest_mut().push(digest) + } + + // mutate the block header according to the mutator. + (self.factory.mutator)(&mut block.header, Stage::PreSeal); + + future::ready(Ok(block)) } } @@ -67,21 +152,42 @@ impl Proposer for DummyProposer { fn propose( &mut self, _: InherentData, - digests: DigestFor, + pre_digests: DigestFor, _: Duration, ) -> Self::Create { - future::ready(self.1.new_block(digests).unwrap().bake().map_err(|e| e.into())) + self.propose_with(pre_digests) } } -type Mutator = Arc Fn(&'r mut TestHeader) + Send + Sync>; - thread_local! { - static MUTATOR: RefCell = RefCell::new(Arc::new(|_|())); + static MUTATOR: RefCell = RefCell::new(Arc::new(|_, _|())); +} + +#[derive(Clone)] +struct PanickingBlockImport(B); + +impl> BlockImport for PanickingBlockImport { + type Error = B::Error; + + fn import_block( + &mut self, + block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + Ok(self.0.import_block(block, new_cache).expect("importing block failed")) + } + + fn check_block( + &mut self, + hash: Hash, + parent_hash: Hash, + ) -> Result { + Ok(self.0.check_block(hash, parent_hash).expect("checking block failed")) + } } pub struct BabeTestNet { - peers: Vec>, + peers: Vec, DummySpecialization>>, } type TestHeader = ::Header; @@ -94,7 +200,6 @@ pub struct TestVerifier { TestBlock, test_client::runtime::RuntimeApi, PeersFullClient, - (), >, mutator: Mutator, } @@ -110,16 +215,22 @@ impl Verifier for TestVerifier { justification: Option, body: Option>, ) -> Result<(BlockImportParams, Option)>>), String> { - let cb: &(dyn Fn(&mut TestHeader) + Send + Sync) = self.mutator.borrow(); - cb(&mut header); + // apply post-sealing mutations (i.e. stripping seal, if desired). + (self.mutator)(&mut header, Stage::PostSeal); Ok(self.inner.verify(origin, header, justification, body).expect("verification failed!")) } } +pub struct PeerData { + link: BabeLink, + inherent_data_providers: InherentDataProviders, + block_import: Mutex>>, +} + impl TestNetFactory for BabeTestNet { type Specialization = DummySpecialization; type Verifier = TestVerifier; - type PeerData = (); + type PeerData = Option; /// Create new test network with peers and given config. fn from_config(_config: &ProtocolConfig) -> Self { @@ -129,31 +240,62 @@ impl TestNetFactory for BabeTestNet { } } - /// KLUDGE: this function gets the mutator from thread-local storage. 
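The two-argument `Mutator` introduced above lets a test corrupt headers at a chosen stage: `Stage::PreSeal` runs in the proposer before the block is sealed, `Stage::PostSeal` runs in the verifier on the received header. A hypothetical mutator that drops the seal only after sealing, for instance, would be:

// leaves proposals untouched, but strips the trailing seal digest from every
// header the verifier sees, so import should fail.
let drop_seal: Mutator = Arc::new(|header: &mut TestHeader, stage| {
    if stage == Stage::PostSeal {
        header.digest_mut().pop();
    }
});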
- fn make_verifier(&self, client: PeersClient, _cfg: &ProtocolConfig) + fn make_block_import(&self, client: PeersClient) + -> ( + BoxBlockImport, + Option>, + Option>, + Option>, + Option, + ) + { + let client = client.as_full().expect("only full clients are tested"); + let inherent_data_providers = InherentDataProviders::new(); + + let config = Config::get_or_compute(&*client).expect("config available"); + let (block_import, link) = crate::block_import( + config, + client.clone(), + client.clone(), + client.clone(), + ).expect("can initialize block-import"); + + let block_import = PanickingBlockImport(block_import); + + let data_block_import = Mutex::new(Some(Box::new(block_import.clone()) as BoxBlockImport<_>)); + ( + Box::new(block_import), + None, + None, + None, + Some(PeerData { link, inherent_data_providers, block_import: data_block_import }), + ) + } + + fn make_verifier( + &self, + client: PeersClient, + _cfg: &ProtocolConfig, + maybe_link: &Option, + ) -> Self::Verifier { let client = client.as_full().expect("only full clients are used in test"); trace!(target: "babe", "Creating a verifier"); - let config = Config::get_or_compute(&*client) - .expect("slot duration available"); - let inherent_data_providers = InherentDataProviders::new(); - register_babe_inherent_data_provider( - &inherent_data_providers, - config.get() - ).expect("Registers babe inherent data provider"); - trace!(target: "babe", "Provider registered"); + + // ensure block import and verifier are linked correctly. + let data = maybe_link.as_ref().expect("babe link always provided to verifier instantiation"); TestVerifier { inner: BabeVerifier { client: client.clone(), api: client, - inherent_data_providers, - config, - time_source: Default::default(), - transaction_pool : Default::default(), + inherent_data_providers: data.inherent_data_providers.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + time_source: data.link.time_source.clone(), }, - mutator: MUTATOR.with(|s| s.borrow().clone()), + mutator: MUTATOR.with(|m| m.borrow().clone()), } } @@ -188,8 +330,13 @@ fn rejects_empty_block() { }) } -fn run_one_test() { +fn run_one_test( + mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static, +) { let _ = env_logger::try_init(); + let mutator = Arc::new(mutator) as Mutator; + + MUTATOR.with(|m| *m.borrow_mut() = mutator.clone()); let net = BabeTestNet::new(3); let peers = &[ @@ -202,6 +349,7 @@ fn run_one_test() { let mut import_notifications = Vec::new(); let mut runtime = current_thread::Runtime::new().unwrap(); let mut keystore_paths = Vec::new(); + for (peer_id, seed) in peers { let mut net = net.lock(); let peer = net.peer(*peer_id); @@ -213,30 +361,46 @@ fn run_one_test() { keystore.write().insert_ephemeral_from_seed::(seed).expect("Generates authority key"); keystore_paths.push(keystore_path); - let environ = DummyFactory(client.clone()); + let mut got_own = false; + let mut got_other = false; + + let data = peer.data.as_ref().expect("babe link set up during initialization"); + + let environ = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: mutator.clone(), + }; + import_notifications.push( + // run each future until we get one of our own blocks with number higher than 5 + // that was produced locally. 
client.import_notification_stream() - .take_while(|n| future::ready(!(n.origin != BlockOrigin::Own && n.header.number() < &5))) - .for_each(move |_| future::ready(())) + .take_while(move |n| future::ready(n.header.number() < &5 || { + if n.origin == BlockOrigin::Own { + got_own = true; + } else { + got_other = true; + } + + // continue until we have at least one block of our own + // and one of another peer. + !(got_own && got_other) + })) + .for_each(|_| future::ready(()) ) ); - let config = Config::get_or_compute(&*client).expect("slot duration available"); - - let inherent_data_providers = InherentDataProviders::new(); - register_babe_inherent_data_provider( - &inherent_data_providers, config.get() - ).expect("Registers babe inherent data provider"); runtime.spawn(start_babe(BabeParams { - config, - block_import: client.clone(), + block_import: data.block_import.lock().take().expect("import set up during init"), select_chain, client, env: environ, sync_oracle: DummyOracle, - inherent_data_providers, + inherent_data_providers: data.inherent_data_providers.clone(), force_authoring: false, - time_source: Default::default(), + babe_link: data.link.clone(), keystore, }).expect("Starts babe")); } @@ -251,45 +415,41 @@ fn run_one_test() { } #[test] -fn authoring_blocks() { run_one_test() } +fn authoring_blocks() { + run_one_test(|_, _| ()) +} #[test] #[should_panic] fn rejects_missing_inherent_digest() { - MUTATOR.with(|s| *s.borrow_mut() = Arc::new(move |header: &mut TestHeader| { + run_one_test(|header: &mut TestHeader, stage| { let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); header.digest_mut().logs = v.into_iter() - .filter(|v| v.as_babe_pre_digest().is_none()) + .filter(|v| stage == Stage::PostSeal || v.as_babe_pre_digest().is_none()) .collect() - })); - run_one_test() + }) } #[test] #[should_panic] fn rejects_missing_seals() { - MUTATOR.with(|s| *s.borrow_mut() = Arc::new(move |header: &mut TestHeader| { + run_one_test(|header: &mut TestHeader, stage| { let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); header.digest_mut().logs = v.into_iter() - .filter(|v| v.as_babe_seal().is_none()) + .filter(|v| stage == Stage::PreSeal || v.as_babe_seal().is_none()) .collect() - })); - run_one_test() + }) } -// TODO: this test assumes that the test runtime will trigger epoch changes -// which isn't the case since it doesn't include the session module. 
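With this harness, each negative test reduces to `run_one_test` plus a stage-aware mutator. One more hypothetical example in the same vein as the tests here (the test name and the blanket `logs.clear()` are illustrative, not part of this suite):

#[test]
#[should_panic]
fn rejects_headers_stripped_of_all_digests() {
    run_one_test(|header: &mut TestHeader, stage| {
        // removing every digest item post-seal (pre-digest included) must make
        // verification, and therefore the panicking block import, fail.
        if stage == Stage::PostSeal {
            header.digest_mut().logs.clear();
        }
    })
}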
#[test] #[should_panic] -#[ignore] fn rejects_missing_consensus_digests() { - MUTATOR.with(|s| *s.borrow_mut() = Arc::new(move |header: &mut TestHeader| { + run_one_test(|header: &mut TestHeader, stage| { let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); header.digest_mut().logs = v.into_iter() - .filter(|v| v.as_babe_epoch().is_none()) + .filter(|v| stage == Stage::PostSeal || v.as_next_epoch_descriptor().is_none()) .collect() - })); - run_one_test() + }); } #[test] @@ -326,28 +486,34 @@ fn can_author_block() { .expect("Generates authority pair"); let mut i = 0; - let mut epoch = Epoch { + let epoch = Epoch { start_slot: 0, authorities: vec![(pair.public(), 1)], randomness: [0; 32], epoch_index: 1, duration: 100, - secondary_slots: true, }; - let parent_weight = 0; + let mut config = crate::BabeConfiguration { + slot_duration: 1000, + epoch_length: 100, + c: (3, 10), + genesis_authorities: Vec::new(), + randomness: [0; 32], + secondary_slots: true, + }; // with secondary slots enabled it should never be empty - match claim_slot(i, parent_weight, &epoch, (3, 10), &keystore) { + match claim_slot(i, &epoch, &config, &keystore) { None => i += 1, Some(s) => debug!(target: "babe", "Authored block {:?}", s.0), } // otherwise with only vrf-based primary slots we might need to try a couple // of times. - epoch.secondary_slots = false; + config.secondary_slots = false; loop { - match claim_slot(i, parent_weight, &epoch, (3, 10), &keystore) { + match claim_slot(i, &epoch, &config, &keystore) { None => i += 1, Some(s) => { debug!(target: "babe", "Authored block {:?}", s.0); @@ -358,14 +524,75 @@ fn can_author_block() { } #[test] -fn authorities_call_works() { - let _ = env_logger::try_init(); - let client = test_client::new(); - - assert_eq!(client.info().chain.best_number, 0); - assert_eq!(epoch(&client, &BlockId::Number(0)).unwrap().into_regular().unwrap().authorities, vec![ - (Keyring::Alice.public().into(), 1), - (Keyring::Bob.public().into(), 1), - (Keyring::Charlie.public().into(), 1), - ]); +fn importing_block_one_sets_genesis_epoch() { + let mut net = BabeTestNet::new(1); + + let peer = net.peer(0); + let data = peer.data.as_ref().expect("babe link set up during initialization"); + let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); + + let mut environ = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: Arc::new(|_, _| ()), + }; + + let genesis_header = client.header(&BlockId::Number(0)).unwrap().unwrap(); + + let mut proposer = environ.init(&genesis_header).unwrap(); + let babe_claim = Item::babe_pre_digest(babe_primitives::BabePreDigest::Secondary { + authority_index: 0, + slot_number: 999, + }); + let pre_digest = sr_primitives::generic::Digest { logs: vec![babe_claim] }; + + let genesis_epoch = data.link.config.genesis_epoch(999); + + let mut block = futures::executor::block_on(proposer.propose_with(pre_digest)).unwrap(); + + // seal by alice. + let seal = { + // sign the pre-sealed hash of the block and then + // add it to a digest item. 
+ let pair = AuthorityPair::from_seed(&[1; 32]); + let pre_hash = block.header.hash(); + let signature = pair.sign(pre_hash.as_ref()); + Item::babe_seal(signature) + }; + + let post_hash = { + block.header.digest_mut().push(seal.clone()); + let h = block.header.hash(); + block.header.digest_mut().pop(); + h + }; + assert_eq!(*block.header.number(), 1); + let (header, body) = block.deconstruct(); + + let post_digests = vec![seal]; + let mut block_import = data.block_import.lock().take().expect("import set up during init"); + block_import.import_block( + BlockImportParams { + origin: BlockOrigin::Own, + header, + justification: None, + post_digests, + body: Some(body), + finalized: false, + auxiliary: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + }, + Default::default(), + ).unwrap(); + + let epoch_changes = data.link.epoch_changes.lock(); + let epoch_for_second_block = epoch_changes.epoch_for_child_of( + descendent_query(&*client), + &post_hash, + 1, + 1000, + |slot| data.link.config.genesis_epoch(slot), + ).unwrap().unwrap().into_inner(); + assert_eq!(epoch_for_second_block, genesis_epoch); } diff --git a/core/consensus/common/src/block_import.rs b/core/consensus/common/src/block_import.rs index bcafb352cd10b..4024322911c69 100644 --- a/core/consensus/common/src/block_import.rs +++ b/core/consensus/common/src/block_import.rs @@ -120,7 +120,8 @@ pub struct BlockImportParams { /// Contains a list of key-value pairs. If values are `None`, the keys /// will be deleted. pub auxiliary: Vec<(Vec, Option>)>, - /// Fork choice strategy of this import. + /// Fork choice strategy of this import. This should only be set by a + /// synchronous import, otherwise it may race against other imports. pub fork_choice: ForkChoiceStrategy, } @@ -185,7 +186,31 @@ pub trait BlockImport { ) -> Result; } -impl BlockImport for Arc +impl BlockImport for crate::import_queue::BoxBlockImport { + type Error = crate::error::Error; + + /// Check block preconditions. + fn check_block( + &mut self, + hash: B::Hash, + parent_hash: B::Hash, + ) -> Result { + (**self).check_block(hash, parent_hash) + } + + /// Import a block. + /// + /// Cached data can be accessed through the blockchain cache. + fn import_block( + &mut self, + block: BlockImportParams, + cache: HashMap>, + ) -> Result { + (**self).import_block(block, cache) + } +} + +impl BlockImport for Arc where for<'r> &'r T: BlockImport { type Error = E; diff --git a/core/consensus/common/src/import_queue.rs b/core/consensus/common/src/import_queue.rs index 533df2b179abd..07d6297acc734 100644 --- a/core/consensus/common/src/import_queue.rs +++ b/core/consensus/common/src/import_queue.rs @@ -105,6 +105,7 @@ pub trait ImportQueue: Send { number: NumberFor, finality_proof: Vec ); + /// Polls for actions to perform on the network. /// /// This method should behave in a way similar to `Future::poll`. It can register the current diff --git a/core/consensus/slots/src/lib.rs b/core/consensus/slots/src/lib.rs index bcceb62bb63f1..fd86a0f277331 100644 --- a/core/consensus/slots/src/lib.rs +++ b/core/consensus/slots/src/lib.rs @@ -77,8 +77,9 @@ pub trait SimpleSlotWorker { /// A handle to a `BlockImport`. fn block_import(&self) -> Arc>; - /// Returns the epoch data necessary for authoring. - fn epoch_data(&self, block: &B::Hash) -> Result; + /// Returns the epoch data necessary for authoring. For time-dependent epochs, + /// use the provided slot number as a canonical source of time. 
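Concretely, an implementation of the hook below for a slot-indexed scheme would key its lookup on `slot_number` rather than on the local clock; a rough shape, assuming a BABE-like worker holding `epoch_changes` and `config` (the `Error::EpochLookup` variant is a placeholder, not a real API):

fn epoch_data(&self, header: &B::Header, slot_number: u64) -> Result<Self::EpochData, Error> {
    // the slot, not wall-clock time, decides which epoch a child of `header`
    // authored at `slot_number` belongs to.
    self.epoch_changes.lock()
        .epoch_for_child_of(
            descendent_query(&*self.client),
            &header.hash(),
            *header.number(),
            slot_number,
            |slot| self.config.genesis_epoch(slot),
        )
        .map_err(|e| Error::EpochLookup(format!("{:?}", e)))? // placeholder error variant
        .ok_or_else(|| Error::EpochLookup("no viable epoch".into()))
}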
+ fn epoch_data(&self, header: &B::Header, slot_number: u64) -> Result; /// Returns the number of authorities given the epoch data. fn authorities_len(&self, epoch_data: &Self::EpochData) -> usize; @@ -120,7 +121,7 @@ pub trait SimpleSlotWorker { let (timestamp, slot_number, slot_duration) = (slot_info.timestamp, slot_info.number, slot_info.duration); - let epoch_data = match self.epoch_data(&chain_head.hash()) { + let epoch_data = match self.epoch_data(&chain_head, slot_number) { Ok(epoch_data) => epoch_data, Err(err) => { warn!("Unable to fetch epoch data at block {:?}: {:?}", chain_head.hash(), err); @@ -359,7 +360,8 @@ impl SlotData for u64 { } /// A slot duration. Create with `get_or_compute`. -// The internal member should stay private here. +// The internal member should stay private here to maintain invariants of +// `get_or_compute`. #[derive(Clone, Copy, Debug, Encode, Decode, Hash, PartialOrd, Ord, PartialEq, Eq)] pub struct SlotDuration(T); diff --git a/core/finality-grandpa/src/environment.rs b/core/finality-grandpa/src/environment.rs index 27d398ca3e137..502d201b9436b 100644 --- a/core/finality-grandpa/src/environment.rs +++ b/core/finality-grandpa/src/environment.rs @@ -906,7 +906,7 @@ pub(crate) fn finalize_block, E, RA>( let status = authority_set.apply_standard_changes( hash, number, - &is_descendent_of(client, None), + &is_descendent_of::<_, _, Block::Hash>(client, None), ).map_err(|e| Error::Safety(e.to_string()))?; // check if this is this is the first finalization of some consensus changes diff --git a/core/finality-grandpa/src/import.rs b/core/finality-grandpa/src/import.rs index 8f7124b3d3ab2..46b32d999da3a 100644 --- a/core/finality-grandpa/src/import.rs +++ b/core/finality-grandpa/src/import.rs @@ -294,7 +294,7 @@ where // returns a function for checking whether a block is a descendent of another // consistent with querying client directly after importing the block. let parent_hash = *block.header.parent_hash(); - let is_descendent_of = is_descendent_of(&self.inner, Some((&hash, &parent_hash))); + let is_descendent_of = is_descendent_of(&*self.inner, Some((&hash, &parent_hash))); let mut guard = InnerGuard { guard: Some(self.authority_set.inner().write()), diff --git a/core/finality-grandpa/src/tests.rs b/core/finality-grandpa/src/tests.rs index 9193dae71aeba..f6f28e705d198 100644 --- a/core/finality-grandpa/src/tests.rs +++ b/core/finality-grandpa/src/tests.rs @@ -98,7 +98,12 @@ impl TestNetFactory for GrandpaTestNet { } } - fn make_verifier(&self, _client: PeersClient, _cfg: &ProtocolConfig) -> Self::Verifier { + fn make_verifier( + &self, + _client: PeersClient, + _cfg: &ProtocolConfig, + _: &PeerData, + ) -> Self::Verifier { PassThroughVerifier(false) // use non-instant finality. } diff --git a/core/network/src/test/mod.rs b/core/network/src/test/mod.rs index 8cceeeaae6543..276bce9f39e31 100644 --- a/core/network/src/test/mod.rs +++ b/core/network/src/test/mod.rs @@ -461,7 +461,12 @@ pub trait TestNetFactory: Sized { /// These two need to be implemented! fn from_config(config: &ProtocolConfig) -> Self; - fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig) -> Self::Verifier; + fn make_verifier( + &self, + client: PeersClient, + config: &ProtocolConfig, + peer_data: &Self::PeerData, + ) -> Self::Verifier; /// Get reference to peer. 
fn peer(&mut self, i: usize) -> &mut Peer; @@ -509,12 +514,23 @@ pub trait TestNetFactory: Sized { let backend = test_client_builder.backend(); let (c, longest_chain) = test_client_builder.build_with_longest_chain(); let client = Arc::new(c); - let verifier = self.make_verifier(PeersClient::Full(client.clone(), backend.clone()), config); - let verifier = VerifierAdapter(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); - let (block_import, justification_import, finality_proof_import, finality_proof_request_builder, data) - = self.make_block_import(PeersClient::Full(client.clone(), backend.clone())); + + let ( + block_import, + justification_import, + finality_proof_import, + finality_proof_request_builder, + data, + ) = self.make_block_import(PeersClient::Full(client.clone(), backend.clone())); let block_import = BlockImportAdapter(Arc::new(Mutex::new(block_import))); + let verifier = self.make_verifier( + PeersClient::Full(client.clone(), backend.clone()), + config, + &data, + ); + let verifier = VerifierAdapter(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); + let import_queue = Box::new(BasicQueue::new( verifier.clone(), Box::new(block_import.clone()), @@ -572,12 +588,22 @@ pub trait TestNetFactory: Sized { let (c, backend) = test_client::new_light(); let client = Arc::new(c); - let verifier = self.make_verifier(PeersClient::Light(client.clone(), backend.clone()), &config); - let verifier = VerifierAdapter(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); - let (block_import, justification_import, finality_proof_import, finality_proof_request_builder, data) - = self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); + let ( + block_import, + justification_import, + finality_proof_import, + finality_proof_request_builder, + data, + ) = self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); let block_import = BlockImportAdapter(Arc::new(Mutex::new(block_import))); + let verifier = self.make_verifier( + PeersClient::Light(client.clone(), backend.clone()), + &config, + &data, + ); + let verifier = VerifierAdapter(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); + let import_queue = Box::new(BasicQueue::new( verifier.clone(), Box::new(block_import.clone()), @@ -696,7 +722,7 @@ impl TestNetFactory for TestNet { } } - fn make_verifier(&self, _client: PeersClient, _config: &ProtocolConfig) + fn make_verifier(&self, _client: PeersClient, _config: &ProtocolConfig, _peer_data: &()) -> Self::Verifier { PassThroughVerifier(false) @@ -742,8 +768,8 @@ impl TestNetFactory for JustificationTestNet { JustificationTestNet(TestNet::from_config(config)) } - fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig) -> Self::Verifier { - self.0.make_verifier(client, config) + fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig, peer_data: &()) -> Self::Verifier { + self.0.make_verifier(client, config, peer_data) } fn peer(&mut self, i: usize) -> &mut Peer { diff --git a/core/phragmen/benches/phragmen.rs b/core/phragmen/benches/phragmen.rs index ecbf235bab287..ae1daac468949 100644 --- a/core/phragmen/benches/phragmen.rs +++ b/core/phragmen/benches/phragmen.rs @@ -16,7 +16,7 @@ //! Note that execution times will not be accurate in an absolute scale, since //! - Everything is executed in the context of `TestExternalities` //! - Everything is executed in native environment. 
- +#![cfg(feature = "bench")] #![feature(test)] extern crate test; diff --git a/core/service/src/chain_ops.rs b/core/service/src/chain_ops.rs index 3a3677798b6ad..06390e80bd0ea 100644 --- a/core/service/src/chain_ops.rs +++ b/core/service/src/chain_ops.rs @@ -20,6 +20,7 @@ use crate::RuntimeGenesis; use crate::error; use crate::chain_spec::ChainSpec; +/// Defines the logic for an operation exporting blocks within a range. #[macro_export] macro_rules! export_blocks { ($client:ident, $exit:ident, $output:ident, $from:ident, $to:ident, $json:ident) => {{ if last < block { return Err("Invalid block range specified".into()); } let (exit_send, exit_recv) = std::sync::mpsc::channel(); - ::std::thread::spawn(move || { + std::thread::spawn(move || { let _ = $exit.wait(); let _ = exit_send.send(()); }); @@ -75,6 +76,7 @@ macro_rules! export_blocks { }} } +/// Defines the logic for an operation importing blocks from a known source. #[macro_export] macro_rules! import_blocks { ($block:ty, $client:ident, $queue:ident, $exit:ident, $input:ident) => {{ @@ -203,6 +205,7 @@ macro_rules! import_blocks { }} } +/// Revert the chain by some number of blocks. #[macro_export] macro_rules! revert_chain { ($client:ident, $blocks:ident) => {{ diff --git a/core/service/test/src/lib.rs b/core/service/test/src/lib.rs index 870f287bff8f2..6e096ec35c6b2 100644 --- a/core/service/test/src/lib.rs +++ b/core/service/test/src/lib.rs @@ -20,7 +20,6 @@ use std::iter; use std::sync::{Arc, Mutex, MutexGuard}; use std::net::Ipv4Addr; use std::time::Duration; -use std::collections::HashMap; use log::info; use futures::{Future, Stream, Poll}; use tempdir::TempDir; @@ -36,7 +35,6 @@ use service::{ use network::{multiaddr, Multiaddr}; use network::config::{NetworkConfiguration, TransportConfig, NodeKeyConfig, Secret, NonReservedPeerMode}; use sr_primitives::{generic::BlockId, traits::Block as BlockT}; -use consensus::{BlockImportParams, BlockImport}; /// Maximum duration of single wait call. const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); @@ -276,7 +274,13 @@ impl TestNet where } } -pub fn connectivity(spec: ChainSpec, full_builder: Fb, light_builder: Lb) where +pub fn connectivity( + spec: ChainSpec, + full_builder: Fb, + light_builder: Lb, + light_node_interconnectivity: bool, // should normally be false, unless the light nodes + // aren't actually light.
+) where Fb: Fn(Configuration<(), G>) -> Result, F: AbstractService, Lb: Fn(Configuration<(), G>) -> Result, @@ -284,6 +288,14 @@ pub fn connectivity(spec: ChainSpec, full_builder: Fb, light { const NUM_FULL_NODES: usize = 5; const NUM_LIGHT_NODES: usize = 5; + + let expected_full_connections = NUM_FULL_NODES - 1 + NUM_LIGHT_NODES; + let expected_light_connections = if light_node_interconnectivity { + expected_full_connections + } else { + NUM_FULL_NODES + }; + { let temp = TempDir::new("substrate-connectivity-test").expect("Error creating test dir"); let runtime = { @@ -307,11 +319,14 @@ pub fn connectivity(spec: ChainSpec, full_builder: Fb, light service.get().network().add_reserved_peer(first_address.to_string()) .expect("Error adding reserved peer"); } + network.run_until_all_full( - |_index, service| service.get().network().num_connected() == NUM_FULL_NODES - 1 - + NUM_LIGHT_NODES, - |_index, service| service.get().network().num_connected() == NUM_FULL_NODES, + move |_index, service| service.get().network().num_connected() + == expected_full_connections, + move |_index, service| service.get().network().num_connected() + == expected_light_connections, ); + network.runtime }; @@ -350,10 +365,12 @@ pub fn connectivity(spec: ChainSpec, full_builder: Fb, light address = node_id.clone(); } } + network.run_until_all_full( - |_index, service| service.get().network().num_connected() == NUM_FULL_NODES - 1 - + NUM_LIGHT_NODES, - |_index, service| service.get().network().num_connected() == NUM_FULL_NODES, + move |_index, service| service.get().network().num_connected() + == expected_full_connections, + move |_index, service| service.get().network().num_connected() + == expected_light_connections, ); } temp.close().expect("Error removing temp dir"); @@ -364,14 +381,14 @@ pub fn sync( spec: ChainSpec, full_builder: Fb, light_builder: Lb, - mut block_factory: B, + mut make_block_and_import: B, mut extrinsic_factory: E ) where Fb: Fn(Configuration<(), G>) -> Result<(F, U), Error>, F: AbstractService, Lb: Fn(Configuration<(), G>) -> Result, L: AbstractService, - B: FnMut(&F, &U) -> BlockImportParams, + B: FnMut(&F, &mut U), E: FnMut(&F, &U) -> ::Extrinsic, U: Clone + Send + 'static, { @@ -392,15 +409,13 @@ pub fn sync( ); info!("Checking block sync"); let first_address = { - let first_service = &network.full_nodes[0].1; - let first_user_data = &network.full_nodes[0].2; - let mut client = first_service.get().client(); + let &mut (_, ref first_service, ref mut first_user_data, _) = &mut network.full_nodes[0]; for i in 0 .. 
NUM_BLOCKS { if i % 128 == 0 { - info!("Generating #{}", i); + info!("Generating #{}", i + 1); } - let import_data = block_factory(&first_service.get(), first_user_data); - client.import_block(import_data, HashMap::new()).expect("Error importing test block"); + + make_block_and_import(&first_service.get(), first_user_data); } network.full_nodes[0].3.clone() }; diff --git a/core/sr-io/src/lib.rs b/core/sr-io/src/lib.rs index aee9e23909b22..06114c02a3af8 100644 --- a/core/sr-io/src/lib.rs +++ b/core/sr-io/src/lib.rs @@ -26,7 +26,6 @@ #![cfg_attr(feature = "std", doc = "Substrate runtime standard library as compiled when linked with Rust's standard library.")] #![cfg_attr(not(feature = "std"), doc = "Substrate's runtime standard library as compiled without Rust's standard library.")] -use hash_db::Hasher; use rstd::vec::Vec; use primitives::{ diff --git a/core/sr-io/with_std.rs b/core/sr-io/with_std.rs index f93194bb47409..919f4a913acc9 100644 --- a/core/sr-io/with_std.rs +++ b/core/sr-io/with_std.rs @@ -17,6 +17,7 @@ use primitives::{ blake2_128, blake2_256, twox_128, twox_256, twox_64, ed25519, Blake2Hasher, sr25519, Pair, H256, traits::Externalities, child_storage_key::ChildStorageKey, hexdisplay::HexDisplay, offchain, + Hasher, }; // Switch to this after PoC-3 // pub use primitives::BlakeHasher; diff --git a/core/sr-io/without_std.rs b/core/sr-io/without_std.rs index 0ff1702f907bf..ad5ed77d70b0d 100644 --- a/core/sr-io/without_std.rs +++ b/core/sr-io/without_std.rs @@ -20,7 +20,7 @@ pub use rstd::{mem, slice}; use core::{intrinsics, panic::PanicInfo}; use rstd::{vec::Vec, cell::Cell, convert::TryInto}; -use primitives::{offchain, Blake2Hasher}; +use primitives::offchain; use codec::Decode; #[cfg(not(feature = "no_panic_handler"))] @@ -732,7 +732,7 @@ impl StorageApi for () { } - fn blake2_256_trie_root(input: Vec<(Vec, Vec)>) -> H256 { + fn blake2_256_trie_root(_input: Vec<(Vec, Vec)>) -> H256 { unimplemented!() } diff --git a/core/sr-primitives/src/testing.rs b/core/sr-primitives/src/testing.rs index 198870f8c4f42..5391735576aa2 100644 --- a/core/sr-primitives/src/testing.rs +++ b/core/sr-primitives/src/testing.rs @@ -25,7 +25,7 @@ use crate::traits::{ }; use crate::{generic, KeyTypeId, ApplyResult}; use crate::weights::{GetDispatchInfo, DispatchInfo}; -pub use primitives::H256; +pub use primitives::{H256, sr25519}; use primitives::{crypto::{CryptoType, Dummy, key_types, Public}, U256}; use crate::transaction_validity::{TransactionValidity, TransactionValidityError}; diff --git a/core/test-runtime/src/lib.rs b/core/test-runtime/src/lib.rs index 815f43c3dbf20..ff9826acaedb8 100644 --- a/core/test-runtime/src/lib.rs +++ b/core/test-runtime/src/lib.rs @@ -619,25 +619,15 @@ cfg_if! 
{ } impl babe_primitives::BabeApi for Runtime { - fn startup_data() -> babe_primitives::BabeConfiguration { + fn configuration() -> babe_primitives::BabeConfiguration { babe_primitives::BabeConfiguration { - median_required_blocks: 0, - slot_duration: 3000, + slot_duration: 1000, + epoch_length: EpochDuration::get(), c: (3, 10), - } - } - - fn epoch() -> babe_primitives::Epoch { - let authorities = system::authorities(); - let authorities: Vec<_> = authorities.into_iter().map(|x|(x, 1)).collect(); - - babe_primitives::Epoch { - start_slot: >::epoch_start_slot(), - authorities, + genesis_authorities: system::authorities() + .into_iter().map(|x|(x, 1)).collect(), randomness: >::randomness(), - epoch_index: >::epoch_index(), - duration: EpochDuration::get(), - secondary_slots: >::secondary_slots().0, + secondary_slots: true, } } } @@ -839,25 +829,15 @@ cfg_if! { } impl babe_primitives::BabeApi for Runtime { - fn startup_data() -> babe_primitives::BabeConfiguration { + fn configuration() -> babe_primitives::BabeConfiguration { babe_primitives::BabeConfiguration { - median_required_blocks: 0, slot_duration: 1000, + epoch_length: EpochDuration::get(), c: (3, 10), - } - } - - fn epoch() -> babe_primitives::Epoch { - let authorities = system::authorities(); - let authorities: Vec<_> = authorities.into_iter().map(|x|(x, 1)).collect(); - - babe_primitives::Epoch { - start_slot: >::epoch_start_slot(), - authorities, + genesis_authorities: system::authorities() + .into_iter().map(|x|(x, 1)).collect(), randomness: >::randomness(), - epoch_index: >::epoch_index(), - duration: EpochDuration::get(), - secondary_slots: >::secondary_slots().0, + secondary_slots: true, } } } diff --git a/core/utils/fork-tree/src/lib.rs b/core/utils/fork-tree/src/lib.rs index 42646b652164e..42999187558fa 100644 --- a/core/utils/fork-tree/src/lib.rs +++ b/core/utils/fork-tree/src/lib.rs @@ -153,6 +153,8 @@ impl ForkTree where /// should return `true` if the second hash (target) is a descendent of the /// first hash (base). This method assumes that nodes in the same branch are /// imported in order. + /// + /// Returns `true` if the imported node is a root. pub fn import( &mut self, mut hash: H, @@ -208,7 +210,7 @@ impl ForkTree where self.node_iter().map(|node| (&node.hash, &node.number, &node.data)) } - /// Find a node in the tree that is the lowest ancestor of the given + /// Find a node in the tree that is the deepest ancestor of the given /// block hash and which passes the given predicate. The given function /// `is_descendent_of` should return `true` if the second hash (target) /// is a descendent of the first hash (base). @@ -228,8 +230,8 @@ impl ForkTree where let node = root.find_node_where(hash, number, is_descendent_of, predicate)?; // found the node, early exit - if let Some(node) = node { - return Ok(node); + if let FindOutcome::Found(node) = node { + return Ok(Some(node)); } } @@ -510,6 +512,17 @@ impl ForkTree where mod node_implementation { use super::*; + /// The outcome of a search within a node. + pub enum FindOutcome { + // this is the node we were looking for. + Found(T), + // not the node we're looking for. contains a flag indicating + // whether the node was a descendent. true implies the predicate failed. + Failure(bool), + // Abort search. 
+ Abort, + } + #[derive(Clone, Debug, Decode, Encode, PartialEq)] pub struct Node { pub hash: H, @@ -560,9 +573,10 @@ mod node_implementation { } } - /// Find a node in the tree that is the lowest ancestor of the given - /// block hash and which passes the given predicate. The given function - /// `is_descendent_of` should return `true` if the second hash (target) + /// Find a node in the tree that is the deepest ancestor of the given + /// block hash which also passes the given predicate, backtracking + /// when the predicate fails. + /// The given function `is_descendent_of` should return `true` if the second hash (target) /// is a descendent of the first hash (base). // FIXME: it would be useful if this returned a mutable reference but // rustc can't deal with lifetimes properly. an option would be to try @@ -573,23 +587,32 @@ mod node_implementation { number: &N, is_descendent_of: &F, predicate: &P, - ) -> Result>>, Error> + ) -> Result>, Error> where E: std::error::Error, F: Fn(&H, &H) -> Result, P: Fn(&V) -> bool, { // stop searching this branch if *number < self.number { - return Ok(None); + return Ok(FindOutcome::Failure(false)); } + let mut known_descendent_of = false; + // continue depth-first search through all children for node in self.children.iter() { - let node = node.find_node_where(hash, number, is_descendent_of, predicate)?; - // found node, early exit - if node.is_some() { - return Ok(node); + match node.find_node_where(hash, number, is_descendent_of, predicate)? { + FindOutcome::Abort => return Ok(FindOutcome::Abort), + FindOutcome::Found(x) => return Ok(FindOutcome::Found(x)), + FindOutcome::Failure(true) => { + // if the block was a descendent of this child, + // then it cannot be a descendent of any others, + // so we don't search them. + known_descendent_of = true; + break; + }, + FindOutcome::Failure(false) => {}, } } @@ -597,24 +620,23 @@ mod node_implementation { // searching for is a descendent of this node then we will stop the // search here, since there aren't any more children and we found // the correct node so we don't want to backtrack. - if is_descendent_of(&self.hash, hash)? { + let is_descendent_of = known_descendent_of || is_descendent_of(&self.hash, hash)?; + if is_descendent_of { // if the predicate passes we return the node if predicate(&self.data) { - Ok(Some(Some(self))) - - // otherwise we stop the search returning `None` - } else { - Ok(Some(None)) + return Ok(FindOutcome::Found(self)); } - } else { - Ok(None) } + + // otherwise, tell our ancestor that we failed, and whether + // the block was a descendent. + Ok(FindOutcome::Failure(is_descendent_of)) } } } // Workaround for: https://github.com/rust-lang/rust/issues/34537 -use node_implementation::Node; +use node_implementation::{Node, FindOutcome}; struct ForkTreeIterator<'a, H, N, V> { stack: Vec<&'a Node>, @@ -1197,7 +1219,7 @@ mod test { } #[test] - fn find_node_doesnt_backtrack_after_finding_highest_descending_node() { + fn find_node_backtracks_after_finding_highest_descending_node() { let mut tree = ForkTree::new(); // @@ -1215,11 +1237,12 @@ mod test { }; tree.import("A", 1, 1, &is_descendent_of).unwrap(); - tree.import("B", 2, 4, &is_descendent_of).unwrap(); + tree.import("B", 2, 2, &is_descendent_of).unwrap(); tree.import("C", 2, 4, &is_descendent_of).unwrap(); - // when searching the tree we reach both node `B` and `C`, but the - // predicate doesn't pass. still, we should not backtrack to node `A`. + // when searching the tree we reach node `C`, but the + // predicate doesn't pass. 
we should backtrack to `B`, but not to `A`, + // since "B" fulfills the predicate. let node = tree.find_node_where( &"D", &3, @@ -1227,6 +1250,6 @@ mod test { &|data| *data < 3, ).unwrap(); - assert_eq!(node, None); + assert_eq!(node.unwrap().hash, "B"); } } diff --git a/node-template/runtime/src/lib.rs b/node-template/runtime/src/lib.rs index fa4d022d17f42..f736d9c62d6bb 100644 --- a/node-template/runtime/src/lib.rs +++ b/node-template/runtime/src/lib.rs @@ -374,27 +374,19 @@ impl_runtime_apis! { } impl babe_primitives::BabeApi for Runtime { - fn startup_data() -> babe_primitives::BabeConfiguration { + fn configuration() -> babe_primitives::BabeConfiguration { // The choice of `c` parameter (where `1 - c` represents the // probability of a slot being empty), is done in accordance to the // slot duration and expected target block time, for safely // resisting network delays of maximum two seconds. // babe_primitives::BabeConfiguration { - median_required_blocks: 1000, slot_duration: Babe::slot_duration(), + epoch_length: EpochDuration::get(), c: PRIMARY_PROBABILITY, - } - } - - fn epoch() -> babe_primitives::Epoch { - babe_primitives::Epoch { - start_slot: Babe::epoch_start_slot(), - authorities: Babe::authorities(), - epoch_index: Babe::epoch_index(), + genesis_authorities: Babe::authorities(), randomness: Babe::randomness(), - duration: EpochDuration::get(), - secondary_slots: Babe::secondary_slots().0, + secondary_slots: true, } } } diff --git a/node-template/src/service.rs b/node-template/src/service.rs index f4ab3f40000e2..c26959d302c4f 100644 --- a/node-template/src/service.rs +++ b/node-template/src/service.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use std::time::Duration; use substrate_client::LongestChain; -use babe::{import_queue, start_babe, Config}; +use babe; use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use futures::prelude::*; use node_template_runtime::{self, GenesisConfig, opaque::Block, RuntimeApi}; @@ -34,7 +34,6 @@ macro_rules! new_full_start { ($config:expr) => {{ let mut import_setup = None; let inherent_data_providers = inherents::InherentDataProviders::new(); - let mut tasks_to_spawn = None; let builder = substrate_service::ServiceBuilder::new_full::< node_template_runtime::opaque::Block, node_template_runtime::RuntimeApi, crate::service::Executor @@ -45,33 +44,38 @@ macro_rules! new_full_start { .with_transaction_pool(|config, client| Ok(transaction_pool::txpool::Pool::new(config, transaction_pool::ChainApi::new(client))) )? 
- .with_import_queue(|_config, client, mut select_chain, transaction_pool| { + .with_import_queue(|_config, client, mut select_chain, _transaction_pool| { let select_chain = select_chain.take() .ok_or_else(|| substrate_service::Error::SelectChainRequired)?; - let (block_import, link_half) = + let (grandpa_block_import, grandpa_link) = grandpa::block_import::<_, _, _, node_template_runtime::RuntimeApi, _, _>( client.clone(), client.clone(), select_chain )?; - let justification_import = block_import.clone(); + let justification_import = grandpa_block_import.clone(); - let (import_queue, babe_link, babe_block_import, pruning_task) = babe::import_queue( + let (babe_block_import, babe_link) = babe::block_import( babe::Config::get_or_compute(&*client)?, - block_import, + grandpa_block_import, + client.clone(), + client.clone(), + )?; + + let import_queue = babe::import_queue( + babe_link.clone(), + babe_block_import.clone(), Some(Box::new(justification_import)), None, client.clone(), client, inherent_data_providers.clone(), - Some(transaction_pool) )?; - import_setup = Some((babe_block_import.clone(), link_half, babe_link)); - tasks_to_spawn = Some(vec![Box::new(pruning_task)]); + import_setup = Some((babe_block_import, grandpa_link, babe_link)); Ok(import_queue) })?; - (builder, import_setup, inherent_data_providers, tasks_to_spawn) + (builder, import_setup, inherent_data_providers) }} } @@ -85,7 +89,7 @@ pub fn new_full(config: Configuration(config: Configuration(config: Configuration(config: Configuration(config: Configuration(config: Configuration( + let grandpa_block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, _>( client.clone(), backend, Arc::new(fetch_checker), client.clone() )?; - let finality_proof_import = block_import.clone(); + let finality_proof_import = grandpa_block_import.clone(); let finality_proof_request_builder = finality_proof_import.create_finality_proof_request_builder(); - // FIXME: pruning task isn't started since light client doesn't do `AuthoritySetup`. - let (import_queue, ..) 
= import_queue( - Config::get_or_compute(&*client)?, - block_import, + let (babe_block_import, babe_link) = babe::block_import( + babe::Config::get_or_compute(&*client)?, + grandpa_block_import, + client.clone(), + client.clone(), + )?; + + let import_queue = babe::import_queue( + babe_link.clone(), + babe_block_import, None, Some(Box::new(finality_proof_import)), client.clone(), client, inherent_data_providers.clone(), - Some(transaction_pool) )?; Ok((import_queue, finality_proof_request_builder)) diff --git a/node/cli/src/chain_spec.rs b/node/cli/src/chain_spec.rs index 00bcb2577613d..47de737614af1 100644 --- a/node/cli/src/chain_spec.rs +++ b/node/cli/src/chain_spec.rs @@ -350,7 +350,8 @@ pub fn local_testnet_config() -> ChainSpec { #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::service::{new_full, new_light}; + use crate::service::new_full; + use substrate_service::Roles; use service_test; fn local_testnet_genesis_instant_single() -> GenesisConfig { @@ -398,7 +399,12 @@ pub(crate) mod tests { service_test::connectivity( integration_test_config_with_two_authorities(), |config| new_full(config), - |config| new_light(config), + |mut config| { + // light nodes are unsupported + config.roles = Roles::FULL; + new_full(config) + }, + true, ); } } diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs index b1d13480697f8..06c88f3d58bf5 100644 --- a/node/cli/src/service.rs +++ b/node/cli/src/service.rs @@ -20,7 +20,7 @@ use std::sync::Arc; -use babe::{import_queue, Config}; +use babe; use client::{self, LongestChain}; use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use node_executor; @@ -47,7 +47,6 @@ macro_rules! new_full_start { type RpcExtension = jsonrpc_core::IoHandler; let mut import_setup = None; let inherent_data_providers = inherents::InherentDataProviders::new(); - let mut tasks_to_spawn = Vec::new(); let builder = substrate_service::ServiceBuilder::new_full::< node_primitives::Block, node_runtime::RuntimeApi, node_executor::Executor @@ -58,36 +57,40 @@ macro_rules! new_full_start { .with_transaction_pool(|config, client| Ok(transaction_pool::txpool::Pool::new(config, transaction_pool::ChainApi::new(client))) )? - .with_import_queue(|_config, client, mut select_chain, transaction_pool| { + .with_import_queue(|_config, client, mut select_chain, _transaction_pool| { let select_chain = select_chain.take() .ok_or_else(|| substrate_service::Error::SelectChainRequired)?; - let (block_import, link_half) = + let (grandpa_block_import, grandpa_link) = grandpa::block_import::<_, _, _, node_runtime::RuntimeApi, _, _>( client.clone(), client.clone(), select_chain )?; - let justification_import = block_import.clone(); + let justification_import = grandpa_block_import.clone(); - let (import_queue, babe_link, babe_block_import, pruning_task) = babe::import_queue( + let (block_import, babe_link) = babe::block_import( babe::Config::get_or_compute(&*client)?, - block_import, + grandpa_block_import, + client.clone(), + client.clone(), + )?; + + let import_queue = babe::import_queue( + babe_link.clone(), + block_import.clone(), Some(Box::new(justification_import)), None, client.clone(), client, inherent_data_providers.clone(), - Some(transaction_pool) )?; - import_setup = Some((babe_block_import.clone(), link_half, babe_link)); - tasks_to_spawn.push(Box::new(pruning_task)); - + import_setup = Some((block_import, grandpa_link, babe_link)); Ok(import_queue) })? 
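// The same wiring now appears in both the template and the full node: GRANDPA's
// block import wraps the client, BABE's block import wraps GRANDPA's, and the
// import queue's verifier runs in front of both. The onion pattern in isolation,
// with illustrative names and a toy block type rather than Substrate's API:
//
//     trait BlockImport {
//         fn import_block(&self, block: &str) -> Result<(), String>;
//     }
//
//     struct Client;
//     impl BlockImport for Client {
//         // innermost layer: persist the block
//         fn import_block(&self, _block: &str) -> Result<(), String> { Ok(()) }
//     }
//
//     struct GrandpaImport<I>(I);
//     impl<I: BlockImport> BlockImport for GrandpaImport<I> {
//         fn import_block(&self, block: &str) -> Result<(), String> {
//             // observe authority-set changes, then delegate inward
//             self.0.import_block(block)
//         }
//     }
//
//     struct BabeImport<I>(I);
//     impl<I: BlockImport> BlockImport for BabeImport<I> {
//         fn import_block(&self, block: &str) -> Result<(), String> {
//             // track epoch transitions, then delegate inward
//             self.0.import_block(block)
//         }
//     }
//
//     // BabeImport(GrandpaImport(Client)) mirrors babe::block_import wrapping
//     // grandpa::block_import wrapping the client in the closures above.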
.with_rpc_extensions(|client, pool| -> RpcExtension { node_rpc::create(client, pool) })?; - (builder, import_setup, inherent_data_providers, tasks_to_spawn) + (builder, import_setup, inherent_data_providers) }} } @@ -96,7 +99,7 @@ macro_rules! new_full_start { /// We need to use a macro because the test suit doesn't work with an opaque service. It expects /// concrete types instead. macro_rules! new_full { - ($config:expr) => {{ + ($config:expr, $with_startup_data: expr) => {{ use futures::sync::mpsc; use network::DhtEvent; @@ -112,7 +115,7 @@ macro_rules! new_full { $config.disable_grandpa ); - let (builder, mut import_setup, inherent_data_providers, tasks_to_spawn) = new_full_start!($config); + let (builder, mut import_setup, inherent_data_providers) = new_full_start!($config); // Dht event channel from the network to the authority discovery module. Use bounded channel to ensure // back-pressure. Authority discovery is triggering one event per authority within the current authority set. @@ -128,11 +131,10 @@ macro_rules! new_full { .with_dht_event_tx(dht_event_tx)? .build()?; - let (block_import, link_half, babe_link) = import_setup.take() + let (block_import, grandpa_link, babe_link) = import_setup.take() .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); - // spawn any futures that were created in the previous setup steps - tasks_to_spawn.into_iter().for_each(|t| service.spawn_task(t)); + ($with_startup_data)(&block_import, &babe_link); if is_authority { let proposer = substrate_basic_authorship::ProposerFactory { @@ -145,16 +147,15 @@ macro_rules! new_full { .ok_or(substrate_service::Error::SelectChainRequired)?; let babe_config = babe::BabeParams { - config: babe::Config::get_or_compute(&*client)?, keystore: service.keystore(), client, select_chain, - block_import, env: proposer, + block_import, sync_oracle: service.network(), inherent_data_providers: inherent_data_providers.clone(), - force_authoring: force_authoring, - time_source: babe_link, + force_authoring, + babe_link, }; let babe = babe::start_babe(babe_config)?; @@ -181,7 +182,7 @@ macro_rules! new_full { // start the lightweight GRANDPA observer service.spawn_task(Box::new(grandpa::run_grandpa_observer( config, - link_half, + grandpa_link, service.network(), service.on_exit(), )?)); @@ -190,7 +191,7 @@ macro_rules! new_full { // start the full GRANDPA voter let grandpa_config = grandpa::GrandpaParams { config: config, - link: link_half, + link: grandpa_link, network: service.network(), inherent_data_providers: inherent_data_providers.clone(), on_exit: service.on_exit(), @@ -208,6 +209,9 @@ macro_rules! new_full { } Ok((service, inherent_data_providers)) + }}; + ($config:expr) => {{ + new_full!($config, |_, _| {}) }} } @@ -220,11 +224,8 @@ pub fn new_full(config: Configuration(config: Configuration) -> Result { - use futures::Future; - type RpcExtension = jsonrpc_core::IoHandler; let inherent_data_providers = InherentDataProviders::new(); - let mut tasks_to_spawn = Vec::new(); let service = ServiceBuilder::new_light::(config)? 
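// `new_full!` now takes an optional `$with_startup_data` hook, which the service
// tests below use to capture the block import and BABE link; the one-argument
// arm simply forwards a no-op closure. The delegation pattern in isolation (toy
// names, not the service macro itself):
//
//     macro_rules! build {
//         ($setup:expr, $hook:expr) => {{
//             let (import, link) = $setup; // stand-in for the real builder output
//             ($hook)(&import, &link);
//             (import, link)
//         }};
//         ($setup:expr) => {{
//             build!($setup, |_, _| {})
//         }};
//     }
//
//     // build!((1, 2)) and build!((1, 2), |i, l| println!("{}, {}", i, l))
//     // behave identically except for the extra callback.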
.with_select_chain(|_config, backend| { @@ -233,31 +234,35 @@ pub fn new_light(config: Configuration( + let grandpa_block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, _>( client.clone(), backend, Arc::new(fetch_checker), client.clone() )?; - let finality_proof_import = block_import.clone(); + let finality_proof_import = grandpa_block_import.clone(); let finality_proof_request_builder = finality_proof_import.create_finality_proof_request_builder(); - let (import_queue, _, _, pruning_task) = import_queue( - Config::get_or_compute(&*client)?, - block_import, + let (babe_block_import, babe_link) = babe::block_import( + babe::Config::get_or_compute(&*client)?, + grandpa_block_import, + client.clone(), + client.clone(), + )?; + + let import_queue = babe::import_queue( + babe_link, + babe_block_import, None, Some(Box::new(finality_proof_import)), client.clone(), client, inherent_data_providers.clone(), - Some(transaction_pool) )?; - tasks_to_spawn.push(Box::new(pruning_task)); - Ok((import_queue, finality_proof_request_builder)) })? .with_network_protocol(|_| Ok(NodeProtocol::new()))? @@ -269,15 +274,6 @@ pub fn new_light(config: Configuration, + babe_link: &babe::BabeLink, + | { + setup_handles = Some((block_import.clone(), babe_link.clone())); + }).map(move |(node, x)| (node, (x, setup_handles.unwrap()))) + }, + |mut config| { + // light nodes are unsupported + config.roles = Roles::FULL; + new_full(config) + }, + |service, &mut (ref inherent_data_providers, (ref mut block_import, ref babe_link))| { let mut inherent_data = inherent_data_providers .create_inherent_data() .expect("Creates inherent data."); @@ -411,8 +427,8 @@ mod tests { slot_num, &parent_header, &*service.client(), - PRIMARY_PROBABILITY, &keystore, + &babe_link, ) { break babe_pre_digest; } @@ -440,7 +456,7 @@ mod tests { ); slot_num += 1; - BlockImportParams { + let params = BlockImportParams { origin: BlockOrigin::File, header: new_header, justification: None, @@ -449,7 +465,10 @@ mod tests { finalized: true, auxiliary: Vec::new(), fork_choice: ForkChoiceStrategy::LongestChain, - } + }; + + block_import.import_block(params, Default::default()) + .expect("error importing test block"); }, |service, _| { let amount = 5 * CENTS; @@ -506,7 +525,11 @@ mod tests { service_test::consensus( crate::chain_spec::tests::integration_test_config_with_two_authorities(), |config| new_full(config), - |config| new_light(config), + |mut config| { + // light nodes are unsupported + config.roles = Roles::FULL; + new_full(config) + }, vec![ "//Alice".into(), "//Bob".into(), diff --git a/node/runtime/src/lib.rs b/node/runtime/src/lib.rs index 23f4d3845a80e..642040f9bb7e4 100644 --- a/node/runtime/src/lib.rs +++ b/node/runtime/src/lib.rs @@ -29,7 +29,7 @@ use node_primitives::{ AccountId, AccountIndex, Balance, BlockNumber, Hash, Index, Moment, Signature, ContractExecResult, }; -use babe::{AuthorityId as BabeId}; +use babe_primitives::{AuthorityId as BabeId}; use grandpa::fg_primitives::{self, ScheduledChange}; use client::{ block_builder::api::{self as block_builder_api, InherentData, CheckInherentsResult}, @@ -84,8 +84,8 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to equal spec_version. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. 
- spec_version: 161, - impl_version: 161, + spec_version: 162, + impl_version: 162, apis: RUNTIME_API_VERSIONS, }; @@ -223,7 +223,7 @@ impl session::Trait for Runtime { type ShouldEndSession = Babe; type Event = Event; type Keys = SessionKeys; - type ValidatorId = AccountId; + type ValidatorId = ::AccountId; type ValidatorIdOf = staking::StashOf; type SelectInitialValidators = Staking; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; @@ -614,27 +614,19 @@ impl_runtime_apis! { } impl babe_primitives::BabeApi for Runtime { - fn startup_data() -> babe_primitives::BabeConfiguration { + fn configuration() -> babe_primitives::BabeConfiguration { // The choice of `c` parameter (where `1 - c` represents the // probability of a slot being empty), is done in accordance to the // slot duration and expected target block time, for safely // resisting network delays of maximum two seconds. // babe_primitives::BabeConfiguration { - median_required_blocks: 1000, slot_duration: Babe::slot_duration(), + epoch_length: EpochDuration::get(), c: PRIMARY_PROBABILITY, - } - } - - fn epoch() -> babe_primitives::Epoch { - babe_primitives::Epoch { - start_slot: Babe::epoch_start_slot(), - authorities: Babe::authorities(), - epoch_index: Babe::epoch_index(), + genesis_authorities: Babe::authorities(), randomness: Babe::randomness(), - duration: EpochDuration::get(), - secondary_slots: Babe::secondary_slots().0, + secondary_slots: true, } } } diff --git a/srml/babe/Cargo.toml b/srml/babe/Cargo.toml index 3819bb14c7b1d..1cce8386854af 100644 --- a/srml/babe/Cargo.toml +++ b/srml/babe/Cargo.toml @@ -22,7 +22,9 @@ runtime-io ={ package = "sr-io", path = "../../core/sr-io", default-features = f [dev-dependencies] lazy_static = "1.3.0" parking_lot = "0.9.0" +sr-version = { path = "../../core/sr-version", default-features = false } primitives = { package = "substrate-primitives", path = "../../core/primitives" } +test-runtime = { package = "substrate-test-runtime", path = "../../core/test-runtime" } [features] default = ["std"] diff --git a/srml/babe/src/lib.rs b/srml/babe/src/lib.rs index 17c405dc25996..a08ccd18886ff 100644 --- a/srml/babe/src/lib.rs +++ b/srml/babe/src/lib.rs @@ -18,32 +18,36 @@ //! from VRF outputs and manages epoch transitions. #![cfg_attr(not(feature = "std"), no_std)] -#![forbid(unused_must_use, unsafe_code, unused_variables)] - -// TODO: @marcio uncomment this when BabeEquivocation is integrated. 
-// #![forbid(dead_code)]
-
+#![forbid(unused_must_use, unsafe_code, unused_variables)]
+#![deny(unused_imports)]
 pub use timestamp;

 use rstd::{result, prelude::*};
 use support::{decl_storage, decl_module, StorageValue, StorageMap, traits::FindAuthor, traits::Get};
-use timestamp::{OnTimestampSet};
+use timestamp::OnTimestampSet;
 use sr_primitives::{generic::DigestItem, ConsensusEngineId, Perbill};
 use sr_primitives::traits::{IsMember, SaturatedConversion, Saturating, RandomnessBeacon};
 use sr_staking_primitives::{
     SessionIndex,
     offence::{Offence, Kind},
 };
-use sr_primitives::weights::SimpleDispatchInfo;
 #[cfg(feature = "std")]
 use timestamp::TimestampInherentData;
 use codec::{Encode, Decode};
 use inherents::{RuntimeString, InherentIdentifier, InherentData, ProvideInherent, MakeFatalError};
 #[cfg(feature = "std")]
 use inherents::{InherentDataProviders, ProvideInherentData};
-use babe_primitives::{BABE_ENGINE_ID, ConsensusLog, BabeAuthorityWeight, Epoch, RawBabePreDigest};
+use babe_primitives::{
+	BABE_ENGINE_ID, ConsensusLog, BabeAuthorityWeight, NextEpochDescriptor, RawBabePreDigest,
+	SlotNumber,
+};
 pub use babe_primitives::{AuthorityId, VRF_OUTPUT_LENGTH, PUBLIC_KEY_LENGTH};
-use system::ensure_root;
+
+#[cfg(all(feature = "std", test))]
+mod tests;
+
+#[cfg(all(feature = "std", test))]
+mod mock;

 /// The BABE inherent identifier.
 pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"babeslot";
@@ -118,7 +122,7 @@ impl ProvideInherentData for InherentDataProvider {
 }

 pub trait Trait: timestamp::Trait {
-	type EpochDuration: Get<u64>;
+	type EpochDuration: Get<SlotNumber>;
 	type ExpectedBlockTime: Get<Self::Moment>;
 }

@@ -127,6 +131,8 @@ pub const RANDOMNESS_LENGTH: usize = 32;

 const UNDER_CONSTRUCTION_SEGMENT_LENGTH: usize = 256;

+type MaybeVrf = Option<[u8; 32 /* VRF_OUTPUT_LENGTH */]>;
+
 decl_storage! {
 	trait Store for Module<T: Trait> as Babe {
 		/// Current epoch index.
@@ -135,22 +141,13 @@ decl_storage! {
 		/// Current epoch authorities.
 		pub Authorities get(authorities): Vec<(AuthorityId, BabeAuthorityWeight)>;

-		/// Slot at which the current epoch started. It is possible that no
-		/// block was authored at the given slot and the epoch change was
-		/// signalled later than this.
-		pub EpochStartSlot get(epoch_start_slot): u64;
+		/// The slot at which the first epoch actually started. This is 0
+		/// until the first block of the chain.
+		pub GenesisSlot get(genesis_slot): u64;

 		/// Current slot number.
 		pub CurrentSlot get(current_slot): u64;

-		/// Whether secondary slots are enabled in case the VRF-based slot is
-		/// empty for the current epoch and the next epoch, respectively.
-		pub SecondarySlots get(secondary_slots): (bool, bool) = (true, true);
-
-		/// Pending change to enable/disable secondary slots which will be
-		/// triggered at `current_epoch + 2`.
-		pub PendingSecondarySlotsChange get(pending_secondary_slots_change): Option<bool> = None;
-
 		/// The epoch randomness for the *current* epoch.
 		///
 		/// # Security
@@ -181,9 +178,9 @@ decl_storage! {
 		SegmentIndex build(|_| 0): u32;
 		UnderConstruction: map u32 => Vec<[u8; 32 /* VRF_OUTPUT_LENGTH */]>;

-		/// Temporary value (cleared at block finalization) which is true
+		/// Temporary value (cleared at block finalization) which is `Some`
 		/// if per-block initialization has already been called for current block.
-		Initialized get(initialized): Option<bool>;
+		Initialized get(initialized): Option<MaybeVrf>;
 	}
 	add_extra_genesis {
 		config(authorities): Vec<(AuthorityId, BabeAuthorityWeight)>;
@@ -212,20 +209,13 @@ decl_module!
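// The `Initialized` item above is an Option<MaybeVrf>, i.e. an
// Option<Option<[u8; 32]>>, and each level answers a different question:
//
//     None            => do_initialize() has not yet run for this block
//     Some(None)      => initialized, but the block carried no primary VRF output
//     Some(Some(vrf)) => initialized; `vrf` is banked until block finalization
//
// A minimal sketch of the finalization hand-off, with plain values standing in
// for the storage items:
//
//     type MaybeVrf = Option<[u8; 32]>;
//
//     fn on_finalize(initialized: &mut Option<MaybeVrf>, under_construction: &mut Vec<[u8; 32]>) {
//         // take() resets to None for the next block, mirroring Initialized::take()
//         if let Some(Some(vrf_output)) = initialized.take() {
//             under_construction.push(vrf_output);
//         }
//     }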
 {
 		/// Block finalization
 		fn on_finalize() {
-			Initialized::kill();
-		}
-
-		/// Sets a pending change to enable / disable secondary slot assignment.
-		/// The pending change will be set at the end of the current epoch and
-		/// will be enacted at `current_epoch + 2`.
-		#[weight = SimpleDispatchInfo::FixedOperational(10_000)]
-		fn set_pending_secondary_slots_change(origin, change: Option<bool>) {
-			ensure_root(origin)?;
-			match change {
-				Some(change) => PendingSecondarySlotsChange::put(change),
-				None => {
-					PendingSecondarySlotsChange::take();
-				},
+			// at the end of the block, we can safely include the new VRF output
+			// from this block into the under-construction randomness. If we've determined
+			// that this block was the first in a new epoch, the changeover logic has
+			// already occurred at this point, so the under-construction randomness
+			// will only contain outputs from the right epoch.
+			if let Some(Some(vrf_output)) = Initialized::take() {
+				Self::deposit_vrf_output(&vrf_output);
 			}
 		}
 	}
@@ -269,15 +259,25 @@ impl IsMember for Module {
 }

 impl<T: Trait> session::ShouldEndSession<T::BlockNumber> for Module<T> {
-	fn should_end_session(_: T::BlockNumber) -> bool {
+	fn should_end_session(now: T::BlockNumber) -> bool {
 		// it might be (and it is in current implementation) that session module is calling
 		// should_end_session() from its own on_initialize() handler
 		// => because session on_initialize() is called earlier than ours, let's ensure
 		// that we have synced with digest before checking if session should be ended
 		Self::do_initialize();

-		let diff = CurrentSlot::get().saturating_sub(EpochStartSlot::get());
-		diff >= T::EpochDuration::get()
+		// The session has technically ended during the passage of time
+		// between this block and the last, but we have to "end" the session now,
+		// since there is no earlier block at which we could have done it.
+		//
+		// The exception is for block 1: the genesis has slot 0, so we treat
+		// epoch 0 as having started at the slot of block 1. We want to use
+		// the same randomness and validator set as signalled in the genesis,
+		// so we don't rotate the session.
+		now != sr_primitives::traits::One::one() && {
+			let diff = CurrentSlot::get().saturating_sub(Self::current_epoch_start());
+			diff >= T::EpochDuration::get()
+		}
 	}
 }

@@ -336,15 +336,18 @@ impl Module {
 		<T as timestamp::Trait>::MinimumPeriod::get().saturating_mul(2.into())
 	}

+	// finds the start slot of the current epoch. only guaranteed to
+	// give correct results after `do_initialize` of the first block
+	// in the chain (as its result is based on `GenesisSlot`).
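// Concretely: epoch_start = epoch_index * epoch_length + genesis_slot, as
// computed by `current_epoch_start` just below. With the EpochDuration of 3
// used by the mock runtime later in this diff and the genesis_slot of 100 used
// by its tests, slots 100..=102 form epoch 0 and slot 103 begins epoch 1. A
// standalone check of the boundary logic, with plain u64 values standing in for
// the storage items:
//
//     fn epoch_start(epoch_index: u64, epoch_length: u64, genesis_slot: u64) -> u64 {
//         epoch_index * epoch_length + genesis_slot
//     }
//
//     fn should_end(now_block: u64, current_slot: u64, epoch_index: u64,
//                   epoch_length: u64, genesis_slot: u64) -> bool {
//         // block #1 never rotates the session, matching should_end_session above
//         now_block != 1 && current_slot
//             .saturating_sub(epoch_start(epoch_index, epoch_length, genesis_slot))
//             >= epoch_length
//     }
//
//     assert!(!should_end(2, 102, 0, 3, 100)); // still inside epoch 0
//     assert!(should_end(3, 103, 0, 3, 100));  // first slot of epoch 1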
+ fn current_epoch_start() -> SlotNumber { + (EpochIndex::get() * T::EpochDuration::get()) + GenesisSlot::get() + } + fn deposit_consensus(new: U) { let log: DigestItem = DigestItem::Consensus(BABE_ENGINE_ID, new.encode()); >::deposit_log(log.into()) } - fn get_inherent_digests() -> system::DigestOf { - >::digest() - } - fn deposit_vrf_output(vrf_output: &[u8; VRF_OUTPUT_LENGTH]) { let segment_idx = ::get(); let mut segment = ::get(&segment_idx); @@ -363,13 +366,12 @@ impl Module { fn do_initialize() { // since do_initialize can be called twice (if session module is present) // => let's ensure that we only modify the storage once per block - let initialized = Self::initialized().unwrap_or(false); + let initialized = Self::initialized().is_some(); if initialized { return; } - Initialized::put(true); - for digest in Self::get_inherent_digests() + let maybe_pre_digest = >::digest() .logs .iter() .filter_map(|s| s.as_pre_runtime()) @@ -378,19 +380,40 @@ impl Module { } else { None }) - { - if EpochStartSlot::get() == 0 { - EpochStartSlot::put(digest.slot_number()); + .next(); + + let maybe_vrf = maybe_pre_digest.and_then(|digest| { + // on the first non-zero block (i.e. block #1) + // this is where the first epoch (epoch #0) actually starts. + // we need to adjust internal storage accordingly. + if GenesisSlot::get() == 0 { + GenesisSlot::put(digest.slot_number()); + debug_assert_ne!(GenesisSlot::get(), 0); + + // deposit a log because this is the first block in epoch #0 + // we use the same values as genesis because we haven't collected any + // randomness yet. + let next = NextEpochDescriptor { + authorities: Self::authorities(), + randomness: Self::randomness(), + }; + + Self::deposit_consensus(ConsensusLog::NextEpochData(next)) } CurrentSlot::put(digest.slot_number()); if let RawBabePreDigest::Primary { vrf_output, .. } = digest { - Self::deposit_vrf_output(&vrf_output); + // place the VRF output into the `Initialized` storage item + // and it'll be put onto the under-construction randomness + // later, once we've decided which epoch this block is in. + Some(vrf_output) + } else { + None } + }); - return; - } + Initialized::put(maybe_vrf); } /// Call this function exactly once when an epoch changes, to update the @@ -437,7 +460,12 @@ impl session::OneSessionHandler for Module { fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, queued_validators: I) where I: Iterator { - Self::do_initialize(); + // PRECONDITION: `should_end_session` has done initialization and is guaranteed + // by the session module to be called before this. + #[cfg(debug_assertions)] + { + assert!(Self::initialized().is_some()) + } // Update epoch index let epoch_index = EpochIndex::get() @@ -453,21 +481,6 @@ impl session::OneSessionHandler for Module { Authorities::put(authorities); - // Update epoch start slot. - let now = CurrentSlot::get(); - EpochStartSlot::mutate(|previous| { - loop { - // on the first epoch we must account for skipping at least one - // whole epoch, in case the first block is authored with a slot - // number far in the past. - if now.saturating_sub(*previous) < T::EpochDuration::get() { - break; - } - - *previous = previous.saturating_add(T::EpochDuration::get()); - } - }); - // Update epoch randomness. 
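// The body of deposit_vrf_output is cut off by the hunk boundary above; the
// scheme it maintains is declared earlier in this module: UnderConstruction
// maps a u32 segment index to a Vec of VRF outputs, each segment capped at
// UNDER_CONSTRUCTION_SEGMENT_LENGTH = 256. A sketch of that scheme with a
// Vec-of-Vecs standing in for the storage map (an approximation, not the
// module's exact code):
//
//     const SEGMENT_LENGTH: usize = 256;
//
//     fn deposit(segments: &mut Vec<Vec<[u8; 32]>>, vrf_output: [u8; 32]) {
//         match segments.last_mut() {
//             // room left in the current segment: append in place
//             Some(segment) if segment.len() < SEGMENT_LENGTH => segment.push(vrf_output),
//             // otherwise start a fresh segment (a new segment index in storage)
//             _ => segments.push(vec![vrf_output]),
//         }
//     }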
let next_epoch_index = epoch_index .checked_add(1) @@ -484,34 +497,11 @@ impl session::OneSessionHandler for Module { (k, 1) }).collect::>(); - let next_epoch_start_slot = EpochStartSlot::get().saturating_add(T::EpochDuration::get()); let next_randomness = NextRandomness::get(); - // Update any pending secondary slots change - let mut secondary_slots = SecondarySlots::get(); - - // change for E + 1 now becomes change at E - secondary_slots.0 = secondary_slots.1; - - if let Some(change) = PendingSecondarySlotsChange::take() { - // if there's a pending change schedule it for E + 1 - secondary_slots.1 = change; - } else { - // otherwise E + 1 will have the same value as E - secondary_slots.1 = secondary_slots.0; - } - - SecondarySlots::mutate(|secondary| { - *secondary = secondary_slots; - }); - - let next = Epoch { - epoch_index: next_epoch_index, - start_slot: next_epoch_start_slot, - duration: T::EpochDuration::get(), + let next = NextEpochDescriptor { authorities: next_authorities, randomness: next_randomness, - secondary_slots: secondary_slots.1, }; Self::deposit_consensus(ConsensusLog::NextEpochData(next)) diff --git a/srml/babe/src/mock.rs b/srml/babe/src/mock.rs new file mode 100644 index 0000000000000..741f08fc0845c --- /dev/null +++ b/srml/babe/src/mock.rs @@ -0,0 +1,112 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Test utilities +#![allow(dead_code, unused_imports)] + +use super::{Trait, Module, GenesisConfig}; +use babe_primitives::AuthorityId; +use sr_primitives::{ + traits::IdentityLookup, Perbill, + testing::{Header, UintAuthorityId}, + impl_opaque_keys, key_types::DUMMY, +}; +use sr_version::RuntimeVersion; +use support::{impl_outer_origin, parameter_types}; +use runtime_io; +use primitives::{H256, Blake2Hasher}; + +impl_outer_origin!{ + pub enum Origin for Test {} +} + +type DummyValidatorId = u64; + +// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Test; + +parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: u32 = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const MinimumPeriod: u64 = 1; + pub const EpochDuration: u64 = 3; + pub const ExpectedBlockTime: u64 = 1; + pub const Version: RuntimeVersion = test_runtime::VERSION; + pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(16); +} + +impl system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Version = Version; + type Hashing = sr_primitives::traits::BlakeTwo256; + type AccountId = DummyValidatorId; + type Lookup = IdentityLookup; + type Header = Header; + type WeightMultiplierUpdate = (); + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; +} + +impl_opaque_keys! { + pub struct MockSessionKeys { + #[id(DUMMY)] + pub dummy: UintAuthorityId, + } +} + +impl session::Trait for Test { + type Event = (); + type ValidatorId = ::AccountId; + type ShouldEndSession = Babe; + type SessionHandler = (Babe,Babe,); + type OnSessionEnding = (); + type ValidatorIdOf = (); + type SelectInitialValidators = (); + type Keys = MockSessionKeys; + type DisabledValidatorsThreshold = DisabledValidatorsThreshold; +} + +impl timestamp::Trait for Test { + type Moment = u64; + type OnTimestampSet = Babe; + type MinimumPeriod = MinimumPeriod; +} + +impl Trait for Test { + type EpochDuration = EpochDuration; + type ExpectedBlockTime = ExpectedBlockTime; +} + +pub fn new_test_ext(authorities: Vec) -> runtime_io::TestExternalities { + let mut t = system::GenesisConfig::default().build_storage::().unwrap(); + GenesisConfig { + authorities: authorities.into_iter().map(|a| (UintAuthorityId(a).to_public_key(), 1)).collect(), + }.assimilate_storage::(&mut t).unwrap(); + t.into() +} + +pub type System = system::Module; +pub type Babe = Module; diff --git a/srml/babe/src/tests.rs b/srml/babe/src/tests.rs new file mode 100644 index 0000000000000..ef449485b77ed --- /dev/null +++ b/srml/babe/src/tests.rs @@ -0,0 +1,127 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Consensus extension module tests for BABE consensus. 
+ +use super::*; +use runtime_io::with_externalities; +use mock::{new_test_ext, Babe, Test}; +use sr_primitives::{traits::OnFinalize, testing::{Digest, DigestItem}}; +use session::ShouldEndSession; + +const EMPTY_RANDOMNESS: [u8; 32] = [ + 74, 25, 49, 128, 53, 97, 244, 49, + 222, 202, 176, 2, 231, 66, 95, 10, + 133, 49, 213, 228, 86, 161, 164, 127, + 217, 153, 138, 37, 48, 192, 248, 0, +]; + +fn make_pre_digest( + authority_index: babe_primitives::AuthorityIndex, + slot_number: babe_primitives::SlotNumber, + vrf_output: [u8; babe_primitives::VRF_OUTPUT_LENGTH], + vrf_proof: [u8; babe_primitives::VRF_PROOF_LENGTH], +) -> Digest { + let digest_data = babe_primitives::RawBabePreDigest::Primary { + authority_index, + slot_number, + vrf_output, + vrf_proof, + }; + let log = DigestItem::PreRuntime(babe_primitives::BABE_ENGINE_ID, digest_data.encode()); + Digest { logs: vec![log] } +} + +#[test] +fn empty_randomness_is_correct() { + let s = compute_randomness([0; RANDOMNESS_LENGTH], 0, std::iter::empty(), None); + assert_eq!(s, EMPTY_RANDOMNESS); +} + +#[test] +fn initial_values() { + with_externalities(&mut new_test_ext(vec![0, 1, 2, 3]), || { + assert_eq!(Babe::authorities().len(), 4) + }) +} + +#[test] +fn check_module() { + with_externalities(&mut new_test_ext(vec![0, 1, 2, 3]), || { + assert!(!Babe::should_end_session(0), "Genesis does not change sessions"); + assert!(!Babe::should_end_session(200000), + "BABE does not include the block number in epoch calculations"); + }) +} + +type System = system::Module; + +#[test] +fn first_block_epoch_zero_start() { + with_externalities(&mut new_test_ext(vec![0, 1, 2, 3]), || { + let genesis_slot = 100; + let first_vrf = [1; 32]; + let pre_digest = make_pre_digest( + 0, + genesis_slot, + first_vrf, + [0xff; 64], + ); + + assert_eq!(Babe::genesis_slot(), 0); + System::initialize(&1, &Default::default(), &Default::default(), &pre_digest); + + // see implementation of the function for details why: we issue an + // epoch-change digest but don't do it via the normal session mechanism. + assert!(!Babe::should_end_session(1)); + assert_eq!(Babe::genesis_slot(), genesis_slot); + assert_eq!(Babe::current_slot(), genesis_slot); + assert_eq!(Babe::epoch_index(), 0); + + Babe::on_finalize(1); + let header = System::finalize(); + + assert_eq!(SegmentIndex::get(), 0); + assert_eq!(UnderConstruction::get(0), vec![first_vrf]); + assert_eq!(Babe::randomness(), [0; 32]); + assert_eq!(NextRandomness::get(), [0; 32]); + + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(pre_digest.logs.len(), 1); + assert_eq!(header.digest.logs[0], pre_digest.logs[0]); + + let authorities = Babe::authorities(); + let consensus_log = babe_primitives::ConsensusLog::NextEpochData( + babe_primitives::NextEpochDescriptor { + authorities, + randomness: Babe::randomness(), + } + ); + let consensus_digest = DigestItem::Consensus(BABE_ENGINE_ID, consensus_log.encode()); + + // first epoch descriptor has same info as last. + assert_eq!(header.digest.logs[1], consensus_digest.clone()) + }) +} + +#[test] +fn authority_index() { + with_externalities(&mut new_test_ext(vec![0, 1, 2, 3]), || { + assert_eq!( + Babe::find_author((&[(BABE_ENGINE_ID, &[][..])]).into_iter().cloned()), None, + "Trivially invalid authorities are ignored") + }) +}
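// `compute_randomness` itself is not shown in this diff. A sketch consistent
// with the empty_randomness_is_correct test above, assuming the accumulator
// blake2-256 hashes the previous epoch randomness, the epoch index, and each
// collected VRF output (the scheme and the omitted size-hint argument are
// assumptions, not code from the module):
//
//     fn compute_randomness_sketch(
//         last_epoch_randomness: [u8; 32],
//         epoch_index: u64,
//         vrf_outputs: impl Iterator<Item = [u8; 32]>,
//     ) -> [u8; 32] {
//         let mut buf = Vec::with_capacity(40);
//         buf.extend_from_slice(&last_epoch_randomness);
//         buf.extend_from_slice(&epoch_index.to_le_bytes());
//         for vrf_output in vrf_outputs {
//             buf.extend_from_slice(&vrf_output);
//         }
//         runtime_io::blake2_256(&buf)
//     }
//
//     // With all-zero inputs and no VRF outputs this hashes 40 zero bytes,
//     // which is what EMPTY_RANDOMNESS pins down.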