From 3529efa9748d7046bdcbf95a98d0514b0a04934f Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Tue, 17 Dec 2019 11:04:07 +0100 Subject: [PATCH 01/75] Create folder structures --- client/consensus/habe/Cargo.toml | 6 ++++++ client/consensus/habe/src/lib.rs | 0 primitives/consensus/habe/Cargo.toml | 12 ++++++++++++ primitives/consensus/habe/src/lib.rs | 0 4 files changed, 18 insertions(+) create mode 100644 client/consensus/habe/Cargo.toml create mode 100644 client/consensus/habe/src/lib.rs create mode 100644 primitives/consensus/habe/Cargo.toml create mode 100644 primitives/consensus/habe/src/lib.rs diff --git a/client/consensus/habe/Cargo.toml b/client/consensus/habe/Cargo.toml new file mode 100644 index 0000000000000..84e73295dcc77 --- /dev/null +++ b/client/consensus/habe/Cargo.toml @@ -0,0 +1,6 @@ +[package] +name = "sc-consensus-habe" +version = "2.0.0" +authors = ["Parity Technologies "] +description = "HABE consensus algorithm for substrate" +edition = "2018" diff --git a/client/consensus/habe/src/lib.rs b/client/consensus/habe/src/lib.rs new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/primitives/consensus/habe/Cargo.toml b/primitives/consensus/habe/Cargo.toml new file mode 100644 index 0000000000000..560636cbf35d9 --- /dev/null +++ b/primitives/consensus/habe/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "sp-consensus-habe" +version = "2.0.0" +authors = ["Parity Technologies "] +description = "Primitives for HABE consensus" +edition = "2018" + +[features] +default = ["std"] +std = [ + +] diff --git a/primitives/consensus/habe/src/lib.rs b/primitives/consensus/habe/src/lib.rs new file mode 100644 index 0000000000000..e69de29bb2d1d From 78bbf4e52f41b45b9e989d0dab585d983a497612 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Thu, 9 Jan 2020 20:29:48 +0100 Subject: [PATCH 02/75] Init all necessary sassafras primitives --- Cargo.lock | 13 +++ Cargo.toml | 1 + primitives/consensus/babe/src/digest.rs | 29 +++--- primitives/consensus/babe/src/lib.rs 
| 5 +- primitives/consensus/habe/Cargo.toml | 12 --- primitives/consensus/habe/src/lib.rs | 0 primitives/consensus/sassafras/Cargo.toml | 27 ++++++ primitives/consensus/sassafras/src/digest.rs | 57 ++++++++++++ .../consensus/sassafras/src/inherents.rs | 91 +++++++++++++++++++ primitives/consensus/sassafras/src/lib.rs | 67 ++++++++++++++ primitives/core/src/crypto.rs | 6 +- 11 files changed, 277 insertions(+), 31 deletions(-) delete mode 100644 primitives/consensus/habe/Cargo.toml delete mode 100644 primitives/consensus/habe/src/lib.rs create mode 100644 primitives/consensus/sassafras/Cargo.toml create mode 100644 primitives/consensus/sassafras/src/digest.rs create mode 100644 primitives/consensus/sassafras/src/inherents.rs create mode 100644 primitives/consensus/sassafras/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 7bb1f65567d3c..0d0bf86add71c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6202,6 +6202,19 @@ dependencies = [ "sp-std 2.0.0", ] +[[package]] +name = "sp-consensus-sassafras" +version = "2.0.0" +dependencies = [ + "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "schnorrkel 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-application-crypto 2.0.0", + "sp-inherents 2.0.0", + "sp-runtime 2.0.0", + "sp-std 2.0.0", + "sp-timestamp 2.0.0", +] + [[package]] name = "sp-core" version = "2.0.0" diff --git a/Cargo.toml b/Cargo.toml index 4aabe5916f411..ce5d15695e8f8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -103,6 +103,7 @@ members = [ "primitives/consensus/babe", "primitives/consensus/common", "primitives/consensus/pow", + "primitives/consensus/sassafras", "primitives/core", "primitives/debug-derive", "primitives/storage", diff --git a/primitives/consensus/babe/src/digest.rs b/primitives/consensus/babe/src/digest.rs index 343cec4db79aa..5c4d23f0f3b08 100644 --- a/primitives/consensus/babe/src/digest.rs +++ b/primitives/consensus/babe/src/digest.rs @@ -41,7 +41,7 @@ use sp_std::vec::Vec; /// (VRF 
based) and to a secondary (slot number based). #[cfg(feature = "std")] #[derive(Clone, Debug)] -pub enum BabePreDigest { +pub enum PreDigest { /// A primary VRF-based slot assignment. Primary { /// VRF output @@ -63,7 +63,7 @@ pub enum BabePreDigest { } #[cfg(feature = "std")] -impl BabePreDigest { +impl PreDigest { /// Returns the slot number of the pre digest. pub fn authority_index(&self) -> AuthorityIndex { match self { @@ -90,12 +90,9 @@ impl BabePreDigest { } } -/// The prefix used by BABE for its VRF keys. -pub const BABE_VRF_PREFIX: &[u8] = b"substrate-babe-vrf"; - /// A raw version of `BabePreDigest`, usable on `no_std`. #[derive(Copy, Clone, Encode, Decode)] -pub enum RawBabePreDigest { +pub enum RawPreDigest { /// A primary VRF-based slot assignment. #[codec(index = "1")] Primary { @@ -123,7 +120,7 @@ pub enum RawBabePreDigest { }, } -impl RawBabePreDigest { +impl RawPreDigest { /// Returns the slot number of the pre digest. pub fn slot_number(&self) -> SlotNumber { match self { @@ -134,27 +131,27 @@ impl RawBabePreDigest { } #[cfg(feature = "std")] -impl Encode for BabePreDigest { +impl Encode for PreDigest { fn encode(&self) -> Vec { let raw = match self { - BabePreDigest::Primary { + PreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number, } => { - RawBabePreDigest::Primary { + RawPreDigest::Primary { vrf_output: *vrf_output.as_bytes(), vrf_proof: vrf_proof.to_bytes(), authority_index: *authority_index, slot_number: *slot_number, } }, - BabePreDigest::Secondary { + PreDigest::Secondary { authority_index, slot_number, } => { - RawBabePreDigest::Secondary { + RawPreDigest::Secondary { authority_index: *authority_index, slot_number: *slot_number, } @@ -166,10 +163,10 @@ impl Encode for BabePreDigest { } #[cfg(feature = "std")] -impl codec::EncodeLike for BabePreDigest {} +impl codec::EncodeLike for PreDigest {} #[cfg(feature = "std")] -impl Decode for BabePreDigest { +impl Decode for PreDigest { fn decode(i: &mut R) -> Result { let 
pre_digest = match Decode::decode(i)? { RawBabePreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { @@ -208,10 +205,10 @@ pub struct NextEpochDescriptor { #[cfg(feature = "std")] pub trait CompatibleDigestItem: Sized { /// Construct a digest item which contains a BABE pre-digest. - fn babe_pre_digest(seal: BabePreDigest) -> Self; + fn babe_pre_digest(seal: PreDigest) -> Self; /// If this item is an BABE pre-digest, return it. - fn as_babe_pre_digest(&self) -> Option; + fn as_babe_pre_digest(&self) -> Option; /// Construct a digest item which contains a BABE seal. fn babe_seal(signature: AuthoritySignature) -> Self; diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 196f1be1a65d6..27eb753542701 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -19,7 +19,7 @@ #![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] #![cfg_attr(not(feature = "std"), no_std)] -mod digest; +pub mod digest; pub mod inherents; use codec::{Encode, Decode}; @@ -35,6 +35,9 @@ mod app { app_crypto!(sr25519, BABE); } +/// The prefix used by BABE for its VRF keys. +pub const BABE_VRF_PREFIX: &[u8] = b"substrate-babe-vrf"; + /// A Babe authority keypair. Necessarily equivalent to the schnorrkel public key used in /// the main Babe module. If that ever changes, then this must, too. 
#[cfg(feature = "std")] diff --git a/primitives/consensus/habe/Cargo.toml b/primitives/consensus/habe/Cargo.toml deleted file mode 100644 index 560636cbf35d9..0000000000000 --- a/primitives/consensus/habe/Cargo.toml +++ /dev/null @@ -1,12 +0,0 @@ -[package] -name = "sp-consensus-habe" -version = "2.0.0" -authors = ["Parity Technologies "] -description = "Primitives for HABE consensus" -edition = "2018" - -[features] -default = ["std"] -std = [ - -] diff --git a/primitives/consensus/habe/src/lib.rs b/primitives/consensus/habe/src/lib.rs deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml new file mode 100644 index 0000000000000..1b08e43b3095b --- /dev/null +++ b/primitives/consensus/sassafras/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "sp-consensus-sassafras" +version = "2.0.0" +authors = ["Parity Technologies "] +description = "Primitives for Sassafras consensus" +edition = "2018" + +[dependencies] +codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } +schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"], optional = true } +sp-std = { path = "../../std", default-features = false } +sp-inherents = { path = "../../inherents", default-features = false } +sp-timestamp = { path = "../../timestamp", default-features = false } +sp-runtime = { path = "../../runtime", default-features = false } +sp-application-crypto = { path = "../../application-crypto", default-features = false } + +[features] +default = ["std"] +std = [ + "codec/std", + "schnorrkel/std", + "sp-std/std", + "sp-inherents/std", + "sp-timestamp/std", + "sp-runtime/std", + "sp-application-crypto/std", +] diff --git a/primitives/consensus/sassafras/src/digest.rs b/primitives/consensus/sassafras/src/digest.rs new file mode 100644 index 0000000000000..4b319966a6f4a --- /dev/null +++ b/primitives/consensus/sassafras/src/digest.rs @@ -0,0 +1,57 @@ +// 
Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use schnorrkel::vrf::{VRFProof, VRFOutput, VRF_OUTPUT_LENGTH}; +use crate::{VRFIndex, AuthorityIndex, SlotNumber, AuthorityId, SassafrasAuthorityWeight}; + +/// A Sassafras pre-digest. The validator pre-commit a VRF proof at `vrf_index`, and now reveal it +/// as `vrf_output`. +/// +/// This digest is included in every block, generated by Sassafras consensus engine. +pub struct PreDigest { + /// Index of ticket VRF proof that has been previously committed. + pub ticket_vrf_index: VRFIndex, + /// Reveal of tocket VRF output. + pub tocket_vrf_output: VRFOutput, + /// Validator index. + pub authority_index: AuthorityIndex, + /// Corresponding slot number. + pub slot_number: SlotNumber, + /// Secondary "Post Block VRF" proof. + pub post_vrf_proof: VRFProof, + /// Secondary "Post Block VRF" output. + pub post_vrf_output: VRFOutput, +} + +/// Post-digest about next epoch information. +/// +/// This digest is generated by runtime, at the beginning of every epoch. +pub struct NextEpochDescriptor { + /// The authorities that generate VRF proofs. Note that those keys will only be generating + /// blocks two epochs later. + pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + /// Value of randomness to use for slot-assignment. 
This is expected to use the secondary "Post + /// VRF". + pub randomness: [u8; VRF_OUTPUT_LENGTH], +} + +/// Post-digest about post-block information such as ticket commitments. +/// +/// This digest is generated by runtime, optional, and can be included at every block. +pub struct PostBlockDescriptor { + /// Commitments of tickets. + pub commitments: Vec, +} diff --git a/primitives/consensus/sassafras/src/inherents.rs b/primitives/consensus/sassafras/src/inherents.rs new file mode 100644 index 0000000000000..c4bb811caa3e3 --- /dev/null +++ b/primitives/consensus/sassafras/src/inherents.rs @@ -0,0 +1,91 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Inherents for Sassafras + +use sp_inherents::{Error, InherentData, InherentIdentifier}; +#[cfg(feature = "std")] +use sp_inherents::{InherentDataProviders, ProvideInherentData}; +#[cfg(feature = "std")] +use sp_timestamp::TimestampInherentData; + +#[cfg(feature = "std")] +use codec::Decode; +use sp_std::result::Result; + +/// The Sassafras inherent identifier. +pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"sassslot"; + +/// The type of the Sassafras inherent. +pub type InherentType = u64; +/// Auxiliary trait to extract Sassafras inherent data. +pub trait SassafrasInherentData { + /// Get Sassafras inherent data. 
+ fn sassafras_inherent_data(&self) -> Result; + /// Replace Sassafras inherent data. + fn sassafras_replace_inherent_data(&mut self, new: InherentType); +} + +impl SassafrasInherentData for InherentData { + fn sassafras_inherent_data(&self) -> Result { + self.get_data(&INHERENT_IDENTIFIER) + .and_then(|r| r.ok_or_else(|| "Sassafras inherent data not found".into())) + } + + fn sassafras_replace_inherent_data(&mut self, new: InherentType) { + self.replace_data(INHERENT_IDENTIFIER, &new); + } +} + +/// Provides the slot duration inherent data for Sassafras. +#[cfg(feature = "std")] +pub struct InherentDataProvider { + slot_duration: u64, +} + +#[cfg(feature = "std")] +impl InherentDataProvider { + /// Constructs `Self` + pub fn new(slot_duration: u64) -> Self { + Self { slot_duration } + } +} + +#[cfg(feature = "std")] +impl ProvideInherentData for InherentDataProvider { + fn on_register(&self, providers: &InherentDataProviders) -> Result<(), Error> { + if !providers.has_provider(&sp_timestamp::INHERENT_IDENTIFIER) { + // Add the timestamp inherent data provider, as we require it. + providers.register_provider(sp_timestamp::InherentDataProvider) + } else { + Ok(()) + } + } + + fn inherent_identifier(&self) -> &'static InherentIdentifier { + &INHERENT_IDENTIFIER + } + + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + let timestamp = inherent_data.timestamp_inherent_data()?; + let slot_number = timestamp / self.slot_duration; + inherent_data.put_data(INHERENT_IDENTIFIER, &slot_number) + } + + fn error_to_string(&self, error: &[u8]) -> Option { + Error::decode(&mut &error[..]).map(|e| e.into_string()).ok() + } +} diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs new file mode 100644 index 0000000000000..a0cd0e5d2a248 --- /dev/null +++ b/primitives/consensus/sassafras/src/lib.rs @@ -0,0 +1,67 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Primitives for Sassafras. + +// #![deny(warnings)] +// #![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod digest; +pub mod inherents; + +use sp_runtime::ConsensusEngineId; + +mod app { + use sp_application_crypto::{app_crypto, key_types::SASSAFRAS, sr25519}; + app_crypto!(sr25519, SASSAFRAS); +} + +/// The prefix used by Sassafras for its ticket VRF keys. +pub const SASSAFRAS_TICKET_VRF_PREFIX: &[u8] = b"substrate-sassafras-ticket-vrf"; + +/// The prefix used by Sassafras for its post-block VRF keys. +pub const SASSAFRAS_POST_VRF_PREFIX: &[u8] = b"substrate-sassafras-post-vrf"; + +/// A Sassafras authority keypair, used by both ticket VRF and post-block VRF. +#[cfg(feature = "std")] +pub type AuthorityPair = app::Pair; + +/// A Sassafras authority signature. +pub type AuthoritySignature = app::Signature; + +/// A Sassafras authority identifier. +pub type AuthorityId = app::Public; + +/// The `ConsensusEngineId` of Sassafras. +pub const SASSAFRAS_ENGINE_ID: ConsensusEngineId = *b"SASS"; + +/// Index of ticket VRF. +pub type VRFIndex = u32; + +/// The index of an authority. +pub type AuthorityIndex = u32; + +/// A slot number. +pub type SlotNumber = u64; + +/// The weight of an authority. 
+// NOTE: we use a unique name for the weight to avoid conflicts with other +// `Weight` types, since the metadata isn't able to disambiguate. +pub type SassafrasAuthorityWeight = u64; + +/// The weight of a Sassafras block. +pub type SassafrasBlockWeight = u32; diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 5119203a0859d..b02b6f80a896a 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -917,9 +917,11 @@ impl<'a> TryFrom<&'a str> for KeyTypeId { pub mod key_types { use super::KeyTypeId; - /// Key type for Babe module, build-in. + /// Key type for Babe module, built-in. pub const BABE: KeyTypeId = KeyTypeId(*b"babe"); - /// Key type for Grandpa module, build-in. + /// Key type for Sassafras module, built-in. + pub const SASSAFRAS: KeyTypeId = KeyTypeId(*b"sass"); + /// Key type for Grandpa module, built-in. pub const GRANDPA: KeyTypeId = KeyTypeId(*b"gran"); /// Key type for controlling an account in a Substrate runtime, built-in. 
pub const ACCOUNT: KeyTypeId = KeyTypeId(*b"acco"); From 1ddd0229fadb9d9c5edb8fb96a43765a6fd727e3 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Thu, 9 Jan 2020 22:35:34 +0100 Subject: [PATCH 03/75] Init sassafras verifier --- Cargo.lock | 22 +++ Cargo.toml | 1 + client/consensus/babe/src/lib.rs | 2 +- client/consensus/habe/Cargo.toml | 6 - client/consensus/habe/src/lib.rs | 0 client/consensus/sassafras/Cargo.toml | 24 ++++ client/consensus/sassafras/src/aux_schema.rs | 37 +++++ client/consensus/sassafras/src/lib.rs | 139 +++++++++++++++++++ primitives/consensus/sassafras/Cargo.toml | 2 +- 9 files changed, 225 insertions(+), 8 deletions(-) delete mode 100644 client/consensus/habe/Cargo.toml delete mode 100644 client/consensus/habe/src/lib.rs create mode 100644 client/consensus/sassafras/Cargo.toml create mode 100644 client/consensus/sassafras/src/aux_schema.rs create mode 100644 client/consensus/sassafras/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 0d0bf86add71c..d9a74c0cf8e96 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5236,6 +5236,28 @@ dependencies = [ "sp-timestamp 2.0.0", ] +[[package]] +name = "sc-consensus-sassafras" +version = "2.0.0" +dependencies = [ + "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sc-client 2.0.0", + "sc-client-api 2.0.0", + "sc-consensus-slots 2.0.0", + "schnorrkel 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-block-builder 2.0.0", + "sp-blockchain 2.0.0", + "sp-consensus 2.0.0", + "sp-consensus-sassafras 2.0.0", + "sp-core 2.0.0", + "sp-inherents 2.0.0", + "sp-runtime 2.0.0", + "sp-timestamp 2.0.0", +] + [[package]] name = "sc-consensus-slots" version = "2.0.0" diff --git a/Cargo.toml b/Cargo.toml index ce5d15695e8f8..22b91edf482f4 
100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,6 +22,7 @@ members = [ "client/cli", "client/consensus/aura", "client/consensus/babe", + "client/consensus/sassafras", "client/consensus/pow", "client/consensus/slots", "client/consensus/uncles", diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index d46486b1d83f0..a4bad3ba87aa4 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -697,7 +697,7 @@ impl Verifier for BabeVerifier::Runtime)?; + .map_err(Error::::Runtime)?; let (_, slot_now, _) = self.time_source.extract_timestamp_and_slot(&inherent_data) .map_err(Error::::Extraction)?; diff --git a/client/consensus/habe/Cargo.toml b/client/consensus/habe/Cargo.toml deleted file mode 100644 index 84e73295dcc77..0000000000000 --- a/client/consensus/habe/Cargo.toml +++ /dev/null @@ -1,6 +0,0 @@ -[package] -name = "sc-consensus-habe" -version = "2.0.0" -authors = ["Parity Technologies "] -description = "HABE consensus algorithm for substrate" -edition = "2018" diff --git a/client/consensus/habe/src/lib.rs b/client/consensus/habe/src/lib.rs deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml new file mode 100644 index 0000000000000..d19c805fe34da --- /dev/null +++ b/client/consensus/sassafras/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "sc-consensus-sassafras" +version = "2.0.0" +authors = ["Parity Technologies "] +description = "SASSAFRAS consensus algorithm for substrate" +edition = "2018" + +[dependencies] +codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } +log = "0.4.8" +schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"] } +derive_more = "0.99.2" +parking_lot = "0.9.0" +sp-core = { path = "../../../primitives/core" } +sp-blockchain = { path = "../../../primitives/blockchain" } +sp-consensus = { path = "../../../primitives/consensus/common" } 
+sp-consensus-sassafras = { path = "../../../primitives/consensus/sassafras" } +sp-runtime = { path = "../../../primitives/runtime" } +sp-block-builder = { path = "../../../primitives/block-builder" } +sp-inherents = { path = "../../../primitives/inherents" } +sp-timestamp = { path = "../../../primitives/timestamp" } +sc-client = { path = "../../" } +sc-client-api = { path = "../../api" } +sc-consensus-slots = { path = "../slots" } diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs new file mode 100644 index 0000000000000..1e145063c9285 --- /dev/null +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -0,0 +1,37 @@ +use schnorrkel::vrf::VRFProof; +use sp_core::H256; +use sp_consensus_sassafras::{SlotNumber, SassafrasBlockWeight}; +use sp_blockchain::Result as ClientResult; +use sc_client_api::AuxStore; + +pub struct PublishingAuxiliary { + pub proofs: Vec, + pub start_slot: SlotNumber, +} + +pub struct GeneratingAuxiliary { + pub proofs: Vec, + pub start_slot: SlotNumber, +} + +pub struct Auxiliary { + pub total_weight: SassafrasBlockWeight, + pub weight: SassafrasBlockWeight, + + pub publishing: PublishingAuxiliary, + pub validating: GeneratingAuxiliary, +} + +pub(crate) fn read_auxiliary( + hash: &H256, + backend: &B +) -> ClientResult { + unimplemented!() +} + +pub(crate) fn write_auxiliary( + auxiliary: &Auxiliary, + backend: &B +) -> ClientResult<()> { + unimplemented!() +} diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs new file mode 100644 index 0000000000000..0fa010d22afb2 --- /dev/null +++ b/client/consensus/sassafras/src/lib.rs @@ -0,0 +1,139 @@ +use std::{sync::Arc, marker::PhantomData, time::{Duration, Instant}}; +use log::trace; +use parking_lot::Mutex; +use sp_core::{Blake2Hasher, H256}; +use sp_blockchain::{Result as ClientResult, ProvideCache, HeaderMetadata}; +use sp_inherents::InherentData; +use sp_timestamp::{TimestampInherentData, InherentType as 
TimestampInherent}; +use sp_consensus::{Error as ConsensusError, BlockImportParams, BlockOrigin}; +use sp_consensus::import_queue::{Verifier, CacheKeyId, BasicQueue}; +use sp_consensus_sassafras::digest::{NextEpochDescriptor, PostBlockDescriptor, PreDigest}; +use sp_consensus_sassafras::inherents::SassafrasInherentData; +use sp_runtime::{generic::BlockId, Justification}; +use sp_runtime::traits::{Block as BlockT, Header, ProvideRuntimeApi}; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sc_client::{Client, CallExecutor}; +use sc_client_api::backend::{AuxStore, Backend}; +use sc_consensus_slots::SlotCompatible; + +mod aux_schema; + +#[derive(derive_more::Display, Debug)] +enum Error { + #[display(fmt = "Could not extract timestamp and slot: {:?}", _0)] + Extraction(sp_consensus::Error), + #[display(fmt = "Header {:?} rejected: too far in the future", _0)] + TooFarInFuture(B::Hash), + #[display(fmt = "Parent ({}) of {} unavailable. Cannot import", _0, _1)] + ParentUnavailable(B::Hash, B::Hash), + #[display(fmt = "Could not fetch parent header: {:?}", _0)] + FetchParentHeader(sp_blockchain::Error), + Runtime(sp_inherents::Error), + Client(sp_blockchain::Error), +} + +impl std::convert::From> for String { + fn from(error: Error) -> String { + error.to_string() + } +} + +pub struct SassafrasVerifier { + client: Arc>, + api: Arc, + inherent_data_providers: sp_inherents::InherentDataProviders, + time_source: TimeSource, +} + +impl Verifier for SassafrasVerifier where + Block: BlockT, + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + RA: Send + Sync, + PRA: ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, + PRA::Api: BlockBuilderApi, +{ + fn verify( + &mut self, + origin: BlockOrigin, + header: Block::Header, + justification: Option, + mut body: Option>, + ) -> Result<(BlockImportParams, Option)>>), String> { + trace!( + target: "sassafras", + "Verifying origin: {:?} header: {:?} justification: {:?} body: {:?}", + origin, 
+ header, + justification, + body, + ); + + let mut inherent_data = self + .inherent_data_providers + .create_inherent_data() + .map_err(Error::::Runtime)?; + + let (_, slot_now, _) = self.time_source.extract_timestamp_and_slot(&inherent_data) + .map_err(Error::::Extraction)?; + + let hash = header.hash(); + let parent_hash = *header.parent_hash(); + let mut auxiliary = aux_schema::read_auxiliary(&parent_hash, self.api.as_ref()) + .map_err(Error::::Client)?; + + let parent_header_metadata = self.client.header_metadata(parent_hash) + .map_err(Error::::FetchParentHeader)?; + + let pre_digest = find_pre_digest::(&header)?; + let post_block_desc = find_post_block_descriptor::(&header)?; + + // TODO: verify ticket VRF and post VRF. + // TODO: verify that auxiliary.validating is the ticket VRF. + + // auxiliary.publishing.push(post_digests.commitments); + + if let Some(next_epoch_desc) = find_next_epoch_descriptor::(&header)? { + unimplemented!() + } + + unimplemented!() + } +} + +pub type SassafrasImportQueue = BasicQueue; + +#[derive(Default, Clone)] +struct TimeSource(Arc, Vec<(Instant, u64)>)>>); + +impl SlotCompatible for TimeSource { + fn extract_timestamp_and_slot( + &self, + data: &InherentData, + ) -> Result<(TimestampInherent, u64, std::time::Duration), sp_consensus::Error> { + trace!(target: "babe", "extract timestamp"); + data.timestamp_inherent_data() + .and_then(|t| data.sassafras_inherent_data().map(|a| (t, a))) + .map_err(Into::into) + .map_err(sp_consensus::Error::InherentData) + .map(|(x, y)| (x, y, self.0.lock().0.take().unwrap_or_default())) + } +} + +fn find_pre_digest( + header: &B::Header, +) -> Result> { + unimplemented!() +} + +fn find_post_block_descriptor( + header: &B::Header, +) -> Result, Error> { + unimplemented!() +} + +fn find_next_epoch_descriptor( + header: &B::Header, +) -> Result, Error> { + unimplemented!() +} diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index 
1b08e43b3095b..16398e8e4d733 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -18,7 +18,7 @@ sp-application-crypto = { path = "../../application-crypto", default-features = default = ["std"] std = [ "codec/std", - "schnorrkel/std", + "schnorrkel", "sp-std/std", "sp-inherents/std", "sp-timestamp/std", From e20ce3621ba04fb50adc31be7a90c6bfc7671ea2 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 10 Jan 2020 18:08:06 +0100 Subject: [PATCH 04/75] Better VRF type handling --- Cargo.lock | 1 + client/consensus/sassafras/src/aux_schema.rs | 67 +++++++++-- client/consensus/sassafras/src/lib.rs | 2 +- primitives/consensus/sassafras/Cargo.toml | 2 + primitives/consensus/sassafras/src/digest.rs | 15 ++- primitives/consensus/sassafras/src/lib.rs | 7 ++ primitives/consensus/sassafras/src/vrf.rs | 114 +++++++++++++++++++ 7 files changed, 192 insertions(+), 16 deletions(-) create mode 100644 primitives/consensus/sassafras/src/vrf.rs diff --git a/Cargo.lock b/Cargo.lock index d9a74c0cf8e96..0c3623db2e242 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6231,6 +6231,7 @@ dependencies = [ "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "schnorrkel 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)", "sp-application-crypto 2.0.0", + "sp-core 2.0.0", "sp-inherents 2.0.0", "sp-runtime 2.0.0", "sp-std 2.0.0", diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs index 1e145063c9285..e3a84c21d36ed 100644 --- a/client/consensus/sassafras/src/aux_schema.rs +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -1,37 +1,82 @@ -use schnorrkel::vrf::VRFProof; +use codec::{Encode, Decode}; use sp_core::H256; -use sp_consensus_sassafras::{SlotNumber, SassafrasBlockWeight}; -use sp_blockchain::Result as ClientResult; +use sp_consensus_sassafras::{SlotNumber, SassafrasBlockWeight, VRFProof}; +use sp_blockchain::{Result as ClientResult, Error as 
ClientError}; use sc_client_api::AuxStore; +#[derive(Clone, Debug, Encode, Decode)] pub struct PublishingAuxiliary { pub proofs: Vec, pub start_slot: SlotNumber, } -pub struct GeneratingAuxiliary { +#[derive(Clone, Debug, Encode, Decode)] +pub struct ValidatingAuxiliary { pub proofs: Vec, pub start_slot: SlotNumber, } +#[derive(Clone, Debug, Encode, Decode)] pub struct Auxiliary { pub total_weight: SassafrasBlockWeight, pub weight: SassafrasBlockWeight, pub publishing: PublishingAuxiliary, - pub validating: GeneratingAuxiliary, + pub validating: ValidatingAuxiliary, } -pub(crate) fn read_auxiliary( +impl Default for Auxiliary { + fn default() -> Self { + Self { + total_weight: 0, + weight: 0, + publishing: PublishingAuxiliary { + proofs: Vec::new(), + start_slot: 0, + }, + validating: ValidatingAuxiliary { + proofs: Vec::new(), + start_slot: 0, + } + } + } +} + +const AUXILIARY_KEY: &[u8] = b"sassafras_auxiliary"; + +fn load_decode(backend: &B, key: &[u8]) -> ClientResult> + where + B: AuxStore, + T: Decode, +{ + let corrupt = |e: codec::Error| { + ClientError::Backend(format!("Sassafras DB is corrupted. Decode error: {}", e.what())) + }; + match backend.get_aux(key)? { + None => Ok(None), + Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt) + } +} + +pub(crate) fn load_auxiliary( hash: &H256, backend: &B ) -> ClientResult { - unimplemented!() + let auxiliary = load_decode::<_, Auxiliary>(backend, AUXILIARY_KEY)? 
+ .map(Into::into) + .unwrap_or_default(); + + Ok(auxiliary) } -pub(crate) fn write_auxiliary( +pub(crate) fn write_auxiliary( auxiliary: &Auxiliary, - backend: &B -) -> ClientResult<()> { - unimplemented!() + write_aux: F, +) -> R where + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, +{ + let encoded_auxiliary = auxiliary.encode(); + write_aux( + &[(AUXILIARY_KEY, encoded_auxiliary.as_slice())], + ) } diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 0fa010d22afb2..eb88ea91b46e5 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -79,7 +79,7 @@ impl Verifier for SassafrasVerifier::Client)?; let parent_header_metadata = self.client.header_metadata(parent_hash) diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index 16398e8e4d733..1e7bbb5d6bf4a 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -9,6 +9,7 @@ edition = "2018" codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"], optional = true } sp-std = { path = "../../std", default-features = false } +sp-core = { path = "../../core", default-features = false } sp-inherents = { path = "../../inherents", default-features = false } sp-timestamp = { path = "../../timestamp", default-features = false } sp-runtime = { path = "../../runtime", default-features = false } @@ -20,6 +21,7 @@ std = [ "codec/std", "schnorrkel", "sp-std/std", + "sp-core/std", "sp-inherents/std", "sp-timestamp/std", "sp-runtime/std", diff --git a/primitives/consensus/sassafras/src/digest.rs b/primitives/consensus/sassafras/src/digest.rs index 4b319966a6f4a..02e1ad83c0d2e 100644 --- a/primitives/consensus/sassafras/src/digest.rs +++ b/primitives/consensus/sassafras/src/digest.rs @@ -14,18 +14,23 @@ // You should have received a copy of the GNU General 
Public License // along with Substrate. If not, see . -use schnorrkel::vrf::{VRFProof, VRFOutput, VRF_OUTPUT_LENGTH}; -use crate::{VRFIndex, AuthorityIndex, SlotNumber, AuthorityId, SassafrasAuthorityWeight}; +use codec::{Encode, Decode}; +use sp_core::RuntimeDebug; +use crate::{ + Randomness, VRFProof, VRFOutput, VRFIndex, + AuthorityIndex, SlotNumber, AuthorityId, SassafrasAuthorityWeight +}; /// A Sassafras pre-digest. The validator pre-commit a VRF proof at `vrf_index`, and now reveal it /// as `vrf_output`. /// /// This digest is included in every block, generated by Sassafras consensus engine. +#[derive(Clone, RuntimeDebug, Encode, Decode)] pub struct PreDigest { /// Index of ticket VRF proof that has been previously committed. pub ticket_vrf_index: VRFIndex, /// Reveal of tocket VRF output. - pub tocket_vrf_output: VRFOutput, + pub ticket_vrf_output: VRFOutput, /// Validator index. pub authority_index: AuthorityIndex, /// Corresponding slot number. @@ -39,18 +44,20 @@ pub struct PreDigest { /// Post-digest about next epoch information. /// /// This digest is generated by runtime, at the beginning of every epoch. +#[derive(Clone, RuntimeDebug, Encode, Decode)] pub struct NextEpochDescriptor { /// The authorities that generate VRF proofs. Note that those keys will only be generating /// blocks two epochs later. pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, /// Value of randomness to use for slot-assignment. This is expected to use the secondary "Post /// VRF". - pub randomness: [u8; VRF_OUTPUT_LENGTH], + pub randomness: Randomness, } /// Post-digest about post-block information such as ticket commitments. /// /// This digest is generated by runtime, optional, and can be included at every block. +#[derive(Clone, RuntimeDebug, Encode, Decode)] pub struct PostBlockDescriptor { /// Commitments of tickets. 
pub commitments: Vec, diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index a0cd0e5d2a248..5539e54dfdf33 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -22,7 +22,14 @@ pub mod digest; pub mod inherents; +mod vrf; +pub use crate::vrf::{ + VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH, RawVRFOutput, VRFOutput, + RawVRFProof, VRFProof, Randomness, +}; + +use core::ops::{Deref, DerefMut}; use sp_runtime::ConsensusEngineId; mod app { diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs new file mode 100644 index 0000000000000..2e36b773f6080 --- /dev/null +++ b/primitives/consensus/sassafras/src/vrf.rs @@ -0,0 +1,114 @@ +use codec::{Encode, Decode, EncodeLike}; +use schnorrkel::{SignatureError, errors::MultiSignatureStage}; +use sp_std::ops::{Deref, DerefMut}; + +pub use schnorrkel::vrf::{VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH}; + +pub type RawVRFOutput = [u8; VRF_OUTPUT_LENGTH]; + +#[cfg(feature = "std")] +#[derive(Clone, Debug)] +pub struct VRFOutput(pub schnorrkel::vrf::VRFOutput); + +#[cfg(not(feature = "std"))] +pub type VRFOutput = RawVRFOutput; + +#[cfg(feature = "std")] +impl Deref for VRFOutput { + type Target = schnorrkel::vrf::VRFOutput; + fn deref(&self) -> &Self::Target { &self.0 } +} + +#[cfg(feature = "std")] +impl DerefMut for VRFOutput { + fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } +} + +#[cfg(feature = "std")] +impl Encode for VRFOutput { + fn encode(&self) -> Vec { + self.0.as_bytes().encode() + } +} + +#[cfg(feature = "std")] +impl EncodeLike for VRFOutput { } + +#[cfg(feature = "std")] +impl Decode for VRFOutput { + fn decode(i: &mut R) -> Result { + let decoded = <[u8; VRF_OUTPUT_LENGTH]>::decode(i)?; + Ok(Self(schnorrkel::vrf::VRFOutput::from_bytes(&decoded).map_err(convert_error)?)) + } +} + +pub type RawVRFProof = [u8; VRF_PROOF_LENGTH]; + +#[cfg(feature = "std")] +#[derive(Clone, Debug)] 
+pub struct VRFProof(pub schnorrkel::vrf::VRFProof); + +#[cfg(not(feature = "std"))] +pub type VRFProof = RawVRFProof; + +#[cfg(feature = "std")] +impl Deref for VRFProof { + type Target = schnorrkel::vrf::VRFProof; + fn deref(&self) -> &Self::Target { &self.0 } +} + +#[cfg(feature = "std")] +impl DerefMut for VRFProof { + fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } +} + +#[cfg(feature = "std")] +impl Encode for VRFProof { + fn encode(&self) -> Vec { + self.0.to_bytes().encode() + } +} + +#[cfg(feature = "std")] +impl EncodeLike for VRFProof { } + +#[cfg(feature = "std")] +impl Decode for VRFProof { + fn decode(i: &mut R) -> Result { + let decoded = <[u8; VRF_PROOF_LENGTH]>::decode(i)?; + Ok(Self(schnorrkel::vrf::VRFProof::from_bytes(&decoded).map_err(convert_error)?)) + } +} + +#[cfg(feature = "std")] +fn convert_error(e: SignatureError) -> codec::Error { + use SignatureError::*; + use MultiSignatureStage::*; + match e { + EquationFalse => "Signature error: `EquationFalse`".into(), + PointDecompressionError => "Signature error: `PointDecompressionError`".into(), + ScalarFormatError => "Signature error: `ScalarFormatError`".into(), + NotMarkedSchnorrkel => "Signature error: `NotMarkedSchnorrkel`".into(), + BytesLengthError { .. 
} => "Signature error: `BytesLengthError`".into(), + MuSigAbsent { musig_stage: Commitment } => + "Signature error: `MuSigAbsent` at stage `Commitment`".into(), + MuSigAbsent { musig_stage: Reveal } => + "Signature error: `MuSigAbsent` at stage `Reveal`".into(), + MuSigAbsent { musig_stage: Cosignature } => + "Signature error: `MuSigAbsent` at stage `Cosignature`".into(), + MuSigInconsistent { musig_stage: Commitment, duplicate: true } => + "Signature error: `MuSigInconsistent` at stage `Commitment` on duplicate".into(), + MuSigInconsistent { musig_stage: Commitment, duplicate: false } => + "Signature error: `MuSigInconsistent` at stage `Commitment` on not duplicate".into(), + MuSigInconsistent { musig_stage: Reveal, duplicate: true } => + "Signature error: `MuSigInconsistent` at stage `Reveal` on duplicate".into(), + MuSigInconsistent { musig_stage: Reveal, duplicate: false } => + "Signature error: `MuSigInconsistent` at stage `Reveal` on not duplicate".into(), + MuSigInconsistent { musig_stage: Cosignature, duplicate: true } => + "Signature error: `MuSigInconsistent` at stage `Cosignature` on duplicate".into(), + MuSigInconsistent { musig_stage: Cosignature, duplicate: false } => + "Signature error: `MuSigInconsistent` at stage `Cosignature` on not duplicate".into(), + } +} + +pub type Randomness = [u8; VRF_OUTPUT_LENGTH]; From c15210df33f6e070725ef25fe96ff9897826a198 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 10 Jan 2020 20:10:50 +0100 Subject: [PATCH 05/75] CompatibleDigestItem for Sassafras --- primitives/consensus/sassafras/src/digest.rs | 77 +++++++++++++++++++- 1 file changed, 74 insertions(+), 3 deletions(-) diff --git a/primitives/consensus/sassafras/src/digest.rs b/primitives/consensus/sassafras/src/digest.rs index 02e1ad83c0d2e..72f0713a28186 100644 --- a/primitives/consensus/sassafras/src/digest.rs +++ b/primitives/consensus/sassafras/src/digest.rs @@ -14,13 +14,75 @@ // You should have received a copy of the GNU General Public License // along
with Substrate. If not, see . -use codec::{Encode, Decode}; +use codec::{Encode, Decode, Codec}; use sp_core::RuntimeDebug; +#[cfg(feature = "std")] +use sp_runtime::{DigestItem, generic::OpaqueDigestItemId}; use crate::{ - Randomness, VRFProof, VRFOutput, VRFIndex, - AuthorityIndex, SlotNumber, AuthorityId, SassafrasAuthorityWeight + SASSAFRAS_ENGINE_ID, Randomness, VRFProof, VRFOutput, VRFIndex, + AuthorityIndex, SlotNumber, AuthorityId, SassafrasAuthorityWeight, + AuthoritySignature, }; +/// A digest item which is usable with Sassafras consensus. +#[cfg(feature = "std")] +pub trait CompatibleDigestItem: Sized { + /// Construct a digest item which contains a Sassafras `PreDigest`. + fn sassafras_pre_digest(seal: PreDigest) -> Self; + + /// Construct a digest item which contains a Sassafras seal. + fn sassafras_seal(signature: AuthoritySignature) -> Self; + + /// If this item is a Sassafras `PreDigest`, return it. + fn as_sassafras_pre_digest(&self) -> Option; + + /// If this item is a Sassafras `NextEpochDescriptor`, return it. + fn as_sassafras_next_epoch_descriptor(&self) -> Option; + + /// If this item is a Sassafras `PostBlockDescriptor`, return it. + fn as_sassafras_post_block_descriptor(&self) -> Option; + + /// If this item is a Sassafras seal, return it. 
+ fn as_sassafras_seal(&self) -> Option; +} + +#[cfg(feature = "std")] +impl CompatibleDigestItem for DigestItem where + Hash: core::fmt::Debug + Send + Sync + Eq + Clone + Codec + 'static +{ + fn sassafras_pre_digest(seal: PreDigest) -> Self { + DigestItem::PreRuntime(SASSAFRAS_ENGINE_ID, seal.encode()) + } + + fn sassafras_seal(signature: AuthoritySignature) -> Self { + DigestItem::Seal(SASSAFRAS_ENGINE_ID, signature.encode()) + } + + fn as_sassafras_pre_digest(&self) -> Option { + self.try_to(OpaqueDigestItemId::PreRuntime(&SASSAFRAS_ENGINE_ID)) + } + + fn as_sassafras_next_epoch_descriptor(&self) -> Option { + self.try_to(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID)) + .and_then(|x: ConsensusDigest| match x { + ConsensusDigest::NextEpoch(n) => Some(n), + _ => None, + }) + } + + fn as_sassafras_post_block_descriptor(&self) -> Option { + self.try_to(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID)) + .and_then(|x: ConsensusDigest| match x { + ConsensusDigest::PostBlock(p) => Some(p), + _ => None, + }) + } + + fn as_sassafras_seal(&self) -> Option { + self.try_to(OpaqueDigestItemId::Seal(&SASSAFRAS_ENGINE_ID)) + } +} + /// A Sassafras pre-digest. The validator pre-commit a VRF proof at `vrf_index`, and now reveal it /// as `vrf_output`. /// @@ -41,6 +103,15 @@ pub struct PreDigest { pub post_vrf_output: VRFOutput, } +/// Consensus logs. +#[derive(Clone, RuntimeDebug, Encode, Decode)] +pub enum ConsensusDigest { + /// Next epoch descriptor digest. + NextEpoch(NextEpochDescriptor), + /// Post block descriptor digest. + PostBlock(PostBlockDescriptor), +} + /// Post-digest about next epoch information. /// /// This digest is generated by runtime, at the beginning of every epoch. 
From 9ed4d63bf6dc74714e7665c46369f5e84f8106d6 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 10 Jan 2020 20:18:27 +0100 Subject: [PATCH 06/75] Helper function for finding pre-digest, post-block/next-epoch descriptor --- client/consensus/sassafras/src/lib.rs | 38 ++++++++++++++++++++++++--- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index eb88ea91b46e5..c4d332285427a 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -7,7 +7,9 @@ use sp_inherents::InherentData; use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; use sp_consensus::{Error as ConsensusError, BlockImportParams, BlockOrigin}; use sp_consensus::import_queue::{Verifier, CacheKeyId, BasicQueue}; -use sp_consensus_sassafras::digest::{NextEpochDescriptor, PostBlockDescriptor, PreDigest}; +use sp_consensus_sassafras::digest::{ + NextEpochDescriptor, PostBlockDescriptor, PreDigest, CompatibleDigestItem +}; use sp_consensus_sassafras::inherents::SassafrasInherentData; use sp_runtime::{generic::BlockId, Justification}; use sp_runtime::traits::{Block as BlockT, Header, ProvideRuntimeApi}; @@ -28,6 +30,10 @@ enum Error { ParentUnavailable(B::Hash, B::Hash), #[display(fmt = "Could not fetch parent header: {:?}", _0)] FetchParentHeader(sp_blockchain::Error), + MultiplePreRuntimeDigest, + NoPreRuntimeDigest, + MultipleNextEpochDescriptor, + MultiplePostBlockDescriptor, Runtime(sp_inherents::Error), Client(sp_blockchain::Error), } @@ -123,17 +129,41 @@ impl SlotCompatible for TimeSource { fn find_pre_digest( header: &B::Header, ) -> Result> { - unimplemented!() + let mut pre_digest = None; + for log in header.digest().logs() { + match (log.as_sassafras_pre_digest(), pre_digest.is_some()) { + (Some(_), true) => return Err(Error::MultiplePreRuntimeDigest), + (None, _) => (), + (s, false) => pre_digest = s, + } + } + pre_digest.ok_or_else(|| 
Error::NoPreRuntimeDigest) } fn find_post_block_descriptor( header: &B::Header, ) -> Result, Error> { - unimplemented!() + let mut desc = None; + for log in header.digest().logs() { + match (log.as_sassafras_post_block_descriptor(), desc.is_some()) { + (Some(_), true) => return Err(Error::MultiplePostBlockDescriptor), + (None, _) => (), + (s, false) => desc = s, + } + } + Ok(desc) } fn find_next_epoch_descriptor( header: &B::Header, ) -> Result, Error> { - unimplemented!() + let mut desc = None; + for log in header.digest().logs() { + match (log.as_sassafras_next_epoch_descriptor(), desc.is_some()) { + (Some(_), true) => return Err(Error::MultipleNextEpochDescriptor), + (None, _) => (), + (s, false) => desc = s, + } + } + Ok(desc) } From 45361fc742b2ba229219693be79e5430ff34c58e Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 10 Jan 2020 21:17:03 +0100 Subject: [PATCH 07/75] Finish draft implementation of validation logic --- Cargo.lock | 1 + client/consensus/sassafras/Cargo.toml | 1 + client/consensus/sassafras/src/aux_schema.rs | 45 ++---- client/consensus/sassafras/src/lib.rs | 138 +++++++++++++++++-- primitives/consensus/sassafras/src/digest.rs | 2 +- primitives/consensus/sassafras/src/lib.rs | 2 + 6 files changed, 146 insertions(+), 43 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0c3623db2e242..cddc20b6b71d2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5242,6 +5242,7 @@ version = "2.0.0" dependencies = [ "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", + "merlin 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sc-client 2.0.0", diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index d19c805fe34da..fb9dd479b4a21 100644 --- 
a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -11,6 +11,7 @@ log = "0.4.8" schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"] } derive_more = "0.99.2" parking_lot = "0.9.0" +merlin = "1.2.1" sp-core = { path = "../../../primitives/core" } sp-blockchain = { path = "../../../primitives/blockchain" } sp-consensus = { path = "../../../primitives/consensus/common" } diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs index e3a84c21d36ed..0708e419ce229 100644 --- a/client/consensus/sassafras/src/aux_schema.rs +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -1,48 +1,31 @@ use codec::{Encode, Decode}; use sp_core::H256; -use sp_consensus_sassafras::{SlotNumber, SassafrasBlockWeight, VRFProof}; +use sp_consensus_sassafras::{ + EpochNumber, SlotNumber, SassafrasBlockWeight, SassafrasAuthorityWeight, + VRFProof, Randomness, AuthorityId +}; use sp_blockchain::{Result as ClientResult, Error as ClientError}; use sc_client_api::AuxStore; -#[derive(Clone, Debug, Encode, Decode)] -pub struct PublishingAuxiliary { +#[derive(Clone, Debug, Encode, Decode, Default)] +pub struct PoolAuxiliary { pub proofs: Vec, - pub start_slot: SlotNumber, + pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + pub randomness: Randomness, + pub epoch: EpochNumber, } -#[derive(Clone, Debug, Encode, Decode)] -pub struct ValidatingAuxiliary { - pub proofs: Vec, - pub start_slot: SlotNumber, -} - -#[derive(Clone, Debug, Encode, Decode)] +#[derive(Clone, Debug, Encode, Decode, Default)] pub struct Auxiliary { pub total_weight: SassafrasBlockWeight, pub weight: SassafrasBlockWeight, + pub slot: SlotNumber, - pub publishing: PublishingAuxiliary, - pub validating: ValidatingAuxiliary, -} - -impl Default for Auxiliary { - fn default() -> Self { - Self { - total_weight: 0, - weight: 0, - publishing: PublishingAuxiliary { - proofs: Vec::new(), - start_slot: 0, - }, - validating: 
ValidatingAuxiliary { - proofs: Vec::new(), - start_slot: 0, - } - } - } + pub publishing: PoolAuxiliary, + pub validating: PoolAuxiliary, } -const AUXILIARY_KEY: &[u8] = b"sassafras_auxiliary"; +pub const AUXILIARY_KEY: &[u8] = b"sassafras_auxiliary"; fn load_decode(backend: &B, key: &[u8]) -> ClientResult> where diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index c4d332285427a..ddca1cd08bfda 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -1,12 +1,21 @@ +mod aux_schema; + use std::{sync::Arc, marker::PhantomData, time::{Duration, Instant}}; use log::trace; +use codec::Encode; use parking_lot::Mutex; -use sp_core::{Blake2Hasher, H256}; +use merlin::Transcript; +use sp_core::{Blake2Hasher, H256, crypto::{Pair, Public}}; use sp_blockchain::{Result as ClientResult, ProvideCache, HeaderMetadata}; use sp_inherents::InherentData; use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; -use sp_consensus::{Error as ConsensusError, BlockImportParams, BlockOrigin}; +use sp_consensus::{ + Error as ConsensusError, BlockImportParams, BlockOrigin, ForkChoiceStrategy, +}; use sp_consensus::import_queue::{Verifier, CacheKeyId, BasicQueue}; +use sp_consensus_sassafras::{ + SASSAFRAS_ENGINE_ID, AuthorityPair, AuthorityId, Randomness, +}; use sp_consensus_sassafras::digest::{ NextEpochDescriptor, PostBlockDescriptor, PreDigest, CompatibleDigestItem }; @@ -17,8 +26,7 @@ use sp_block_builder::BlockBuilder as BlockBuilderApi; use sc_client::{Client, CallExecutor}; use sc_client_api::backend::{AuxStore, Backend}; use sc_consensus_slots::SlotCompatible; - -mod aux_schema; +use crate::aux_schema::{AUXILIARY_KEY, PoolAuxiliary}; #[derive(derive_more::Display, Debug)] enum Error { @@ -34,6 +42,14 @@ enum Error { NoPreRuntimeDigest, MultipleNextEpochDescriptor, MultiplePostBlockDescriptor, + InvalidTicketVRFIndex, + InvalidAuthorityId, + InvalidSeal, + HeaderUnsealed(B::Hash), 
+ TicketVRFVerificationFailed, + PostVRFVerificationFailed, + SlotInPast, + SlotInFuture, Runtime(sp_inherents::Error), Client(sp_blockchain::Error), } @@ -62,7 +78,7 @@ impl Verifier for SassafrasVerifier, mut body: Option>, ) -> Result<(BlockImportParams, Option)>>), String> { @@ -91,19 +107,93 @@ impl Verifier for SassafrasVerifier::FetchParentHeader)?; + // First, Verify pre-runtime digest. let pre_digest = find_pre_digest::(&header)?; - let post_block_desc = find_post_block_descriptor::(&header)?; - // TODO: verify ticket VRF and post VRF. - // TODO: verify that auxiliary.validating is the ticket VRF. + // Verify that the slot is increasing, and not in the future. + if pre_digest.slot <= auxiliary.slot { + return Err(Error::::SlotInPast.into()) + } + if pre_digest.slot > slot_now { + return Err(Error::::SlotInFuture.into()) + } + auxiliary.slot = pre_digest.slot; + + // Check the signature. + let (author, block_weight) = auxiliary.validating.authorities + .get(pre_digest.authority_index as usize) + .cloned() + .ok_or(Error::::InvalidAuthorityId)?; + let seal = header.digest_mut().pop() + .ok_or(Error::::HeaderUnsealed(header.hash()))?; + let signature = seal.as_sassafras_seal().ok_or(Error::::InvalidSeal)?; + let pre_hash = header.hash(); + if !AuthorityPair::verify(&signature, pre_hash, &author) { + return Err(Error::::InvalidSeal.into()) + } + + // Check that the ticket VRF is of a valid index in auxiliary.validating. + let ticket_vrf_proof = auxiliary.validating.proofs + .get(pre_digest.ticket_vrf_index as usize) + .cloned() + .ok_or(Error::::InvalidTicketVRFIndex)?; + + // Check that the ticket VRF is valid. 
+ let ticket_transcript = make_ticket_transcript( + &auxiliary.validating.randomness, + pre_digest.slot, + auxiliary.validating.epoch, + ); + schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { + p.vrf_verify(ticket_transcript, &pre_digest.ticket_vrf_output, &ticket_vrf_proof) + }).map_err(|_| Error::::TicketVRFVerificationFailed)?; + + // Check that the post-block VRF is valid. + let post_transcript = make_post_transcript( + &auxiliary.validating.randomness, + pre_digest.slot, + auxiliary.validating.epoch, + ); + schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { + p.vrf_verify(post_transcript, &pre_digest.post_vrf_output, &pre_digest.post_vrf_proof) + }).map_err(|_| Error::::PostVRFVerificationFailed)?; + + // Second, push in any commitments of ticket VRF. + if let Some(post_block_desc) = find_post_block_descriptor::(&header)? { + // TODO: verify that proofs are below threshold. - // auxiliary.publishing.push(post_digests.commitments); + auxiliary.publishing.proofs.append(&mut post_block_desc.commitments.clone()); + } + // Finally, if we are switching epoch, move publishing to validating, and sort the proofs. if let Some(next_epoch_desc) = find_next_epoch_descriptor::(&header)? { - unimplemented!() + // TODO: check descriptor validity. + + std::mem::swap(&mut auxiliary.publishing, &mut auxiliary.validating); + auxiliary.publishing = PoolAuxiliary { + proofs: Vec::new(), + authorities: next_epoch_desc.authorities, + randomness: next_epoch_desc.randomness, + epoch: auxiliary.validating.epoch + 1, + }; + + // TODO: sort the validating proofs in "outside-in" order. 
} - unimplemented!() + let block_import_params = BlockImportParams { + origin, + header, + post_digests: vec![seal], + body, + finalized: false, + justification, + auxiliary: vec![(AUXILIARY_KEY.to_vec(), Some(auxiliary.encode()))], + fork_choice: ForkChoiceStrategy::LongestChain, + allow_missing_state: false, + import_existing: false, + }; + + Ok((block_import_params, Default::default())) } } @@ -167,3 +257,29 @@ fn find_next_epoch_descriptor( } Ok(desc) } + +fn make_ticket_transcript( + randomness: &[u8], + slot_number: u64, + epoch: u64, +) -> Transcript { + let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); + transcript.commit_bytes(b"type", b"ticket"); + transcript.commit_bytes(b"slot number", &slot_number.to_le_bytes()); + transcript.commit_bytes(b"current epoch", &epoch.to_le_bytes()); + transcript.commit_bytes(b"chain randomness", randomness); + transcript +} + +fn make_post_transcript( + randomness: &[u8], + slot_number: u64, + epoch: u64, +) -> Transcript { + let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); + transcript.commit_bytes(b"type", b"post"); + transcript.commit_bytes(b"slot number", &slot_number.to_le_bytes()); + transcript.commit_bytes(b"current epoch", &epoch.to_le_bytes()); + transcript.commit_bytes(b"chain randomness", randomness); + transcript +} diff --git a/primitives/consensus/sassafras/src/digest.rs b/primitives/consensus/sassafras/src/digest.rs index 72f0713a28186..2f4ca291250c9 100644 --- a/primitives/consensus/sassafras/src/digest.rs +++ b/primitives/consensus/sassafras/src/digest.rs @@ -96,7 +96,7 @@ pub struct PreDigest { /// Validator index. pub authority_index: AuthorityIndex, /// Corresponding slot number. - pub slot_number: SlotNumber, + pub slot: SlotNumber, /// Secondary "Post Block VRF" proof. pub post_vrf_proof: VRFProof, /// Secondary "Post Block VRF" output. 
diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 5539e54dfdf33..5d13f37a0a82a 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -43,6 +43,8 @@ pub const SASSAFRAS_TICKET_VRF_PREFIX: &[u8] = b"substrate-sassafras-ticket-vrf" /// The prefix used by Sassafras for its post-block VRF keys. pub const SASSAFRAS_POST_VRF_PREFIX: &[u8] = b"substrate-sassafras-post-vrf"; +pub type EpochNumber = u64; + /// A Sassafras authority keypair, used by both ticket VRF and post-block VRF. #[cfg(feature = "std")] pub type AuthorityPair = app::Pair; From 7927d53c6a5a3cdcf3a461e179350caf2a6e656e Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 13 Jan 2020 20:53:19 +0100 Subject: [PATCH 08/75] Cleanup: error returned by verify function to be Error --- client/consensus/sassafras/src/lib.rs | 49 +++++++++++++++++++-------- 1 file changed, 34 insertions(+), 15 deletions(-) diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index ddca1cd08bfda..6f209bae4930c 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -67,7 +67,7 @@ pub struct SassafrasVerifier { time_source: TimeSource, } -impl Verifier for SassafrasVerifier where +impl SassafrasVerifier where Block: BlockT, B: Backend + 'static, E: CallExecutor + 'static + Clone + Send + Sync, @@ -81,7 +81,7 @@ impl Verifier for SassafrasVerifier, mut body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String> { + ) -> Result<(BlockImportParams, Option)>>), Error> { trace!( target: "sassafras", "Verifying origin: {:?} header: {:?} justification: {:?} body: {:?}", @@ -94,28 +94,28 @@ impl Verifier for SassafrasVerifier::Runtime)?; + .map_err(Error::Runtime)?; let (_, slot_now, _) = self.time_source.extract_timestamp_and_slot(&inherent_data) - .map_err(Error::::Extraction)?; + .map_err(Error::Extraction)?; let hash = header.hash(); let 
parent_hash = *header.parent_hash(); let mut auxiliary = aux_schema::load_auxiliary(&parent_hash, self.api.as_ref()) - .map_err(Error::::Client)?; + .map_err(Error::Client)?; let parent_header_metadata = self.client.header_metadata(parent_hash) - .map_err(Error::::FetchParentHeader)?; + .map_err(Error::FetchParentHeader)?; // First, Verify pre-runtime digest. let pre_digest = find_pre_digest::(&header)?; // Verify that the slot is increasing, and not in the future. if pre_digest.slot <= auxiliary.slot { - return Err(Error::::SlotInPast.into()) + return Err(Error::SlotInPast.into()) } if pre_digest.slot > slot_now { - return Err(Error::::SlotInFuture.into()) + return Err(Error::SlotInFuture.into()) } auxiliary.slot = pre_digest.slot; @@ -123,20 +123,20 @@ impl Verifier for SassafrasVerifier::InvalidAuthorityId)?; + .ok_or(Error::InvalidAuthorityId)?; let seal = header.digest_mut().pop() - .ok_or(Error::::HeaderUnsealed(header.hash()))?; - let signature = seal.as_sassafras_seal().ok_or(Error::::InvalidSeal)?; + .ok_or(Error::HeaderUnsealed(header.hash()))?; + let signature = seal.as_sassafras_seal().ok_or(Error::InvalidSeal)?; let pre_hash = header.hash(); if !AuthorityPair::verify(&signature, pre_hash, &author) { - return Err(Error::::InvalidSeal.into()) + return Err(Error::InvalidSeal.into()) } // Check that the ticket VRF is of a valid index in auxiliary.validating. let ticket_vrf_proof = auxiliary.validating.proofs .get(pre_digest.ticket_vrf_index as usize) .cloned() - .ok_or(Error::::InvalidTicketVRFIndex)?; + .ok_or(Error::InvalidTicketVRFIndex)?; // Check that the ticket VRF is valid. let ticket_transcript = make_ticket_transcript( @@ -146,7 +146,7 @@ impl Verifier for SassafrasVerifier::TicketVRFVerificationFailed)?; + }).map_err(|_| Error::TicketVRFVerificationFailed)?; // Check that the post-block VRF is valid. 
let post_transcript = make_post_transcript( @@ -156,7 +156,7 @@ impl Verifier for SassafrasVerifier::PostVRFVerificationFailed)?; + }).map_err(|_| Error::PostVRFVerificationFailed)?; // Second, push in any commitments of ticket VRF. if let Some(post_block_desc) = find_post_block_descriptor::(&header)? { @@ -197,6 +197,25 @@ impl Verifier for SassafrasVerifier Verifier for SassafrasVerifier where + Block: BlockT, + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + RA: Send + Sync, + PRA: ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, + PRA::Api: BlockBuilderApi, +{ + fn verify( + &mut self, + origin: BlockOrigin, + mut header: Block::Header, + justification: Option, + mut body: Option>, + ) -> Result<(BlockImportParams, Option)>>), String> { + self.verify(origin, header, justification, body).map_err(Into::into) + } +} + pub type SassafrasImportQueue = BasicQueue; #[derive(Default, Clone)] From 4cf7208d41fc73318e4f6c2f0215ad4d28caff37 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Tue, 21 Jan 2020 14:17:31 +0100 Subject: [PATCH 09/75] Generalize EpochChanges into separate epochs crate --- Cargo.lock | 23 +- Cargo.toml | 1 + client/consensus/epochs/Cargo.toml | 14 ++ client/consensus/epochs/src/lib.rs | 332 ++++++++++++++++++++++++++ client/consensus/sassafras/Cargo.toml | 1 + client/consensus/sassafras/src/lib.rs | 62 ++++- utils/fork-tree/src/lib.rs | 2 +- 7 files changed, 420 insertions(+), 15 deletions(-) create mode 100644 client/consensus/epochs/Cargo.toml create mode 100644 client/consensus/epochs/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index c29fad71c6e7a..5cbd3e6fbac1d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5313,6 +5313,18 @@ dependencies = [ "tokio 0.1.22 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "sc-consensus-epochs" +version = "0.8.0" +dependencies = [ + "fork-tree 2.0.0", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", + 
"parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", + "sc-client-api 2.0.0", + "sp-blockchain 2.0.0", + "sp-runtime 2.0.0", +] + [[package]] name = "sc-consensus-pow" version = "0.8.0" @@ -5340,15 +5352,16 @@ dependencies = [ "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "merlin 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-client 2.0.0", + "sc-client 0.8.0", "sc-client-api 2.0.0", - "sc-consensus-slots 2.0.0", + "sc-consensus-slots 0.8.0", "schnorrkel 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)", + "sp-api 2.0.0", "sp-block-builder 2.0.0", "sp-blockchain 2.0.0", - "sp-consensus 2.0.0", + "sp-consensus 0.8.0", "sp-consensus-sassafras 2.0.0", "sp-core 2.0.0", "sp-inherents 2.0.0", @@ -6380,7 +6393,7 @@ dependencies = [ name = "sp-consensus-sassafras" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", "schnorrkel 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)", "sp-application-crypto 2.0.0", "sp-core 2.0.0", diff --git a/Cargo.toml b/Cargo.toml index 10067712993fd..212bdde97e36a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -25,6 +25,7 @@ members = [ "client/consensus/sassafras", "client/consensus/pow", "client/consensus/slots", + "client/consensus/epochs", "client/consensus/uncles", "client/db", "client/executor", diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml new file mode 100644 index 0000000000000..2c9ce40a52156 --- /dev/null +++ 
b/client/consensus/epochs/Cargo.toml @@ -0,0 +1,14 @@ +[package] +name = "sc-consensus-epochs" +version = "0.8.0" +authors = ["Parity Technologies "] +description = "Generic epochs-based utilities for consensus" +edition = "2018" + +[dependencies] +codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } +parking_lot = "0.9.0" +fork-tree = { version = "2.0.0", path = "../../../utils/fork-tree" } +sp-runtime = { path = "../../../primitives/runtime" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sc-client-api = { path = "../../api" } diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs new file mode 100644 index 0000000000000..498764cde87f8 --- /dev/null +++ b/client/consensus/epochs/src/lib.rs @@ -0,0 +1,332 @@ +use std::{sync::Arc, ops::Add}; +use parking_lot::Mutex; +use codec::{Encode, Decode}; +use fork_tree::ForkTree; +use sc_client_api::utils::is_descendent_of; +use sp_blockchain::{HeaderMetadata, HeaderBackend, Error as ClientError}; +use sp_runtime::traits::{Block as BlockT, NumberFor, One, Zero}; + +/// Definition of slot number type. +pub type SlotNumber = u64; +/// Definition of epoch number type. +pub type EpochNumber = u64; + +/// A builder for `is_descendent_of` functions. +pub trait IsDescendentOfBuilder { + /// The error returned by the function. + type Error: std::error::Error; + /// A function that can tell you if the second parameter is a descendent of + /// the first. + type IsDescendentOf: Fn(&Hash, &Hash) -> Result; + + /// Build an `is_descendent_of` function. + /// + /// The `current` parameter can be `Some` with the details a fresh block whose + /// details aren't yet stored, but its parent is. + /// + /// The format of `current` when `Some` is `(current, current_parent)`. + fn build_is_descendent_of(&self, current: Option<(Hash, Hash)>) + -> Self::IsDescendentOf; +} + +/// Produce a descendent query object given the client. 
+pub fn descendent_query(client: &H) -> HeaderBackendDescendentBuilder<&H, Block> { + HeaderBackendDescendentBuilder(client, std::marker::PhantomData) +} + +/// Wrapper to get around unconstrained type errors when implementing +/// `IsDescendentOfBuilder` for header backends. +pub struct HeaderBackendDescendentBuilder(H, std::marker::PhantomData); + +impl<'a, H, Block> IsDescendentOfBuilder + for HeaderBackendDescendentBuilder<&'a H, Block> where + H: HeaderBackend + HeaderMetadata, + Block: BlockT, +{ + type Error = ClientError; + type IsDescendentOf = Box Result + 'a>; + + fn build_is_descendent_of(&self, current: Option<(Block::Hash, Block::Hash)>) + -> Self::IsDescendentOf + { + Box::new(is_descendent_of(self.0, current)) + } +} + +/// Epoch data, distinguish whether it is genesis or not. +pub trait Epoch { + /// Descriptor for the next epoch. + type NextEpochDescriptor; + + /// Increment the epoch data, using the next epoch descriptor. + fn increment(&self, descriptor: Self::NextEpochDescriptor) -> Self; + + /// Produce the "end slot" of the epoch. This is NOT inclusive to the epoch, + /// i.e. the slots covered by the epoch are `self.start_slot() .. self.end_slot()`. + fn end_slot(&self) -> SlotNumber; + /// Produce the "start slot" of the epoch. + fn start_slot(&self) -> SlotNumber; +} + +/// An unimported genesis epoch. +pub struct UnimportedGenesisEpoch(Epoch); + +/// The viable epoch under which a block can be verified. +/// +/// If this is the first non-genesis block in the chain, then it will +/// hold an `UnimportedGenesis` epoch. +pub enum ViableEpoch { + /// Genesis viable epoch data. + Genesis(UnimportedGenesisEpoch), + /// Regular viable epoch data. 
+ Regular(Epoch), +} + +impl From for ViableEpoch { + fn from(epoch: Epoch) -> ViableEpoch { + ViableEpoch::Regular(epoch) + } +} + +impl AsRef for ViableEpoch { + fn as_ref(&self) -> &Epoch { + match *self { + ViableEpoch::Genesis(UnimportedGenesisEpoch(ref e)) => e, + ViableEpoch::Regular(ref e) => e, + } + } +} + +impl ViableEpoch where + Epoch: crate::Epoch + Clone, +{ + /// Extract the underlying epoch, disregarding the fact that a genesis + /// epoch may be unimported. + pub fn into_inner(self) -> Epoch { + match self { + ViableEpoch::Genesis(UnimportedGenesisEpoch(e)) => e, + ViableEpoch::Regular(e) => e, + } + } + + /// Increment the epoch, yielding an `IncrementedEpoch` to be imported + /// into the fork-tree. + pub fn increment( + &self, + next_descriptor: Epoch::NextEpochDescriptor + ) -> IncrementedEpoch { + let next = self.as_ref().increment(next_descriptor); + let to_persist = match *self { + ViableEpoch::Genesis(UnimportedGenesisEpoch(ref epoch_0)) => + PersistedEpoch::Genesis(epoch_0.clone(), next), + ViableEpoch::Regular(_) => PersistedEpoch::Regular(next), + }; + + IncrementedEpoch(to_persist) + } +} + +/// The datatype encoded on disk. +#[derive(Clone, Encode, Decode)] +pub enum PersistedEpoch { + /// Genesis persisted epoch data. epoch_0, epoch_1. + Genesis(Epoch, Epoch), + /// Regular persisted epoch data. epoch_n. + Regular(Epoch), +} + +/// A fresh, incremented epoch to import into the underlying fork-tree. +/// +/// Create this with `ViableEpoch::increment`. +#[must_use = "Freshly-incremented epoch must be imported with `EpochChanges::import`"] +pub struct IncrementedEpoch(PersistedEpoch); + +impl AsRef for IncrementedEpoch { + fn as_ref(&self) -> &Epoch { + match self.0 { + PersistedEpoch::Genesis(_, ref epoch_1) => epoch_1, + PersistedEpoch::Regular(ref epoch_n) => epoch_n, + } + } +} + +/// Tree of all epoch changes across all *seen* forks. 
Data stored in tree is +/// the hash and block number of the block signaling the epoch change, and the +/// epoch that was signalled at that block. +/// +/// The first epoch, epoch_0, is special cased by saying that it starts at +/// slot number of the first block in the chain. When bootstrapping a chain, +/// there can be multiple competing block #1s, so we have to ensure that the overlayed +/// DAG doesn't get confused. +/// +/// The first block of every epoch should be producing a descriptor for the next +/// epoch - this is checked in higher-level code. So the first block of epoch_0 contains +/// a descriptor for epoch_1. We special-case these and bundle them together in the +/// same DAG entry, pinned to a specific block #1. +/// +/// Further epochs (epoch_2, ..., epoch_n) each get their own entry. +#[derive(Clone, Encode, Decode)] +pub struct EpochChanges { + inner: ForkTree>, +} + +// create a fake header hash which hasn't been included in the chain. +fn fake_head_hash + AsMut<[u8]> + Clone>(parent_hash: &H) -> H { + let mut h = parent_hash.clone(); + // dirty trick: flip the first bit of the parent hash to create a hash + // which has not been in the chain before (assuming a strong hash function). + h.as_mut()[0] ^= 0b10000000; + h +} + +impl EpochChanges where + Hash: PartialEq + AsRef<[u8]> + AsMut<[u8]> + Copy, + Number: Ord + One + Zero + Add + Copy, + Epoch: crate::Epoch + Clone, +{ + /// Create a new epoch-change tracker. + pub fn new() -> Self { + EpochChanges { inner: ForkTree::new() } + } + + /// Rebalances the tree of epoch changes so that it is sorted by length of + /// fork (longest fork first). + pub fn rebalance(&mut self) { + self.inner.rebalance() + } + + /// Prune out finalized epochs, except for the ancestor of the finalized + /// block. The given slot should be the slot number at which the finalized + /// block was authored. 
+ pub fn prune_finalized>( + &mut self, + descendent_of_builder: D, + hash: &Hash, + number: Number, + slot: SlotNumber, + ) -> Result<(), fork_tree::Error> { + let is_descendent_of = descendent_of_builder + .build_is_descendent_of(None); + + let predicate = |epoch: &PersistedEpoch| match *epoch { + PersistedEpoch::Genesis(_, ref epoch_1) => + slot >= epoch_1.end_slot(), + PersistedEpoch::Regular(ref epoch_n) => + slot >= epoch_n.end_slot(), + }; + + // prune any epochs which could not be _live_ as of the children of the + // finalized block, i.e. re-root the fork tree to the oldest ancestor of + // (hash, number) where epoch.end_slot() >= finalized_slot + self.inner.prune( + hash, + &number, + &is_descendent_of, + &predicate, + )?; + + Ok(()) + } + + /// Finds the epoch for a child of the given block, assuming the given slot number. + /// + /// If the returned epoch is an `UnimportedGenesis` epoch, it should be imported into the + /// tree. + pub fn epoch_for_child_of, G>( + &self, + descendent_of_builder: D, + parent_hash: &Hash, + parent_number: Number, + slot_number: SlotNumber, + make_genesis: G, + ) -> Result>, fork_tree::Error> + where G: FnOnce(SlotNumber) -> Epoch + { + // find_node_where will give you the node in the fork-tree which is an ancestor + // of the `parent_hash` by default. if the last epoch was signalled at the parent_hash, + // then it won't be returned. we need to create a new fake chain head hash which + // "descends" from our parent-hash. + let fake_head_hash = fake_head_hash(parent_hash); + + let is_descendent_of = descendent_of_builder + .build_is_descendent_of(Some((fake_head_hash, *parent_hash))); + + if parent_number == Zero::zero() { + // need to insert the genesis epoch. 
+ let genesis_epoch = make_genesis(slot_number); + return Ok(Some(ViableEpoch::Genesis(UnimportedGenesisEpoch(genesis_epoch)))); + } + + // We want to find the deepest node in the tree which is an ancestor + // of our block and where the start slot of the epoch was before the + // slot of our block. The genesis special-case doesn't need to look + // at epoch_1 -- all we're doing here is figuring out which node + // we need. + let predicate = |epoch: &PersistedEpoch| match *epoch { + PersistedEpoch::Genesis(ref epoch_0, _) => + epoch_0.start_slot() <= slot_number, + PersistedEpoch::Regular(ref epoch_n) => + epoch_n.start_slot() <= slot_number, + }; + + self.inner.find_node_where( + &fake_head_hash, + &(parent_number + One::one()), + &is_descendent_of, + &predicate, + ) + .map(|n| n.map(|node| ViableEpoch::Regular(match node.data { + // Ok, we found our node. + // and here we figure out which of the internal epochs + // of a genesis node to use based on their start slot. + PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => + if epoch_1.start_slot() <= slot_number { + epoch_1.clone() + } else { + epoch_0.clone() + }, + PersistedEpoch::Regular(ref epoch_n) => epoch_n.clone(), + }))) + } + + /// Import a new epoch-change, signalled at the given block. + /// + /// This assumes that the given block is prospective (i.e. has not been + /// imported yet), but its parent has. This is why the parent hash needs + /// to be provided. + pub fn import>( + &mut self, + descendent_of_builder: D, + hash: Hash, + number: Number, + parent_hash: Hash, + epoch: IncrementedEpoch, + ) -> Result<(), fork_tree::Error> { + let is_descendent_of = descendent_of_builder + .build_is_descendent_of(Some((hash, parent_hash))); + + let res = self.inner.import( + hash, + number, + epoch.0, + &is_descendent_of, + ); + + match res { + Ok(_) | Err(fork_tree::Error::Duplicate) => Ok(()), + Err(e) => Err(e), + } + } + + /// Return the inner fork tree, useful for testing purposes. 
+ #[cfg(test)] + pub fn tree(&self) -> &ForkTree { + &self.inner + } +} + +/// Type alias to produce the epoch-changes tree from a block type. +pub type EpochChangesFor = EpochChanges<::Hash, NumberFor, Epoch>; + +/// A shared epoch changes tree. +pub type SharedEpochChanges = Arc>>; diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index fb9dd479b4a21..28946cedc557a 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -20,6 +20,7 @@ sp-runtime = { path = "../../../primitives/runtime" } sp-block-builder = { path = "../../../primitives/block-builder" } sp-inherents = { path = "../../../primitives/inherents" } sp-timestamp = { path = "../../../primitives/timestamp" } +sp-api = { version = "2.0.0", path = "../../../primitives/api" } sc-client = { path = "../../" } sc-client-api = { path = "../../api" } sc-consensus-slots = { path = "../slots" } diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 6f209bae4930c..ad4c6b4ff9364 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -1,6 +1,8 @@ mod aux_schema; -use std::{sync::Arc, marker::PhantomData, time::{Duration, Instant}}; +use std::{ + sync::Arc, marker::PhantomData, time::{Duration, Instant}, collections::HashMap, +}; use log::trace; use codec::Encode; use parking_lot::Mutex; @@ -11,6 +13,7 @@ use sp_inherents::InherentData; use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; use sp_consensus::{ Error as ConsensusError, BlockImportParams, BlockOrigin, ForkChoiceStrategy, + ImportResult, BlockImport, }; use sp_consensus::import_queue::{Verifier, CacheKeyId, BasicQueue}; use sp_consensus_sassafras::{ @@ -21,7 +24,8 @@ use sp_consensus_sassafras::digest::{ }; use sp_consensus_sassafras::inherents::SassafrasInherentData; use sp_runtime::{generic::BlockId, Justification}; -use sp_runtime::traits::{Block as BlockT, Header, 
ProvideRuntimeApi}; +use sp_runtime::traits::{Block as BlockT, Header}; +use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sc_client::{Client, CallExecutor}; use sc_client_api::backend::{AuxStore, Backend}; @@ -101,8 +105,6 @@ impl SassafrasVerifier where let hash = header.hash(); let parent_hash = *header.parent_hash(); - let mut auxiliary = aux_schema::load_auxiliary(&parent_hash, self.api.as_ref()) - .map_err(Error::Client)?; let parent_header_metadata = self.client.header_metadata(parent_hash) .map_err(Error::FetchParentHeader)?; @@ -110,14 +112,9 @@ impl SassafrasVerifier where // First, Verify pre-runtime digest. let pre_digest = find_pre_digest::(&header)?; - // Verify that the slot is increasing, and not in the future. - if pre_digest.slot <= auxiliary.slot { - return Err(Error::SlotInPast.into()) - } if pre_digest.slot > slot_now { return Err(Error::SlotInFuture.into()) } - auxiliary.slot = pre_digest.slot; // Check the signature. let (author, block_weight) = auxiliary.validating.authorities @@ -218,6 +215,53 @@ impl Verifier for SassafrasVerifier = BasicQueue; +pub struct SassafrasBlockImport { + inner: I, + client: Arc>, + api: Arc, +} + +impl BlockImport for + SassafrasBlockImport +where + I: BlockImport> + Send + Sync, + I::Error: Into, + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + Client: AuxStore, + RA: Send + Sync, + PRA: ProvideRuntimeApi + ProvideCache, + PRA::Api: ApiExt, +{ + type Error = ConsensusError; + type Transaction = sp_api::TransactionFor; + + fn import_block( + &mut self, + mut block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + let hash = block.post_header().hash(); + let parent_hash = *block.header.parent_hash(); + let number = block.header.number().clone(); + + let mut auxiliary = aux_schema::load_auxiliary(&parent_hash, self.api.as_ref()) + .map_err(Error::Client)?; + + let pre_digest = find_pre_digest::(&block.header)?; + + // Verify that the 
slot is increasing, and not in the future. + if pre_digest.slot <= auxiliary.slot { + return Err(Error::SlotInPast.into()) + } + auxiliary.slot = pre_digest.slot; + + let import_result = self.inner.import_block(block, new_cache); + + import_result.map_err(Into::into) + } +} + #[derive(Default, Clone)] struct TimeSource(Arc, Vec<(Instant, u64)>)>>); diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index 1aa085c3da485..e92900be1f73b 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -200,7 +200,7 @@ impl ForkTree where data, hash: hash, number: number, - children: Vec::new(), + children: Vec::new(), }); self.rebalance(); From 6cb23663ee47ae59dde4d624e66231fd4ad6a89e Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 22 Jan 2020 11:52:46 +0100 Subject: [PATCH 10/75] Port over tests --- client/consensus/epochs/src/lib.rs | 317 ++++++++++++++++++++++++++++- 1 file changed, 316 insertions(+), 1 deletion(-) diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index 498764cde87f8..43c5c4e37ef4e 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -320,7 +320,7 @@ impl EpochChanges where /// Return the inner fork tree, useful for testing purposes. #[cfg(test)] - pub fn tree(&self) -> &ForkTree { + pub fn tree(&self) -> &ForkTree> { &self.inner } } @@ -330,3 +330,318 @@ pub type EpochChangesFor = EpochChanges<::Hash, N /// A shared epoch changes tree. 
pub type SharedEpochChanges = Arc>>; + +#[cfg(test)] +mod tests { + use super::*; + use super::Epoch as EpochT; + + #[derive(Debug, PartialEq)] + pub struct TestError; + + impl std::fmt::Display for TestError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "TestError") + } + } + + impl std::error::Error for TestError {} + + impl<'a, F: 'a , H: 'a + PartialEq + std::fmt::Debug> IsDescendentOfBuilder for &'a F + where F: Fn(&H, &H) -> Result + { + type Error = TestError; + type IsDescendentOf = Box Result + 'a>; + + fn build_is_descendent_of(&self, current: Option<(H, H)>) + -> Self::IsDescendentOf + { + let f = *self; + Box::new(move |base, head| { + let mut head = head; + + if let Some((ref c_head, ref c_parent)) = current { + if head == c_head { + if base == c_parent { + return Ok(true); + } else { + head = c_parent; + } + } + } + + f(base, head) + }) + } + } + + type Hash = [u8; 1]; + + #[derive(Debug, Clone, Eq, PartialEq)] + struct Epoch { + start_slot: SlotNumber, + duration: SlotNumber, + } + + impl EpochT for Epoch { + type NextEpochDescriptor = (); + + fn increment(&self, _: ()) -> Self { + Epoch { + start_slot: self.start_slot + self.duration, + duration: self.duration, + } + } + + fn end_slot(&self) -> SlotNumber { + self.start_slot + self.duration + } + + fn start_slot(&self) -> SlotNumber { + self.start_slot + } + } + + #[test] + fn genesis_epoch_is_created_but_not_imported() { + // + // A - B + // \ + // — C + // + let is_descendent_of = |base: &Hash, block: &Hash| -> Result { + match (base, *block) { + (b"A", b) => Ok(b == *b"B" || b == *b"C" || b == *b"D"), + (b"B", b) | (b"C", b) => Ok(b == *b"D"), + (b"0", _) => Ok(true), + _ => Ok(false), + } + }; + + let make_genesis = |slot| Epoch { + start_slot: slot, + duration: 100, + }; + + let epoch_changes = EpochChanges::new(); + let genesis_epoch = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"0", + 0, + 10101, + &make_genesis, + ).unwrap().unwrap(); + + 
match genesis_epoch { + ViableEpoch::Genesis(_) => {}, + _ => panic!("should be unimported genesis"), + }; + assert_eq!(genesis_epoch.as_ref(), &make_genesis(10101)); + + let genesis_epoch_2 = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"0", + 0, + 10102, + &make_genesis, + ).unwrap().unwrap(); + + match genesis_epoch_2 { + ViableEpoch::Genesis(_) => {}, + _ => panic!("should be unimported genesis"), + }; + assert_eq!(genesis_epoch_2.as_ref(), &make_genesis(10102)); + } + + #[test] + fn epoch_changes_between_blocks() { + // + // A - B + // \ + // — C + // + let is_descendent_of = |base: &Hash, block: &Hash| -> Result { + match (base, *block) { + (b"A", b) => Ok(b == *b"B" || b == *b"C" || b == *b"D"), + (b"B", b) | (b"C", b) => Ok(b == *b"D"), + (b"0", _) => Ok(true), + _ => Ok(false), + } + }; + + let make_genesis = |slot| Epoch { + start_slot: slot, + duration: 100, + }; + + let mut epoch_changes = EpochChanges::new(); + let genesis_epoch = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"0", + 0, + 100, + &make_genesis, + ).unwrap().unwrap(); + + assert_eq!(genesis_epoch.as_ref(), &make_genesis(100)); + + let import_epoch_1 = genesis_epoch.increment(()); + let epoch_1 = import_epoch_1.as_ref().clone(); + + epoch_changes.import( + &is_descendent_of, + *b"A", + 1, + *b"0", + import_epoch_1, + ).unwrap(); + let genesis_epoch = genesis_epoch.into_inner(); + + assert!(is_descendent_of(b"0", b"A").unwrap()); + + let end_slot = genesis_epoch.end_slot(); + assert_eq!(end_slot, epoch_1.start_slot); + + { + // x is still within the genesis epoch. + let x = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"A", + 1, + end_slot - 1, + &make_genesis, + ).unwrap().unwrap().into_inner(); + + assert_eq!(x, genesis_epoch); + } + + { + // x is now at the next epoch, because the block is now at the + // start slot of epoch 1. 
+ let x = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"A", + 1, + end_slot, + &make_genesis, + ).unwrap().unwrap().into_inner(); + + assert_eq!(x, epoch_1); + } + + { + // x is now at the next epoch, because the block is now after + // start slot of epoch 1. + let x = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"A", + 1, + epoch_1.end_slot() - 1, + &make_genesis, + ).unwrap().unwrap().into_inner(); + + assert_eq!(x, epoch_1); + } + } + + #[test] + fn two_block_ones_dont_conflict() { + // X - Y + // / + // 0 - A - B + // + let is_descendent_of = |base: &Hash, block: &Hash| -> Result { + match (base, *block) { + (b"A", b) => Ok(b == *b"B"), + (b"X", b) => Ok(b == *b"Y"), + (b"0", _) => Ok(true), + _ => Ok(false), + } + }; + + let duration = 100; + + let make_genesis = |slot| Epoch { + start_slot: slot, + duration, + }; + + let mut epoch_changes = EpochChanges::new(); + let next_descriptor = (); + + // insert genesis epoch for A + { + let genesis_epoch_a = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"0", + 0, + 100, + &make_genesis, + ).unwrap().unwrap(); + + epoch_changes.import( + &is_descendent_of, + *b"A", + 1, + *b"0", + genesis_epoch_a.increment(next_descriptor.clone()), + ).unwrap(); + + } + + // insert genesis epoch for X + { + let genesis_epoch_x = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"0", + 0, + 1000, + &make_genesis, + ).unwrap().unwrap(); + + epoch_changes.import( + &is_descendent_of, + *b"X", + 1, + *b"0", + genesis_epoch_x.increment(next_descriptor.clone()), + ).unwrap(); + } + + // now check that the genesis epochs for our respective block 1s + // respect the chain structure. 
+ { + let epoch_for_a_child = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"A", + 1, + 101, + &make_genesis, + ).unwrap().unwrap(); + + assert_eq!(epoch_for_a_child.into_inner(), make_genesis(100)); + + let epoch_for_x_child = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"X", + 1, + 1001, + &make_genesis, + ).unwrap().unwrap(); + + assert_eq!(epoch_for_x_child.into_inner(), make_genesis(1000)); + + let epoch_for_x_child_before_genesis = epoch_changes.epoch_for_child_of( + &is_descendent_of, + b"X", + 1, + 101, + &make_genesis, + ).unwrap(); + + // even though there is a genesis epoch at that slot, it's not in + // this chain. + assert!(epoch_for_x_child_before_genesis.is_none()); + } + } +} From 73ab9e33428910dae731b5b1641f25563437a8a1 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 22 Jan 2020 11:53:02 +0100 Subject: [PATCH 11/75] Remove epoch changes from BABE Refactored to a separate crate --- client/consensus/babe/src/epoch_changes.rs | 657 --------------------- 1 file changed, 657 deletions(-) delete mode 100644 client/consensus/babe/src/epoch_changes.rs diff --git a/client/consensus/babe/src/epoch_changes.rs b/client/consensus/babe/src/epoch_changes.rs deleted file mode 100644 index 01e957c4998ed..0000000000000 --- a/client/consensus/babe/src/epoch_changes.rs +++ /dev/null @@ -1,657 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
- -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Handling epoch changes in BABE. -//! -//! This exposes the `SharedEpochChanges`, which is a wrapper around a -//! persistent DAG superimposed over the forks of the blockchain. - -use std::sync::Arc; -use sp_consensus_babe::{Epoch, SlotNumber, NextEpochDescriptor}; -use fork_tree::ForkTree; -use parking_lot::{Mutex, MutexGuard}; -use sp_runtime::traits::{Block as BlockT, NumberFor, One, Zero}; -use codec::{Encode, Decode}; -use sc_client_api::utils::is_descendent_of; -use sp_blockchain::{HeaderMetadata, HeaderBackend, Error as ClientError}; -use std::ops::Add; - -/// A builder for `is_descendent_of` functions. -pub trait IsDescendentOfBuilder { - /// The error returned by the function. - type Error: std::error::Error; - /// A function that can tell you if the second parameter is a descendent of - /// the first. - type IsDescendentOf: Fn(&Hash, &Hash) -> Result; - - /// Build an `is_descendent_of` function. - /// - /// The `current` parameter can be `Some` with the details a fresh block whose - /// details aren't yet stored, but its parent is. - /// - /// The format of `current` when `Some` is `(current, current_parent)`. - fn build_is_descendent_of(&self, current: Option<(Hash, Hash)>) - -> Self::IsDescendentOf; -} - -/// Produce a descendent query object given the client. -pub(crate) fn descendent_query(client: &H) -> HeaderBackendDescendentBuilder<&H, Block> { - HeaderBackendDescendentBuilder(client, std::marker::PhantomData) -} - -/// Wrapper to get around unconstrained type errors when implementing -/// `IsDescendentOfBuilder` for header backends. 
-pub(crate) struct HeaderBackendDescendentBuilder(H, std::marker::PhantomData); - -impl<'a, H, Block> IsDescendentOfBuilder - for HeaderBackendDescendentBuilder<&'a H, Block> where - H: HeaderBackend + HeaderMetadata, - Block: BlockT, -{ - type Error = ClientError; - type IsDescendentOf = Box Result + 'a>; - - fn build_is_descendent_of(&self, current: Option<(Block::Hash, Block::Hash)>) - -> Self::IsDescendentOf - { - Box::new(is_descendent_of(self.0, current)) - } -} - -/// An unimported genesis epoch. -pub struct UnimportedGenesis(Epoch); - -/// The viable epoch under which a block can be verified. -/// -/// If this is the first non-genesis block in the chain, then it will -/// hold an `UnimportedGenesis` epoch. -pub enum ViableEpoch { - Genesis(UnimportedGenesis), - Regular(Epoch), -} - -impl From for ViableEpoch { - fn from(epoch: Epoch) -> ViableEpoch { - ViableEpoch::Regular(epoch) - } -} - -impl AsRef for ViableEpoch { - fn as_ref(&self) -> &Epoch { - match *self { - ViableEpoch::Genesis(UnimportedGenesis(ref e)) => e, - ViableEpoch::Regular(ref e) => e, - } - } -} - -impl ViableEpoch { - /// Extract the underlying epoch, disregarding the fact that a genesis - /// epoch may be unimported. - pub fn into_inner(self) -> Epoch { - match self { - ViableEpoch::Genesis(UnimportedGenesis(e)) => e, - ViableEpoch::Regular(e) => e, - } - } - - /// Increment the epoch, yielding an `IncrementedEpoch` to be imported - /// into the fork-tree. - pub fn increment(&self, next_descriptor: NextEpochDescriptor) -> IncrementedEpoch { - let next = self.as_ref().increment(next_descriptor); - let to_persist = match *self { - ViableEpoch::Genesis(UnimportedGenesis(ref epoch_0)) => - PersistedEpoch::Genesis(epoch_0.clone(), next), - ViableEpoch::Regular(_) => PersistedEpoch::Regular(next), - }; - - IncrementedEpoch(to_persist) - } -} - -/// The datatype encoded on disk. -// This really shouldn't be public, but the encode/decode derives force it to be. 
-#[derive(Clone, Encode, Decode)] -pub enum PersistedEpoch { - // epoch_0, epoch_1, - Genesis(Epoch, Epoch), - // epoch_n - Regular(Epoch), -} - -/// A fresh, incremented epoch to import into the underlying fork-tree. -/// -/// Create this with `ViableEpoch::increment`. -#[must_use = "Freshly-incremented epoch must be imported with `EpochChanges::import`"] -pub struct IncrementedEpoch(PersistedEpoch); - -impl AsRef for IncrementedEpoch { - fn as_ref(&self) -> &Epoch { - match self.0 { - PersistedEpoch::Genesis(_, ref epoch_1) => epoch_1, - PersistedEpoch::Regular(ref epoch_n) => epoch_n, - } - } -} - -/// Tree of all epoch changes across all *seen* forks. Data stored in tree is -/// the hash and block number of the block signaling the epoch change, and the -/// epoch that was signalled at that block. -/// -/// BABE special-cases the first epoch, epoch_0, by saying that it starts at -/// slot number of the first block in the chain. When bootstrapping a chain, -/// there can be multiple competing block #1s, so we have to ensure that the overlayed -/// DAG doesn't get confused. -/// -/// The first block of every epoch should be producing a descriptor for the next -/// epoch - this is checked in higher-level code. So the first block of epoch_0 contains -/// a descriptor for epoch_1. We special-case these and bundle them together in the -/// same DAG entry, pinned to a specific block #1. -/// -/// Further epochs (epoch_2, ..., epoch_n) each get their own entry. -#[derive(Clone, Encode, Decode)] -pub struct EpochChanges { - inner: ForkTree, -} - -// create a fake header hash which hasn't been included in the chain. -fn fake_head_hash + AsMut<[u8]> + Clone>(parent_hash: &H) -> H { - let mut h = parent_hash.clone(); - // dirty trick: flip the first bit of the parent hash to create a hash - // which has not been in the chain before (assuming a strong hash function). 
- h.as_mut()[0] ^= 0b10000000; - h -} - -impl EpochChanges where - Hash: PartialEq + AsRef<[u8]> + AsMut<[u8]> + Copy, - Number: Ord + One + Zero + Add + Copy, -{ - /// Create a new epoch-change tracker. - fn new() -> Self { - EpochChanges { inner: ForkTree::new() } - } - - /// Rebalances the tree of epoch changes so that it is sorted by length of - /// fork (longest fork first). - pub fn rebalance(&mut self) { - self.inner.rebalance() - } - - /// Prune out finalized epochs, except for the ancestor of the finalized - /// block. The given slot should be the slot number at which the finalized - /// block was authored. - pub fn prune_finalized>( - &mut self, - descendent_of_builder: D, - hash: &Hash, - number: Number, - slot: SlotNumber, - ) -> Result<(), fork_tree::Error> { - let is_descendent_of = descendent_of_builder - .build_is_descendent_of(None); - - let predicate = |epoch: &PersistedEpoch| match *epoch { - PersistedEpoch::Genesis(_, ref epoch_1) => - slot >= epoch_1.end_slot(), - PersistedEpoch::Regular(ref epoch_n) => - slot >= epoch_n.end_slot(), - }; - - // prune any epochs which could not be _live_ as of the children of the - // finalized block, i.e. re-root the fork tree to the oldest ancestor of - // (hash, number) where epoch.end_slot() >= finalized_slot - self.inner.prune( - hash, - &number, - &is_descendent_of, - &predicate, - )?; - - Ok(()) - } - - /// Finds the epoch for a child of the given block, assuming the given slot number. - /// - /// If the returned epoch is an `UnimportedGenesis` epoch, it should be imported into the - /// tree. - pub fn epoch_for_child_of, G>( - &self, - descendent_of_builder: D, - parent_hash: &Hash, - parent_number: Number, - slot_number: SlotNumber, - make_genesis: G, - ) -> Result, fork_tree::Error> - where G: FnOnce(SlotNumber) -> Epoch - { - // find_node_where will give you the node in the fork-tree which is an ancestor - // of the `parent_hash` by default. 
if the last epoch was signalled at the parent_hash, - // then it won't be returned. we need to create a new fake chain head hash which - // "descends" from our parent-hash. - let fake_head_hash = fake_head_hash(parent_hash); - - let is_descendent_of = descendent_of_builder - .build_is_descendent_of(Some((fake_head_hash, *parent_hash))); - - if parent_number == Zero::zero() { - // need to insert the genesis epoch. - let genesis_epoch = make_genesis(slot_number); - return Ok(Some(ViableEpoch::Genesis(UnimportedGenesis(genesis_epoch)))); - } - - // We want to find the deepest node in the tree which is an ancestor - // of our block and where the start slot of the epoch was before the - // slot of our block. The genesis special-case doesn't need to look - // at epoch_1 -- all we're doing here is figuring out which node - // we need. - let predicate = |epoch: &PersistedEpoch| match *epoch { - PersistedEpoch::Genesis(ref epoch_0, _) => - epoch_0.start_slot <= slot_number, - PersistedEpoch::Regular(ref epoch_n) => - epoch_n.start_slot <= slot_number, - }; - - self.inner.find_node_where( - &fake_head_hash, - &(parent_number + One::one()), - &is_descendent_of, - &predicate, - ) - .map(|n| n.map(|node| ViableEpoch::Regular(match node.data { - // Ok, we found our node. - // and here we figure out which of the internal epochs - // of a genesis node to use based on their start slot. - PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => - if epoch_1.start_slot <= slot_number { - epoch_1.clone() - } else { - epoch_0.clone() - }, - PersistedEpoch::Regular(ref epoch_n) => epoch_n.clone(), - }))) - } - - /// Import a new epoch-change, signalled at the given block. - /// - /// This assumes that the given block is prospective (i.e. has not been - /// imported yet), but its parent has. This is why the parent hash needs - /// to be provided. 
- pub fn import>( - &mut self, - descendent_of_builder: D, - hash: Hash, - number: Number, - parent_hash: Hash, - epoch: IncrementedEpoch, - ) -> Result<(), fork_tree::Error> { - let is_descendent_of = descendent_of_builder - .build_is_descendent_of(Some((hash, parent_hash))); - - let res = self.inner.import( - hash, - number, - epoch.0, - &is_descendent_of, - ); - - match res { - Ok(_) | Err(fork_tree::Error::Duplicate) => Ok(()), - Err(e) => Err(e), - } - } - - /// Return the inner fork tree, useful for testing purposes. - #[cfg(test)] - pub fn tree(&self) -> &ForkTree { - &self.inner - } -} - -/// Type alias to produce the epoch-changes tree from a block type. -pub type EpochChangesFor = EpochChanges<::Hash, NumberFor>; - -/// A shared epoch changes tree. -#[derive(Clone)] -pub struct SharedEpochChanges { - inner: Arc>>, -} - -impl SharedEpochChanges { - /// Create a new instance of the `SharedEpochChanges`. - pub fn new() -> Self { - SharedEpochChanges { - inner: Arc::new(Mutex::new(EpochChanges::<_, _>::new())) - } - } - - /// Lock the shared epoch changes, - pub fn lock(&self) -> MutexGuard> { - self.inner.lock() - } -} - -impl From> for SharedEpochChanges { - fn from(epoch_changes: EpochChangesFor) -> Self { - SharedEpochChanges { - inner: Arc::new(Mutex::new(epoch_changes)) - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[derive(Debug, PartialEq)] - pub struct TestError; - - impl std::fmt::Display for TestError { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "TestError") - } - } - - impl std::error::Error for TestError {} - - impl<'a, F: 'a , H: 'a + PartialEq + std::fmt::Debug> IsDescendentOfBuilder for &'a F - where F: Fn(&H, &H) -> Result - { - type Error = TestError; - type IsDescendentOf = Box Result + 'a>; - - fn build_is_descendent_of(&self, current: Option<(H, H)>) - -> Self::IsDescendentOf - { - let f = *self; - Box::new(move |base, head| { - let mut head = head; - - if let Some((ref c_head, ref 
c_parent)) = current { - if head == c_head { - if base == c_parent { - return Ok(true); - } else { - head = c_parent; - } - } - } - - f(base, head) - }) - } - } - - type Hash = [u8; 1]; - - #[test] - fn genesis_epoch_is_created_but_not_imported() { - // - // A - B - // \ - // — C - // - let is_descendent_of = |base: &Hash, block: &Hash| -> Result { - match (base, *block) { - (b"A", b) => Ok(b == *b"B" || b == *b"C" || b == *b"D"), - (b"B", b) | (b"C", b) => Ok(b == *b"D"), - (b"0", _) => Ok(true), - _ => Ok(false), - } - }; - - let make_genesis = |slot| Epoch { - epoch_index: 0, - start_slot: slot, - duration: 100, - authorities: Vec::new(), - randomness: [0; 32], - }; - - let epoch_changes = EpochChanges::new(); - let genesis_epoch = epoch_changes.epoch_for_child_of( - &is_descendent_of, - b"0", - 0, - 10101, - &make_genesis, - ).unwrap().unwrap(); - - match genesis_epoch { - ViableEpoch::Genesis(_) => {}, - _ => panic!("should be unimported genesis"), - }; - assert_eq!(genesis_epoch.as_ref(), &make_genesis(10101)); - - let genesis_epoch_2 = epoch_changes.epoch_for_child_of( - &is_descendent_of, - b"0", - 0, - 10102, - &make_genesis, - ).unwrap().unwrap(); - - match genesis_epoch_2 { - ViableEpoch::Genesis(_) => {}, - _ => panic!("should be unimported genesis"), - }; - assert_eq!(genesis_epoch_2.as_ref(), &make_genesis(10102)); - } - - #[test] - fn epoch_changes_between_blocks() { - // - // A - B - // \ - // — C - // - let is_descendent_of = |base: &Hash, block: &Hash| -> Result { - match (base, *block) { - (b"A", b) => Ok(b == *b"B" || b == *b"C" || b == *b"D"), - (b"B", b) | (b"C", b) => Ok(b == *b"D"), - (b"0", _) => Ok(true), - _ => Ok(false), - } - }; - - let make_genesis = |slot| Epoch { - epoch_index: 0, - start_slot: slot, - duration: 100, - authorities: Vec::new(), - randomness: [0; 32], - }; - - let mut epoch_changes = EpochChanges::new(); - let genesis_epoch = epoch_changes.epoch_for_child_of( - &is_descendent_of, - b"0", - 0, - 100, - &make_genesis, - 
).unwrap().unwrap(); - - assert_eq!(genesis_epoch.as_ref(), &make_genesis(100)); - - let import_epoch_1 = genesis_epoch.increment(NextEpochDescriptor { - authorities: Vec::new(), - randomness: [1; 32], - }); - let epoch_1 = import_epoch_1.as_ref().clone(); - - epoch_changes.import( - &is_descendent_of, - *b"A", - 1, - *b"0", - import_epoch_1, - ).unwrap(); - let genesis_epoch = genesis_epoch.into_inner(); - - assert!(is_descendent_of(b"0", b"A").unwrap()); - - let end_slot = genesis_epoch.end_slot(); - assert_eq!(end_slot, epoch_1.start_slot); - - { - // x is still within the genesis epoch. - let x = epoch_changes.epoch_for_child_of( - &is_descendent_of, - b"A", - 1, - end_slot - 1, - &make_genesis, - ).unwrap().unwrap().into_inner(); - - assert_eq!(x, genesis_epoch); - } - - { - // x is now at the next epoch, because the block is now at the - // start slot of epoch 1. - let x = epoch_changes.epoch_for_child_of( - &is_descendent_of, - b"A", - 1, - end_slot, - &make_genesis, - ).unwrap().unwrap().into_inner(); - - assert_eq!(x, epoch_1); - } - - { - // x is now at the next epoch, because the block is now after - // start slot of epoch 1. 
- let x = epoch_changes.epoch_for_child_of( - &is_descendent_of, - b"A", - 1, - epoch_1.end_slot() - 1, - &make_genesis, - ).unwrap().unwrap().into_inner(); - - assert_eq!(x, epoch_1); - } - } - - #[test] - fn two_block_ones_dont_conflict() { - // X - Y - // / - // 0 - A - B - // - let is_descendent_of = |base: &Hash, block: &Hash| -> Result { - match (base, *block) { - (b"A", b) => Ok(b == *b"B"), - (b"X", b) => Ok(b == *b"Y"), - (b"0", _) => Ok(true), - _ => Ok(false), - } - }; - - let duration = 100; - - let make_genesis = |slot| Epoch { - epoch_index: 0, - start_slot: slot, - duration, - authorities: Vec::new(), - randomness: [0; 32], - }; - - let mut epoch_changes = EpochChanges::new(); - let next_descriptor = NextEpochDescriptor { - authorities: Vec::new(), - randomness: [0; 32], - }; - - // insert genesis epoch for A - { - let genesis_epoch_a = epoch_changes.epoch_for_child_of( - &is_descendent_of, - b"0", - 0, - 100, - &make_genesis, - ).unwrap().unwrap(); - - epoch_changes.import( - &is_descendent_of, - *b"A", - 1, - *b"0", - genesis_epoch_a.increment(next_descriptor.clone()), - ).unwrap(); - - } - - // insert genesis epoch for X - { - let genesis_epoch_x = epoch_changes.epoch_for_child_of( - &is_descendent_of, - b"0", - 0, - 1000, - &make_genesis, - ).unwrap().unwrap(); - - epoch_changes.import( - &is_descendent_of, - *b"X", - 1, - *b"0", - genesis_epoch_x.increment(next_descriptor.clone()), - ).unwrap(); - } - - // now check that the genesis epochs for our respective block 1s - // respect the chain structure. 
- { - let epoch_for_a_child = epoch_changes.epoch_for_child_of( - &is_descendent_of, - b"A", - 1, - 101, - &make_genesis, - ).unwrap().unwrap(); - - assert_eq!(epoch_for_a_child.into_inner(), make_genesis(100)); - - let epoch_for_x_child = epoch_changes.epoch_for_child_of( - &is_descendent_of, - b"X", - 1, - 1001, - &make_genesis, - ).unwrap().unwrap(); - - assert_eq!(epoch_for_x_child.into_inner(), make_genesis(1000)); - - let epoch_for_x_child_before_genesis = epoch_changes.epoch_for_child_of( - &is_descendent_of, - b"X", - 1, - 101, - &make_genesis, - ).unwrap(); - - // even though there is a genesis epoch at that slot, it's not in - // this chain. - assert!(epoch_for_x_child_before_genesis.is_none()); - } - } -} From 50a845c063d9803c2ad349e6734d884c49e440ae Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 22 Jan 2020 11:56:30 +0100 Subject: [PATCH 12/75] Add consensus-epochs to babe crate --- Cargo.lock | 1 + client/consensus/babe/Cargo.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index 5cbd3e6fbac1d..218fb03ed25d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5286,6 +5286,7 @@ dependencies = [ "sc-block-builder 0.8.0", "sc-client 0.8.0", "sc-client-api 2.0.0", + "sc-consensus-epochs 0.8.0", "sc-consensus-slots 0.8.0", "sc-consensus-uncles 0.8.0", "sc-executor 0.8.0", diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 55b0e0cf20217..77cec6001e8fc 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -21,6 +21,7 @@ sc-telemetry = { version = "2.0.0", path = "../../telemetry" } sc-keystore = { version = "2.0.0", path = "../../keystore" } sc-client-api = { version = "2.0.0", path = "../../api" } sc-client = { version = "0.8", path = "../../" } +sc-consensus-epochs = { version = "0.8", path = "../epochs" } sp-api = { version = "2.0.0", path = "../../../primitives/api" } sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } 
sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } From 02ea2a5ec85958eb8925dd8bb8864015102a6fa2 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 22 Jan 2020 12:21:44 +0100 Subject: [PATCH 13/75] Make Babe use the refactored EpochChanges struct --- client/consensus/babe/src/authorship.rs | 13 +++-- client/consensus/babe/src/aux_schema.rs | 16 +++--- client/consensus/babe/src/lib.rs | 68 +++++++++++++++++------ client/consensus/babe/src/verification.rs | 10 ++-- client/consensus/epochs/src/lib.rs | 14 +++-- primitives/consensus/babe/src/digest.rs | 28 +++++----- primitives/consensus/babe/src/lib.rs | 38 +------------ 7 files changed, 97 insertions(+), 90 deletions(-) diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index 62667ef3978c0..e38f891f88799 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -18,12 +18,13 @@ use merlin::Transcript; use sp_consensus_babe::{AuthorityId, BabeAuthorityWeight, BABE_ENGINE_ID, BABE_VRF_PREFIX}; -use sp_consensus_babe::{Epoch, SlotNumber, AuthorityPair, BabePreDigest, BabeConfiguration}; +use sp_consensus_babe::{SlotNumber, AuthorityPair, PreDigest, BabeConfiguration}; use sp_core::{U256, blake2_256}; use codec::Encode; use schnorrkel::vrf::VRFInOut; use sp_core::Pair; use sc_keystore::KeyStorePtr; +use super::Epoch; /// Calculates the primary selection threshold for a given authority, taking /// into account `c` (`1 - c` represents the probability of a slot being empty). 
@@ -104,7 +105,7 @@ fn claim_secondary_slot( authorities: &[(AuthorityId, BabeAuthorityWeight)], keystore: &KeyStorePtr, randomness: [u8; 32], -) -> Option<(BabePreDigest, AuthorityPair)> { +) -> Option<(PreDigest, AuthorityPair)> { if authorities.is_empty() { return None; } @@ -124,7 +125,7 @@ fn claim_secondary_slot( }) { if pair.public() == *expected_author { - let pre_digest = BabePreDigest::Secondary { + let pre_digest = PreDigest::Secondary { slot_number, authority_index: authority_index as u32, }; @@ -145,7 +146,7 @@ pub(super) fn claim_slot( epoch: &Epoch, config: &BabeConfiguration, keystore: &KeyStorePtr, -) -> Option<(BabePreDigest, AuthorityPair)> { +) -> Option<(PreDigest, AuthorityPair)> { claim_primary_slot(slot_number, epoch, config.c, keystore) .or_else(|| { if config.secondary_slots { @@ -175,7 +176,7 @@ fn claim_primary_slot( epoch: &Epoch, c: (u64, u64), keystore: &KeyStorePtr, -) -> Option<(BabePreDigest, AuthorityPair)> { +) -> Option<(PreDigest, AuthorityPair)> { let Epoch { authorities, randomness, epoch_index, .. } = epoch; let keystore = keystore.read(); @@ -196,7 +197,7 @@ fn claim_primary_slot( let pre_digest = get_keypair(&pair) .vrf_sign_after_check(transcript, |inout| super::authorship::check_primary_threshold(inout, threshold)) .map(|s| { - BabePreDigest::Primary { + PreDigest::Primary { slot_number, vrf_output: s.0.to_output(), vrf_proof: s.1, diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index 170c2bf42d4e0..2f64157f22951 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -16,6 +16,8 @@ //! Schema for BABE epoch changes in the aux-db. 
+use std::sync::Arc; +use parking_lot::Mutex; use log::info; use codec::{Decode, Encode}; @@ -23,8 +25,8 @@ use sc_client_api::backend::AuxStore; use sp_blockchain::{Result as ClientResult, Error as ClientError}; use sp_runtime::traits::Block as BlockT; use sp_consensus_babe::BabeBlockWeight; - -use super::{epoch_changes::EpochChangesFor, SharedEpochChanges}; +use sc_consensus_epochs::{EpochChangesFor, SharedEpochChanges}; +use crate::Epoch; const BABE_EPOCH_CHANGES: &[u8] = b"babe_epoch_changes"; @@ -49,14 +51,14 @@ fn load_decode(backend: &B, key: &[u8]) -> ClientResult> /// Load or initialize persistent epoch change data from backend. pub(crate) fn load_epoch_changes( backend: &B, -) -> ClientResult> { - let epoch_changes = load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES)? - .map(Into::into) +) -> ClientResult> { + let epoch_changes = load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES)? + .map(|v| Arc::new(Mutex::new(v))) .unwrap_or_else(|| { info!(target: "babe", "Creating empty BABE epoch changes on what appears to be first startup." ); - SharedEpochChanges::new() + SharedEpochChanges::::default() }); // rebalance the tree after deserialization. this isn't strictly necessary @@ -70,7 +72,7 @@ pub(crate) fn load_epoch_changes( /// Update the epoch changes on disk after a change. 
pub(crate) fn write_epoch_changes( - epoch_changes: &EpochChangesFor, + epoch_changes: &EpochChangesFor, write_aux: F, ) -> R where F: FnOnce(&[(&'static [u8], &[u8])]) -> R, diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index deb1f8c266efb..4a1e3089e2c04 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -59,8 +59,8 @@ #![forbid(unsafe_code)] #![warn(missing_docs)] pub use sp_consensus_babe::{ - BabeApi, ConsensusLog, BABE_ENGINE_ID, BabePreDigest, SlotNumber, BabeConfiguration, - CompatibleDigestItem, + BabeApi, ConsensusLog, BABE_ENGINE_ID, PreDigest, SlotNumber, BabeConfiguration, + CompatibleDigestItem, BabeAuthorityWeight, VRF_OUTPUT_LENGTH, }; pub use sp_consensus::SyncOracle; use std::{collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}}; @@ -101,26 +101,60 @@ use log::{warn, debug, info, trace}; use sc_consensus_slots::{ SlotWorker, SlotInfo, SlotCompatible, StorageChanges, CheckedHeader, check_equivocation, }; -use epoch_changes::descendent_query; +use sc_consensus_epochs::{descendent_query, SharedEpochChanges, EpochChangesFor, Epoch as EpochT}; use sp_blockchain::{ Result as ClientResult, Error as ClientError, HeaderBackend, ProvideCache, HeaderMetadata }; use schnorrkel::SignatureError; - +use codec::{Encode, Decode}; use sp_api::ApiExt; mod aux_schema; mod verification; -mod epoch_changes; mod authorship; #[cfg(test)] mod tests; pub use sp_consensus_babe::{ - AuthorityId, AuthorityPair, AuthoritySignature, Epoch, NextEpochDescriptor, + AuthorityId, AuthorityPair, AuthoritySignature, NextEpochDescriptor, }; -pub use epoch_changes::{EpochChanges, EpochChangesFor, SharedEpochChanges}; +/// BABE epoch information +#[derive(Decode, Encode, Default, PartialEq, Eq, Clone, Debug)] +pub struct Epoch { + /// The epoch index + pub epoch_index: u64, + /// The starting slot of the epoch, + pub start_slot: SlotNumber, + /// The duration of this epoch + pub duration: 
SlotNumber, + /// The authorities and their weights + pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + /// Randomness for this epoch + pub randomness: [u8; VRF_OUTPUT_LENGTH], +} + +impl EpochT for Epoch { + type NextEpochDescriptor = NextEpochDescriptor; + + fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { + Epoch { + epoch_index: self.epoch_index + 1, + start_slot: self.start_slot + self.duration, + duration: self.duration, + authorities: descriptor.authorities, + randomness: descriptor.randomness, + } + } + + fn start_slot(&self) -> SlotNumber { + self.start_slot + } + + fn end_slot(&self) -> SlotNumber { + self.start_slot + self.duration + } +} #[derive(derive_more::Display, Debug)] enum Error { @@ -343,7 +377,7 @@ struct BabeWorker { sync_oracle: SO, force_authoring: bool, keystore: KeyStorePtr, - epoch_changes: SharedEpochChanges, + epoch_changes: SharedEpochChanges, config: Config, } @@ -361,7 +395,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork Error: std::error::Error + Send + From + From + 'static, { type EpochData = Epoch; - type Claim = (BabePreDigest, AuthorityPair); + type Claim = (PreDigest, AuthorityPair); type SyncOracle = SO; type CreateProposer = Pin> + Send + 'static @@ -535,12 +569,12 @@ impl SlotWorker for BabeWorker where /// Extract the BABE pre digest from the given header. Pre-runtime digests are /// mandatory, the function will return `Err` if none is found. 
-fn find_pre_digest(header: &B::Header) -> Result> +fn find_pre_digest(header: &B::Header) -> Result> { // genesis block doesn't contain a pre digest so let's generate a // dummy one to not break any invariants in the rest of the code if header.number().is_zero() { - return Ok(BabePreDigest::Secondary { + return Ok(PreDigest::Secondary { slot_number: 0, authority_index: 0, }); @@ -599,7 +633,7 @@ impl SlotCompatible for TimeSource { #[derive(Clone)] pub struct BabeLink { time_source: TimeSource, - epoch_changes: SharedEpochChanges, + epoch_changes: SharedEpochChanges, config: Config, } /// A verifier for Babe blocks. @@ -608,7 +642,7 @@ pub struct BabeVerifier { api: Arc, inherent_data_providers: sp_inherents::InherentDataProviders, config: Config, - epoch_changes: SharedEpochChanges, + epoch_changes: SharedEpochChanges, time_source: TimeSource, } @@ -859,7 +893,7 @@ pub struct BabeBlockImport { inner: I, client: Arc>, api: Arc, - epoch_changes: SharedEpochChanges, + epoch_changes: SharedEpochChanges, config: Config, } @@ -879,7 +913,7 @@ impl BabeBlockImport { fn new( client: Arc>, api: Arc, - epoch_changes: SharedEpochChanges, + epoch_changes: SharedEpochChanges, block_import: I, config: Config, ) -> Self { @@ -1118,7 +1152,7 @@ impl BlockImport for BabeBlockImport( client: &Client, - epoch_changes: &mut EpochChangesFor, + epoch_changes: &mut EpochChangesFor, ) -> Result<(), ConsensusError> where Block: BlockT, E: CallExecutor + Send + Sync, @@ -1165,7 +1199,7 @@ pub fn block_import( RA: Send + Sync, Client: AuxStore, { - let epoch_changes = aux_schema::load_epoch_changes(&*client)?; + let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; let link = BabeLink { epoch_changes: epoch_changes.clone(), time_source: Default::default(), diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index ee5a99ec9d533..45dd31acbb8ca 100644 --- a/client/consensus/babe/src/verification.rs +++ 
b/client/consensus/babe/src/verification.rs @@ -18,11 +18,11 @@ use schnorrkel::vrf::{VRFOutput, VRFProof}; use sp_runtime::{traits::Header, traits::DigestItemFor}; use sp_core::{Pair, Public}; -use sp_consensus_babe::{Epoch, BabePreDigest, CompatibleDigestItem, AuthorityId}; +use sp_consensus_babe::{PreDigest, CompatibleDigestItem, AuthorityId}; use sp_consensus_babe::{AuthoritySignature, SlotNumber, AuthorityIndex, AuthorityPair}; use sc_consensus_slots::CheckedHeader; use log::{debug, trace}; -use super::{find_pre_digest, babe_err, BlockT, Error}; +use super::{find_pre_digest, babe_err, Epoch, BlockT, Error}; use super::authorship::{make_transcript, calculate_primary_threshold, check_primary_threshold, secondary_slot_author}; /// BABE verification parameters @@ -32,7 +32,7 @@ pub(super) struct VerificationParams<'a, B: 'a + BlockT> { /// the pre-digest of the header being verified. this is optional - if prior /// verification code had to read it, it can be included here to avoid duplicate /// work. - pub(super) pre_digest: Option, + pub(super) pre_digest: Option, /// the slot number of the current time. pub(super) slot_now: SlotNumber, /// epoch descriptor of the epoch this block _should_ be under, if it's valid. 
@@ -93,7 +93,7 @@ pub(super) fn check_header( }; match &pre_digest { - BabePreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { + PreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { debug!(target: "babe", "Verifying Primary block"); let digest = (vrf_output, vrf_proof, *authority_index, *slot_number); @@ -106,7 +106,7 @@ pub(super) fn check_header( config.c, )?; }, - BabePreDigest::Secondary { authority_index, slot_number } if config.secondary_slots => { + PreDigest::Secondary { authority_index, slot_number } if config.secondary_slots => { debug!(target: "babe", "Verifying Secondary block"); let digest = (*authority_index, *slot_number); diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index 43c5c4e37ef4e..8031662e3d9e6 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -179,16 +179,20 @@ fn fake_head_hash + AsMut<[u8]> + Clone>(parent_hash: &H) -> H { h } +impl Default for EpochChanges where + Hash: PartialEq, + Number: Ord, +{ + fn default() -> Self { + EpochChanges { inner: ForkTree::new() } + } +} + impl EpochChanges where Hash: PartialEq + AsRef<[u8]> + AsMut<[u8]> + Copy, Number: Ord + One + Zero + Add + Copy, Epoch: crate::Epoch + Clone, { - /// Create a new epoch-change tracker. - pub fn new() -> Self { - EpochChanges { inner: ForkTree::new() } - } - /// Rebalances the tree of epoch changes so that it is sorted by length of /// fork (longest fork first). pub fn rebalance(&mut self) { diff --git a/primitives/consensus/babe/src/digest.rs b/primitives/consensus/babe/src/digest.rs index 8ea066699082b..7ec0f9b977cc7 100644 --- a/primitives/consensus/babe/src/digest.rs +++ b/primitives/consensus/babe/src/digest.rs @@ -67,16 +67,16 @@ impl PreDigest { /// Returns the slot number of the pre digest. pub fn authority_index(&self) -> AuthorityIndex { match self { - BabePreDigest::Primary { authority_index, .. 
} => *authority_index, - BabePreDigest::Secondary { authority_index, .. } => *authority_index, + PreDigest::Primary { authority_index, .. } => *authority_index, + PreDigest::Secondary { authority_index, .. } => *authority_index, } } /// Returns the slot number of the pre digest. pub fn slot_number(&self) -> SlotNumber { match self { - BabePreDigest::Primary { slot_number, .. } => *slot_number, - BabePreDigest::Secondary { slot_number, .. } => *slot_number, + PreDigest::Primary { slot_number, .. } => *slot_number, + PreDigest::Secondary { slot_number, .. } => *slot_number, } } @@ -84,8 +84,8 @@ impl PreDigest { /// of the chain. pub fn added_weight(&self) -> crate::BabeBlockWeight { match self { - BabePreDigest::Primary { .. } => 1, - BabePreDigest::Secondary { .. } => 0, + PreDigest::Primary { .. } => 1, + PreDigest::Secondary { .. } => 0, } } } @@ -124,8 +124,8 @@ impl RawPreDigest { /// Returns the slot number of the pre digest. pub fn slot_number(&self) -> SlotNumber { match self { - RawBabePreDigest::Primary { slot_number, .. } => *slot_number, - RawBabePreDigest::Secondary { slot_number, .. } => *slot_number, + RawPreDigest::Primary { slot_number, .. } => *slot_number, + RawPreDigest::Secondary { slot_number, .. } => *slot_number, } } } @@ -169,20 +169,20 @@ impl codec::EncodeLike for PreDigest {} impl Decode for PreDigest { fn decode(i: &mut R) -> Result { let pre_digest = match Decode::decode(i)? 
{ - RawBabePreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { + RawPreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { // Verify (at compile time) that the sizes in babe_primitives are correct let _: [u8; super::VRF_OUTPUT_LENGTH] = vrf_output; let _: [u8; super::VRF_PROOF_LENGTH] = vrf_proof; - BabePreDigest::Primary { + PreDigest::Primary { vrf_proof: VRFProof::from_bytes(&vrf_proof).map_err(convert_error)?, vrf_output: VRFOutput::from_bytes(&vrf_output).map_err(convert_error)?, authority_index, slot_number, } }, - RawBabePreDigest::Secondary { authority_index, slot_number } => { - BabePreDigest::Secondary { authority_index, slot_number } + RawPreDigest::Secondary { authority_index, slot_number } => { + PreDigest::Secondary { authority_index, slot_number } }, }; @@ -224,11 +224,11 @@ pub trait CompatibleDigestItem: Sized { impl CompatibleDigestItem for DigestItem where Hash: Debug + Send + Sync + Eq + Clone + Codec + 'static { - fn babe_pre_digest(digest: BabePreDigest) -> Self { + fn babe_pre_digest(digest: PreDigest) -> Self { DigestItem::PreRuntime(BABE_ENGINE_ID, digest.encode()) } - fn as_babe_pre_digest(&self) -> Option { + fn as_babe_pre_digest(&self) -> Option { self.try_to(OpaqueDigestItemId::PreRuntime(&BABE_ENGINE_ID)) } diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 8cbd9acda3d07..d67aebacab815 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -27,8 +27,8 @@ use sp_std::vec::Vec; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; #[cfg(feature = "std")] -pub use digest::{BabePreDigest, CompatibleDigestItem}; -pub use digest::{BABE_VRF_PREFIX, RawBabePreDigest, NextEpochDescriptor}; +pub use digest::{PreDigest, CompatibleDigestItem}; +pub use digest::{RawPreDigest, NextEpochDescriptor}; mod app { use sp_application_crypto::{app_crypto, key_types::BABE, sr25519}; @@ -81,40 +81,6 @@ pub type 
BabeAuthorityWeight = u64; /// The weight of a BABE block. pub type BabeBlockWeight = u32; -/// BABE epoch information -#[derive(Decode, Encode, Default, PartialEq, Eq, Clone, RuntimeDebug)] -pub struct Epoch { - /// The epoch index - pub epoch_index: u64, - /// The starting slot of the epoch, - pub start_slot: SlotNumber, - /// The duration of this epoch - pub duration: SlotNumber, - /// The authorities and their weights - pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - /// Randomness for this epoch - pub randomness: [u8; VRF_OUTPUT_LENGTH], -} - -impl Epoch { - /// "increment" the epoch, with given descriptor for the next. - pub fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { - Epoch { - epoch_index: self.epoch_index + 1, - start_slot: self.start_slot + self.duration, - duration: self.duration, - authorities: descriptor.authorities, - randomness: descriptor.randomness, - } - } - - /// Produce the "end slot" of the epoch. This is NOT inclusive to the epoch, - // i.e. the slots covered by the epoch are `self.start_slot .. self.end_slot()`. - pub fn end_slot(&self) -> SlotNumber { - self.start_slot + self.duration - } -} - /// An consensus log item for BABE. 
#[derive(Decode, Encode, Clone, PartialEq, Eq)] pub enum ConsensusLog { From f978bbe195561ed21e4f419b04b854fdf2de164c Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Thu, 30 Jan 2020 14:04:11 +0100 Subject: [PATCH 14/75] [WIP] Mutable fork tree items --- client/consensus/sassafras/src/aux_schema.rs | 18 ---- client/consensus/sassafras/src/lib.rs | 52 +++++++++++ utils/fork-tree/src/lib.rs | 93 +++++++++++++++++++- 3 files changed, 141 insertions(+), 22 deletions(-) diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs index 0708e419ce229..c09cfe3c3453e 100644 --- a/client/consensus/sassafras/src/aux_schema.rs +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -7,24 +7,6 @@ use sp_consensus_sassafras::{ use sp_blockchain::{Result as ClientResult, Error as ClientError}; use sc_client_api::AuxStore; -#[derive(Clone, Debug, Encode, Decode, Default)] -pub struct PoolAuxiliary { - pub proofs: Vec, - pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, - pub randomness: Randomness, - pub epoch: EpochNumber, -} - -#[derive(Clone, Debug, Encode, Decode, Default)] -pub struct Auxiliary { - pub total_weight: SassafrasBlockWeight, - pub weight: SassafrasBlockWeight, - pub slot: SlotNumber, - - pub publishing: PoolAuxiliary, - pub validating: PoolAuxiliary, -} - pub const AUXILIARY_KEY: &[u8] = b"sassafras_auxiliary"; fn load_decode(backend: &B, key: &[u8]) -> ClientResult> diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index ad4c6b4ff9364..6e07c80d4567b 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -32,6 +32,58 @@ use sc_client_api::backend::{AuxStore, Backend}; use sc_consensus_slots::SlotCompatible; use crate::aux_schema::{AUXILIARY_KEY, PoolAuxiliary}; +/// Validator set of a particular epoch, can be either publishing or validating. +pub struct ValidatorSet { + /// Proofs of all VRFs collected. 
+ pub proofs: Vec, + /// The authorities and their weights. + pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + /// Randomness for this epoch. + pub randomness: Randomness, +} + +/// Epoch data for Sassafras +pub struct Epoch { + /// Start slot of the epoch. + pub start_slot: SlotNumber, + /// Duration of this epoch. + pub duration: SlotNumber, + /// Epoch index. + pub epoch_index: u64, + + /// Publishing validator set. The set will start validating block in the next epoch. + pub publishing: ValidatorSet, + /// Validating validator set. The set validates block in the current epoch. + pub validating: ValidatorSet, +} + +impl EpochT for Epoch { + type NextEpochDescriptor = NextEpochDescriptor; + + fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { + Epoch { + epoch_index: self.epoch_index + 1, + start_slot: self.start_slot + self.duration, + duration: self.duration, + + validating: self.publishing.clone(), + publishing: ValidatorSet { + proofs: Vec::new(), + authorities: descriptor.authorities, + randomness: descriptor.randomness, + }, + } + } + + fn start_slot(&self) -> SlotNumber { + self.start_slot + } + + fn end_slot(&self) -> SlotNumber { + self.start_slot + self.duration + } +} + #[derive(derive_more::Display, Debug)] enum Error { #[display(fmt = "Could not extract timestamp and slot: {:?}", _0)] diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index e92900be1f73b..a372cd63f76a8 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -232,10 +232,10 @@ impl ForkTree where number: &N, is_descendent_of: &F, predicate: &P, - ) -> Result>, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, + ) -> Result>, Error> where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, { // search for node starting from all roots for root in self.roots.iter() { @@ -250,6 +250,31 @@ impl ForkTree where Ok(None) } + /// Same as `find_node_where`, but returns 
mutable reference. + pub fn find_node_mut_where( + &mut self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result>, Error> where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + // search for node starting from all roots + for root in self.roots.iter_mut() { + let node = root.find_node_mut_where(hash, number, is_descendent_of, predicate)?; + + // found the node, early exit + if let FindOutcome::Found(node) = node { + return Ok(Some(node)); + } + } + + Ok(None) + } + /// Finalize a root in the tree and return it, return `None` in case no root /// with the given hash exists. All other roots are pruned, and the children /// of the finalized node become the new roots. @@ -663,6 +688,66 @@ mod node_implementation { // the block was a descendent. Ok(FindOutcome::Failure(is_descendent_of)) } + + /// Find a node in the tree that is the deepest ancestor of the given + /// block hash which also passes the given predicate, backtracking + /// when the predicate fails. + /// The given function `is_descendent_of` should return `true` if the second hash (target) + /// is a descendent of the first hash (base). + // FIXME: it would be useful if this returned a mutable reference but + // rustc can't deal with lifetimes properly. an option would be to try + // an iterative definition instead. + pub fn find_node_mut_where( + &mut self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result>, Error> + where E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + // stop searching this branch + if *number < self.number { + return Ok(FindOutcome::Failure(false)); + } + + let mut known_descendent_of = false; + + // continue depth-first search through all children + for node in self.children.iter_mut() { + // found node, early exit + match node.find_node_mut_where(hash, number, is_descendent_of, predicate)? 
{ + FindOutcome::Abort => return Ok(FindOutcome::Abort), + FindOutcome::Found(x) => return Ok(FindOutcome::Found(x)), + FindOutcome::Failure(true) => { + // if the block was a descendent of this child, + // then it cannot be a descendent of any others, + // so we don't search them. + known_descendent_of = true; + break; + }, + FindOutcome::Failure(false) => {}, + } + } + + // node not found in any of the descendents, if the node we're + // searching for is a descendent of this node then we will stop the + // search here, since there aren't any more children and we found + // the correct node so we don't want to backtrack. + let is_descendent_of = known_descendent_of || is_descendent_of(&self.hash, hash)?; + if is_descendent_of { + // if the predicate passes we return the node + if predicate(&self.data) { + return Ok(FindOutcome::Found(self)); + } + } + + // otherwise, tell our ancestor that we failed, and whether + // the block was a descendent. + Ok(FindOutcome::Failure(is_descendent_of)) + } } } From 7239f6ff0552f57b24d48c8ee83fe1aa4c79cb84 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 31 Jan 2020 07:35:13 +0100 Subject: [PATCH 15/75] Working impl of find_node_mut via indexing method --- utils/fork-tree/src/lib.rs | 104 ++++++++++++++++++++----------------- 1 file changed, 56 insertions(+), 48 deletions(-) diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index a372cd63f76a8..93789f5b8c635 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -634,16 +634,17 @@ mod node_implementation { /// when the predicate fails. /// The given function `is_descendent_of` should return `true` if the second hash (target) /// is a descendent of the first hash (base). - // FIXME: it would be useful if this returned a mutable reference but - // rustc can't deal with lifetimes properly. an option would be to try - // an iterative definition instead. 
- pub fn find_node_where( + /// + /// The returned indexes are from last to first, meaning the last is the least significant + /// child, and the first is the most significant child. An empty list means that the + /// current node is the result. + pub fn find_node_index_where( &self, hash: &H, number: &N, is_descendent_of: &F, predicate: &P, - ) -> Result>, Error> + ) -> Result>, Error> where E: std::error::Error, F: Fn(&H, &H) -> Result, P: Fn(&V) -> bool, @@ -656,11 +657,14 @@ mod node_implementation { let mut known_descendent_of = false; // continue depth-first search through all children - for node in self.children.iter() { + for (i, node) in self.children.iter().enumerate() { // found node, early exit - match node.find_node_where(hash, number, is_descendent_of, predicate)? { + match node.find_node_index_where(hash, number, is_descendent_of, predicate)? { FindOutcome::Abort => return Ok(FindOutcome::Abort), - FindOutcome::Found(x) => return Ok(FindOutcome::Found(x)), + FindOutcome::Found(mut x) => { + x.push(i); + return Ok(FindOutcome::Found(x)) + }, FindOutcome::Failure(true) => { // if the block was a descendent of this child, // then it cannot be a descendent of any others, @@ -680,7 +684,7 @@ mod node_implementation { if is_descendent_of { // if the predicate passes we return the node if predicate(&self.data) { - return Ok(FindOutcome::Found(self)); + return Ok(FindOutcome::Found(Vec::new())); } } @@ -694,9 +698,38 @@ mod node_implementation { /// when the predicate fails. /// The given function `is_descendent_of` should return `true` if the second hash (target) /// is a descendent of the first hash (base). - // FIXME: it would be useful if this returned a mutable reference but - // rustc can't deal with lifetimes properly. an option would be to try - // an iterative definition instead. 
+ pub fn find_node_where( + &self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result>, Error> + where E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + let outcome = self.find_node_index_where(hash, number, is_descendent_of, predicate)?; + + match outcome { + FindOutcome::Abort => Ok(FindOutcome::Abort), + FindOutcome::Failure(f) => Ok(FindOutcome::Failure(f)), + FindOutcome::Found(mut indexes) => { + let mut cur = self; + + while let Some(i) = indexes.pop() { + cur = &cur.children[i]; + } + Ok(FindOutcome::Found(cur)) + }, + } + } + + /// Find a node in the tree that is the deepest ancestor of the given + /// block hash which also passes the given predicate, backtracking + /// when the predicate fails. + /// The given function `is_descendent_of` should return `true` if the second hash (target) + /// is a descendent of the first hash (base). pub fn find_node_mut_where( &mut self, hash: &H, @@ -708,45 +741,20 @@ mod node_implementation { F: Fn(&H, &H) -> Result, P: Fn(&V) -> bool, { - // stop searching this branch - if *number < self.number { - return Ok(FindOutcome::Failure(false)); - } + let outcome = self.find_node_index_where(hash, number, is_descendent_of, predicate)?; - let mut known_descendent_of = false; + match outcome { + FindOutcome::Abort => Ok(FindOutcome::Abort), + FindOutcome::Failure(f) => Ok(FindOutcome::Failure(f)), + FindOutcome::Found(mut indexes) => { + let mut cur = self; - // continue depth-first search through all children - for node in self.children.iter_mut() { - // found node, early exit - match node.find_node_mut_where(hash, number, is_descendent_of, predicate)? { - FindOutcome::Abort => return Ok(FindOutcome::Abort), - FindOutcome::Found(x) => return Ok(FindOutcome::Found(x)), - FindOutcome::Failure(true) => { - // if the block was a descendent of this child, - // then it cannot be a descendent of any others, - // so we don't search them. 
- known_descendent_of = true; - break; - }, - FindOutcome::Failure(false) => {}, - } - } - - // node not found in any of the descendents, if the node we're - // searching for is a descendent of this node then we will stop the - // search here, since there aren't any more children and we found - // the correct node so we don't want to backtrack. - let is_descendent_of = known_descendent_of || is_descendent_of(&self.hash, hash)?; - if is_descendent_of { - // if the predicate passes we return the node - if predicate(&self.data) { - return Ok(FindOutcome::Found(self)); - } + while let Some(i) = indexes.pop() { + cur = &mut cur.children[i]; + } + Ok(FindOutcome::Found(cur)) + }, } - - // otherwise, tell our ancestor that we failed, and whether - // the block was a descendent. - Ok(FindOutcome::Failure(is_descendent_of)) } } } From 85236e93b2cd02512c9909d248fdf7f56b77d8cf Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sat, 1 Feb 2020 08:12:16 +0100 Subject: [PATCH 16/75] aux-schema update --- Cargo.lock | 1 + client/consensus/sassafras/Cargo.toml | 3 +- client/consensus/sassafras/src/aux_schema.rs | 46 ++++++++++++++------ client/consensus/sassafras/src/lib.rs | 20 +++++---- primitives/consensus/sassafras/src/lib.rs | 1 - 5 files changed, 47 insertions(+), 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 218fb03ed25d7..be9bf10f29fa2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5357,6 +5357,7 @@ dependencies = [ "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "sc-client 0.8.0", "sc-client-api 2.0.0", + "sc-consensus-epochs 0.8.0", "sc-consensus-slots 0.8.0", "schnorrkel 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)", "sp-api 2.0.0", diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 28946cedc557a..3b15ebb9920ba 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -23,4 +23,5 @@ sp-timestamp = { path = 
"../../../primitives/timestamp" } sp-api = { version = "2.0.0", path = "../../../primitives/api" } sc-client = { path = "../../" } sc-client-api = { path = "../../api" } -sc-consensus-slots = { path = "../slots" } +sc-consensus-slots = { path = "../slots" } +sc-consensus-epochs = { path = "../epochs" } diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs index c09cfe3c3453e..3549d77edd836 100644 --- a/client/consensus/sassafras/src/aux_schema.rs +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -1,13 +1,19 @@ +use std::sync::Arc; +use parking_lot::Mutex; use codec::{Encode, Decode}; +use log::info; use sp_core::H256; use sp_consensus_sassafras::{ EpochNumber, SlotNumber, SassafrasBlockWeight, SassafrasAuthorityWeight, VRFProof, Randomness, AuthorityId }; +use sp_runtime::traits::Block as BlockT; use sp_blockchain::{Result as ClientResult, Error as ClientError}; use sc_client_api::AuxStore; +use sc_consensus_epochs::{SharedEpochChanges, EpochChangesFor}; +use crate::Epoch; -pub const AUXILIARY_KEY: &[u8] = b"sassafras_auxiliary"; +const SASSAFRAS_EPOCH_CHANGES: &[u8] = b"sassafras_epoch_changes"; fn load_decode(backend: &B, key: &[u8]) -> ClientResult> where @@ -23,25 +29,39 @@ fn load_decode(backend: &B, key: &[u8]) -> ClientResult> } } -pub(crate) fn load_auxiliary( - hash: &H256, - backend: &B -) -> ClientResult { - let auxiliary = load_decode::<_, Auxiliary>(backend, AUXILIARY_KEY)? - .map(Into::into) - .unwrap_or_default(); +/// Load or initialize persistent epoch change data from backend. +pub(crate) fn load_epoch_changes( + backend: &B, +) -> ClientResult> { + let epoch_changes = load_decode::<_, EpochChangesFor>( + backend, SASSAFRAS_EPOCH_CHANGES + )? + .map(|v| Arc::new(Mutex::new(v))) + .unwrap_or_else(|| { + info!(target: "sassafras", + "Creating empty Sassafras epoch changes on what appears to be first startup." 
+ ); + SharedEpochChanges::::default() + }); - Ok(auxiliary) + // rebalance the tree after deserialization. this isn't strictly necessary + // since the tree is now rebalanced on every update operation. but since the + // tree wasn't rebalanced initially it's useful to temporarily leave it here + // to avoid having to wait until an import for rebalancing. + epoch_changes.lock().rebalance(); + + Ok(epoch_changes) } -pub(crate) fn write_auxiliary( - auxiliary: &Auxiliary, +/// Update the epoch changes on disk after a change. +pub(crate) fn write_epoch_changes( + epoch_changes: &EpochChangesFor, write_aux: F, ) -> R where F: FnOnce(&[(&'static [u8], &[u8])]) -> R, { - let encoded_auxiliary = auxiliary.encode(); + let encoded_epoch_changes = epoch_changes.encode(); write_aux( - &[(AUXILIARY_KEY, encoded_auxiliary.as_slice())], + &[(SASSAFRAS_EPOCH_CHANGES, encoded_epoch_changes.as_slice())], ) } diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 6e07c80d4567b..2f0eb4342f930 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -17,7 +17,8 @@ use sp_consensus::{ }; use sp_consensus::import_queue::{Verifier, CacheKeyId, BasicQueue}; use sp_consensus_sassafras::{ - SASSAFRAS_ENGINE_ID, AuthorityPair, AuthorityId, Randomness, + SASSAFRAS_ENGINE_ID, AuthorityPair, AuthorityId, Randomness, VRFProof, + SassafrasAuthorityWeight, }; use sp_consensus_sassafras::digest::{ NextEpochDescriptor, PostBlockDescriptor, PreDigest, CompatibleDigestItem @@ -29,8 +30,9 @@ use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sc_client::{Client, CallExecutor}; use sc_client_api::backend::{AuxStore, Backend}; +use sc_consensus_epochs::{SlotNumber, Epoch as EpochT}; use sc_consensus_slots::SlotCompatible; -use crate::aux_schema::{AUXILIARY_KEY, PoolAuxiliary}; +use crate::aux_schema::{load_epoch_changes, write_epoch_changes}; /// Validator set of a particular epoch, 
can be either publishing or validating. pub struct ValidatorSet { @@ -125,10 +127,10 @@ pub struct SassafrasVerifier { impl SassafrasVerifier where Block: BlockT, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, RA: Send + Sync, - PRA: ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, + PRA: ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, PRA::Api: BlockBuilderApi, { fn verify( @@ -248,10 +250,10 @@ impl SassafrasVerifier where impl Verifier for SassafrasVerifier where Block: BlockT, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, RA: Send + Sync, - PRA: ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, + PRA: ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, PRA::Api: BlockBuilderApi, { fn verify( @@ -265,7 +267,7 @@ impl Verifier for SassafrasVerifier = BasicQueue; +pub type SassafrasImportQueue = BasicQueue; pub struct SassafrasBlockImport { inner: I, diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 5d13f37a0a82a..b71206be36cf0 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -29,7 +29,6 @@ pub use crate::vrf::{ RawVRFProof, VRFProof, Randomness, }; -use core::ops::{Deref, DerefMut}; use sp_runtime::ConsensusEngineId; mod app { From a2a36213613b1e934a7b973d80824e052247229a Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 7 Feb 2020 12:01:32 +0100 Subject: [PATCH 17/75] Update cargo locks --- Cargo.lock | 54 +++++++++++++++++++++++++++--------------------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a544fa102d412..405c98b0b3b94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5902,25 +5902,25 @@ dependencies = [ name = "sc-consensus-sassafras" version = "2.0.0" 
dependencies = [ - "derive_more 0.99.2 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", - "merlin 1.3.0 (registry+https://github.com/rust-lang/crates.io-index)", - "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "parking_lot 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", - "sc-client 0.8.0", - "sc-client-api 2.0.0", - "sc-consensus-epochs 0.8.0", - "sc-consensus-slots 0.8.0", - "schnorrkel 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-api 2.0.0", - "sp-block-builder 2.0.0", - "sp-blockchain 2.0.0", - "sp-consensus 0.8.0", - "sp-consensus-sassafras 2.0.0", - "sp-core 2.0.0", - "sp-inherents 2.0.0", - "sp-runtime 2.0.0", - "sp-timestamp 2.0.0", + "derive_more", + "log 0.4.8", + "merlin", + "parity-scale-codec", + "parking_lot 0.9.0", + "sc-client", + "sc-client-api", + "sc-consensus-epochs", + "sc-consensus-slots", + "schnorrkel", + "sp-api", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-runtime", + "sp-timestamp", ] [[package]] @@ -7016,14 +7016,14 @@ dependencies = [ name = "sp-consensus-sassafras" version = "2.0.0" dependencies = [ - "parity-scale-codec 1.1.2 (registry+https://github.com/rust-lang/crates.io-index)", - "schnorrkel 0.8.5 (registry+https://github.com/rust-lang/crates.io-index)", - "sp-application-crypto 2.0.0", - "sp-core 2.0.0", - "sp-inherents 2.0.0", - "sp-runtime 2.0.0", - "sp-std 2.0.0", - "sp-timestamp 2.0.0", + "parity-scale-codec", + "schnorrkel", + "sp-application-crypto", + "sp-core", + "sp-inherents", + "sp-runtime", + "sp-std", + "sp-timestamp", ] [[package]] From 4467dc4bfb96320e76471487557e47fc457ce480 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 7 Feb 2020 16:38:12 +0100 Subject: [PATCH 18/75] Switch verifier to use epoch_changes --- client/consensus/epochs/src/lib.rs | 61 ++++++++++++++++++++ 
client/consensus/sassafras/src/lib.rs | 70 +++++++++++++++-------- primitives/consensus/sassafras/src/lib.rs | 7 ++- 3 files changed, 110 insertions(+), 28 deletions(-) diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index cf3d9f5c4c2c2..cac201976a922 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -313,6 +313,67 @@ impl EpochChanges where }))) } + /// Finds the epoch for a child of the given block, assuming the given slot number. + /// + /// If the returned epoch is an `UnimportedGenesis` epoch, it should be imported into the + /// tree. + pub fn epoch_for_child_of_mut, G>( + &mut self, + descendent_of_builder: D, + parent_hash: &Hash, + parent_number: Number, + slot_number: Epoch::SlotNumber, + make_genesis: G, + ) -> Result>, fork_tree::Error> + where G: FnOnce(Epoch::SlotNumber) -> Epoch + { + // find_node_where will give you the node in the fork-tree which is an ancestor + // of the `parent_hash` by default. if the last epoch was signalled at the parent_hash, + // then it won't be returned. we need to create a new fake chain head hash which + // "descends" from our parent-hash. + let fake_head_hash = fake_head_hash(parent_hash); + + let is_descendent_of = descendent_of_builder + .build_is_descendent_of(Some((fake_head_hash, *parent_hash))); + + if parent_number == Zero::zero() { + // need to insert the genesis epoch. + let genesis_epoch = make_genesis(slot_number); + return Ok(Some(ViableEpoch::Genesis(UnimportedGenesisEpoch(genesis_epoch)))); + } + + // We want to find the deepest node in the tree which is an ancestor + // of our block and where the start slot of the epoch was before the + // slot of our block. The genesis special-case doesn't need to look + // at epoch_1 -- all we're doing here is figuring out which node + // we need. 
+ let predicate = |epoch: &PersistedEpoch| match *epoch { + PersistedEpoch::Genesis(ref epoch_0, _) => + epoch_0.start_slot() <= slot_number, + PersistedEpoch::Regular(ref epoch_n) => + epoch_n.start_slot() <= slot_number, + }; + + self.inner.find_node_where_mut( + &fake_head_hash, + &(parent_number + One::one()), + &is_descendent_of, + &predicate, + ) + .map(|n| n.map(|node| ViableEpoch::Regular(match node.data { + // Ok, we found our node. + // and here we figure out which of the internal epochs + // of a genesis node to use based on their start slot. + PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => + if epoch_1.start_slot() <= slot_number { + epoch_1.clone() + } else { + epoch_0.clone() + }, + PersistedEpoch::Regular(ref epoch_n) => epoch_n.clone(), + }))) + } + /// Import a new epoch-change, signalled at the given block. /// /// This assumes that the given block is prospective (i.e. has not been diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 2f0eb4342f930..93ab62a86fc78 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -18,7 +18,7 @@ use sp_consensus::{ use sp_consensus::import_queue::{Verifier, CacheKeyId, BasicQueue}; use sp_consensus_sassafras::{ SASSAFRAS_ENGINE_ID, AuthorityPair, AuthorityId, Randomness, VRFProof, - SassafrasAuthorityWeight, + SassafrasAuthorityWeight, SlotNumber, }; use sp_consensus_sassafras::digest::{ NextEpochDescriptor, PostBlockDescriptor, PreDigest, CompatibleDigestItem @@ -26,11 +26,11 @@ use sp_consensus_sassafras::digest::{ use sp_consensus_sassafras::inherents::SassafrasInherentData; use sp_runtime::{generic::BlockId, Justification}; use sp_runtime::traits::{Block as BlockT, Header}; -use sp_api::ProvideRuntimeApi; +use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sc_client::{Client, CallExecutor}; use sc_client_api::backend::{AuxStore, Backend}; -use 
sc_consensus_epochs::{SlotNumber, Epoch as EpochT}; +use sc_consensus_epochs::{descendent_query, Epoch as EpochT, SharedEpochChanges}; use sc_consensus_slots::SlotCompatible; use crate::aux_schema::{load_epoch_changes, write_epoch_changes}; @@ -60,6 +60,7 @@ pub struct Epoch { } impl EpochT for Epoch { + type SlotNumber = SlotNumber; type NextEpochDescriptor = NextEpochDescriptor; fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { @@ -121,6 +122,7 @@ impl std::convert::From> for String { pub struct SassafrasVerifier { client: Arc>, api: Arc, + epoch_changes: SharedEpochChanges, inherent_data_providers: sp_inherents::InherentDataProviders, time_source: TimeSource, } @@ -139,7 +141,7 @@ impl SassafrasVerifier where mut header: Block::Header, justification: Option, mut body: Option>, - ) -> Result<(BlockImportParams, Option)>>), Error> { + ) -> Result<(BlockImportParams, Option)>>), Error> { trace!( target: "sassafras", "Verifying origin: {:?} header: {:?} justification: {:?} body: {:?}", @@ -170,8 +172,19 @@ impl SassafrasVerifier where return Err(Error::SlotInFuture.into()) } + let epoch_changes = self.epoch_changes.lock(); + let epoch = { + epoch_changes.epoch_for_child_of_mut( + descendent_query(&*self.client), + &parent_hash, + parent_header_metadata.number, + unimplemented!(), // TODO + |slot| unimplemented!() // TODO + ) + }; + // Check the signature. - let (author, block_weight) = auxiliary.validating.authorities + let (author, block_weight) = epoch.validating.authorities .get(pre_digest.authority_index as usize) .cloned() .ok_or(Error::InvalidAuthorityId)?; @@ -184,16 +197,16 @@ impl SassafrasVerifier where } // Check that the ticket VRF is of a valid index in auxiliary.validating. - let ticket_vrf_proof = auxiliary.validating.proofs + let ticket_vrf_proof = epoch.validating.proofs .get(pre_digest.ticket_vrf_index as usize) .cloned() .ok_or(Error::InvalidTicketVRFIndex)?; // Check that the ticket VRF is valid. 
let ticket_transcript = make_ticket_transcript( - &auxiliary.validating.randomness, + &epoch.validating.randomness, pre_digest.slot, - auxiliary.validating.epoch, + epoch.validating.epoch, ); schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { p.vrf_verify(ticket_transcript, &pre_digest.ticket_vrf_output, &ticket_vrf_proof) @@ -201,9 +214,9 @@ impl SassafrasVerifier where // Check that the post-block VRF is valid. let post_transcript = make_post_transcript( - &auxiliary.validating.randomness, + &epoch.validating.randomness, pre_digest.slot, - auxiliary.validating.epoch, + epoch.validating.epoch, ); schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { p.vrf_verify(post_transcript, &pre_digest.post_vrf_output, &pre_digest.post_vrf_proof) @@ -213,37 +226,44 @@ impl SassafrasVerifier where if let Some(post_block_desc) = find_post_block_descriptor::(&header)? { // TODO: verify that proofs are below threshold. - auxiliary.publishing.proofs.append(&mut post_block_desc.commitments.clone()); + epoch.publishing.proofs.append(&mut post_block_desc.commitments.clone()); } // Finally, if we are switching epoch, move publishing to validating, and sort the proofs. if let Some(next_epoch_desc) = find_next_epoch_descriptor::(&header)? { // TODO: check descriptor validity. - std::mem::swap(&mut auxiliary.publishing, &mut auxiliary.validating); - auxiliary.publishing = PoolAuxiliary { + std::mem::swap(&mut epoch.publishing, &mut epoch.validating); + epoch.publishing = ValidatorSet { proofs: Vec::new(), authorities: next_epoch_desc.authorities, randomness: next_epoch_desc.randomness, - epoch: auxiliary.validating.epoch + 1, + epoch: epoch.validating.epoch + 1, }; // TODO: sort the validating proofs in "outside-in" order. 
} - let block_import_params = BlockImportParams { + let mut block_import_params = BlockImportParams { origin, header, post_digests: vec![seal], body, finalized: false, justification, - auxiliary: vec![(AUXILIARY_KEY.to_vec(), Some(auxiliary.encode()))], + auxiliary: vec![], fork_choice: ForkChoiceStrategy::LongestChain, allow_missing_state: false, import_existing: false, }; + crate::aux_schema::write_epoch_changes::( + &*epoch_changes, + |insert| block_import_params.auxiliary.extend( + insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) + ) + ); + Ok((block_import_params, Default::default())) } } @@ -262,7 +282,7 @@ impl Verifier for SassafrasVerifier, mut body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String> { + ) -> Result<(BlockImportParams, Option)>>), String> { self.verify(origin, header, justification, body).map_err(Into::into) } } @@ -381,10 +401,10 @@ fn make_ticket_transcript( epoch: u64, ) -> Transcript { let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); - transcript.commit_bytes(b"type", b"ticket"); - transcript.commit_bytes(b"slot number", &slot_number.to_le_bytes()); - transcript.commit_bytes(b"current epoch", &epoch.to_le_bytes()); - transcript.commit_bytes(b"chain randomness", randomness); + transcript.append_message(b"type", b"ticket"); + transcript.append_message(b"slot number", &slot_number.to_le_bytes()); + transcript.append_message(b"current epoch", &epoch.to_le_bytes()); + transcript.append_message(b"chain randomness", randomness); transcript } @@ -394,9 +414,9 @@ fn make_post_transcript( epoch: u64, ) -> Transcript { let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); - transcript.commit_bytes(b"type", b"post"); - transcript.commit_bytes(b"slot number", &slot_number.to_le_bytes()); - transcript.commit_bytes(b"current epoch", &epoch.to_le_bytes()); - transcript.commit_bytes(b"chain randomness", randomness); + transcript.append_message(b"type", b"post"); + transcript.append_message(b"slot number", 
&slot_number.to_le_bytes()); + transcript.append_message(b"current epoch", &epoch.to_le_bytes()); + transcript.append_message(b"chain randomness", randomness); transcript } diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index b71206be36cf0..8b2becbf05c38 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -42,6 +42,10 @@ pub const SASSAFRAS_TICKET_VRF_PREFIX: &[u8] = b"substrate-sassafras-ticket-vrf" /// The prefix used by Sassafras for its post-block VRF keys. pub const SASSAFRAS_POST_VRF_PREFIX: &[u8] = b"substrate-sassafras-post-vrf"; +/// A slot number. +pub type SlotNumber = u64; + +/// An epoch number. pub type EpochNumber = u64; /// A Sassafras authority keypair, used by both ticket VRF and post-block VRF. @@ -63,9 +67,6 @@ pub type VRFIndex = u32; /// The index of an authority. pub type AuthorityIndex = u32; -/// A slot number. -pub type SlotNumber = u64; - /// The weight of an authority. // NOTE: we use a unique name for the weight to avoid conflicts with other // `Weight` types, since the metadata isn't able to disambiguate. 
From f8515ce1b98bb8da01f7f16aabebbd90396a9883 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 7 Feb 2020 16:56:05 +0100 Subject: [PATCH 19/75] Make epoch_changes impl compile --- Cargo.lock | 2 +- client/consensus/epochs/src/lib.rs | 9 ++++++ client/consensus/sassafras/Cargo.toml | 2 +- client/consensus/sassafras/src/lib.rs | 42 +++++++++++++++------------ 4 files changed, 35 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 405c98b0b3b94..5448802ab0797 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5906,7 +5906,7 @@ dependencies = [ "log 0.4.8", "merlin", "parity-scale-codec", - "parking_lot 0.9.0", + "parking_lot 0.10.0", "sc-client", "sc-client-api", "sc-consensus-epochs", diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index cac201976a922..57126e53c3540 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -112,6 +112,15 @@ impl AsRef for ViableEpoch { } } +impl AsMut for ViableEpoch { + fn as_mut(&mut self) -> &mut Epoch { + match *self { + ViableEpoch::Genesis(UnimportedGenesisEpoch(ref mut e)) => e, + ViableEpoch::Regular(ref mut e) => e, + } + } +} + impl ViableEpoch where Epoch: crate::Epoch + Clone, { diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 3b15ebb9920ba..88194e7c2c043 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -10,7 +10,7 @@ codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive log = "0.4.8" schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"] } derive_more = "0.99.2" -parking_lot = "0.9.0" +parking_lot = "0.10.0" merlin = "1.2.1" sp-core = { path = "../../../primitives/core" } sp-blockchain = { path = "../../../primitives/blockchain" } diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 93ab62a86fc78..17487428e706b 100644 --- 
a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -4,7 +4,7 @@ use std::{ sync::Arc, marker::PhantomData, time::{Duration, Instant}, collections::HashMap, }; use log::trace; -use codec::Encode; +use codec::{Encode, Decode}; use parking_lot::Mutex; use merlin::Transcript; use sp_core::{Blake2Hasher, H256, crypto::{Pair, Public}}; @@ -13,7 +13,7 @@ use sp_inherents::InherentData; use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; use sp_consensus::{ Error as ConsensusError, BlockImportParams, BlockOrigin, ForkChoiceStrategy, - ImportResult, BlockImport, + ImportResult, BlockImport, BlockCheckParams, }; use sp_consensus::import_queue::{Verifier, CacheKeyId, BasicQueue}; use sp_consensus_sassafras::{ @@ -35,6 +35,7 @@ use sc_consensus_slots::SlotCompatible; use crate::aux_schema::{load_epoch_changes, write_epoch_changes}; /// Validator set of a particular epoch, can be either publishing or validating. +#[derive(Debug, Clone, Encode, Decode)] pub struct ValidatorSet { /// Proofs of all VRFs collected. pub proofs: Vec, @@ -45,6 +46,7 @@ pub struct ValidatorSet { } /// Epoch data for Sassafras +#[derive(Debug, Clone, Encode, Decode)] pub struct Epoch { /// Start slot of the epoch. 
pub start_slot: SlotNumber, @@ -97,6 +99,7 @@ enum Error { ParentUnavailable(B::Hash, B::Hash), #[display(fmt = "Could not fetch parent header: {:?}", _0)] FetchParentHeader(sp_blockchain::Error), + InvalidEpochData, MultiplePreRuntimeDigest, NoPreRuntimeDigest, MultipleNextEpochDescriptor, @@ -172,8 +175,8 @@ impl SassafrasVerifier where return Err(Error::SlotInFuture.into()) } - let epoch_changes = self.epoch_changes.lock(); - let epoch = { + let mut epoch_changes = self.epoch_changes.lock(); + let epoch_data = { epoch_changes.epoch_for_child_of_mut( descendent_query(&*self.client), &parent_hash, @@ -181,7 +184,10 @@ impl SassafrasVerifier where unimplemented!(), // TODO |slot| unimplemented!() // TODO ) + .map_err(|_| Error::InvalidEpochData)? + .ok_or(Error::InvalidEpochData)? }; + let epoch = epoch_data.as_mut(); // Check the signature. let (author, block_weight) = epoch.validating.authorities @@ -206,7 +212,7 @@ impl SassafrasVerifier where let ticket_transcript = make_ticket_transcript( &epoch.validating.randomness, pre_digest.slot, - epoch.validating.epoch, + epoch.epoch_index, ); schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { p.vrf_verify(ticket_transcript, &pre_digest.ticket_vrf_output, &ticket_vrf_proof) @@ -216,7 +222,7 @@ impl SassafrasVerifier where let post_transcript = make_post_transcript( &epoch.validating.randomness, pre_digest.slot, - epoch.validating.epoch, + epoch.epoch_index, ); schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { p.vrf_verify(post_transcript, &pre_digest.post_vrf_output, &pre_digest.post_vrf_proof) @@ -238,7 +244,6 @@ impl SassafrasVerifier where proofs: Vec::new(), authorities: next_epoch_desc.authorities, randomness: next_epoch_desc.randomness, - epoch: epoch.validating.epoch + 1, }; // TODO: sort the validating proofs in "outside-in" order. 
@@ -252,7 +257,9 @@ impl SassafrasVerifier where finalized: false, justification, auxiliary: vec![], - fork_choice: ForkChoiceStrategy::LongestChain, + fork_choice: None, + intermediates: Default::default(), + storage_changes: None, allow_missing_state: false, import_existing: false, }; @@ -319,21 +326,20 @@ where let parent_hash = *block.header.parent_hash(); let number = block.header.number().clone(); - let mut auxiliary = aux_schema::load_auxiliary(&parent_hash, self.api.as_ref()) - .map_err(Error::Client)?; - - let pre_digest = find_pre_digest::(&block.header)?; - - // Verify that the slot is increasing, and not in the future. - if pre_digest.slot <= auxiliary.slot { - return Err(Error::SlotInPast.into()) - } - auxiliary.slot = pre_digest.slot; + // let pre_digest = find_pre_digest::(&block.header)?; + // TODO: Verify that the slot is increasing, and not in the future. let import_result = self.inner.import_block(block, new_cache); import_result.map_err(Into::into) } + + fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + self.inner.check_block(block).map_err(Into::into) + } } #[derive(Default, Clone)] From 207d76476b93dad8dc6410dd8ce98affcfc575a0 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 10 Feb 2020 10:15:59 +0100 Subject: [PATCH 20/75] fork-tree: prune returns all pruned node data --- utils/fork-tree/src/lib.rs | 88 +++++++++++++++++++++++++++++++++----- 1 file changed, 78 insertions(+), 10 deletions(-) diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index 8ce0b729c67c9..0b738ecdd1a98 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -93,41 +93,83 @@ impl ForkTree where /// node. Otherwise the tree remains unchanged. The given function /// `is_descendent_of` should return `true` if the second hash (target) is a /// descendent of the first hash (base). + /// + /// Returns all pruned node data. 
pub fn prune( &mut self, hash: &H, number: &N, is_descendent_of: &F, predicate: &P, - ) -> Result<(), Error> + ) -> Result, Error> where E: std::error::Error, F: Fn(&H, &H) -> Result, P: Fn(&V) -> bool, { - let new_root = self.find_node_where( + let new_root_index = self.find_node_index_where( hash, number, is_descendent_of, predicate, )?; + let old_roots = std::mem::replace(&mut self.roots, Vec::new()); + + let mut removed = Vec::new(); + + if let Some(mut root_index) = new_root_index { + let mut root = { + let mut found = None; + let top_index = root_index.pop() + .expect("find_node_index_where will return array with at least one index; qed"); - if let Some(root) = new_root { - let mut root = root.clone(); + for (index, child) in old_roots.into_iter().enumerate() { + if index == top_index { + found = Some(child); + } else { + removed.push(child.data); + } + } + + found.expect("find_node_index_where returns indexes that exist in the tree; qed") + }; + + while let Some(cur_index) = root_index.pop() { + let mut found = None; + + for (index, child) in root.children.into_iter().enumerate() { + if index == cur_index { + found = Some(child); + } else { + removed.push(child.data); + } + } + + root = found.expect("find_node_index_where returns indexes that exist in the tree; qed") + } // we found the deepest ancestor of the finalized block, so we prune // out any children that don't include the finalized block. 
- let children = std::mem::replace(&mut root.children, Vec::new()); - root.children = children.into_iter().filter(|node| { - node.number == *number && node.hash == *hash || - node.number < *number && is_descendent_of(&node.hash, hash).unwrap_or(false) - }).take(1).collect(); + let root_children = std::mem::replace(&mut root.children, Vec::new()); + let mut is_first = true; + + for child in root_children { + if is_first && + (child.number == *number && child.hash == *hash || + child.number < *number && is_descendent_of(&child.hash, hash).unwrap_or(false)) + { + root.children.push(child); + is_first = false; + } else { + removed.push(child.data); + } + } self.roots = vec![root]; } self.rebalance(); - Ok(()) + Ok(removed) } } @@ -275,6 +317,32 @@ impl ForkTree where Ok(None) } + /// Same as [`find_node_where`](Self::find_node_where), but returns indexes. + pub fn find_node_index_where( + &self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result>, Error> where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + // search for node starting from all roots + for (index, root) in self.roots.iter().enumerate() { + let node = root.find_node_index_where(hash, number, is_descendent_of, predicate)?; + + // found the node, early exit + if let FindOutcome::Found(mut node) = node { + node.push(index); + return Ok(Some(node)); + } + } + + Ok(None) + } + /// Finalize a root in the tree and return it, return `None` in case no root /// with the given hash exists. All other roots are pruned, and the children /// of the finalized node become the new roots. 
From f65aa737281b22d538006c0380397d41f446966f Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 10 Feb 2020 17:06:11 +0100 Subject: [PATCH 21/75] epoch-changes: split EpochHeader vs epoch data --- client/consensus/epochs/src/lib.rs | 431 +++++++++++++++++++---------- utils/fork-tree/src/lib.rs | 8 +- 2 files changed, 291 insertions(+), 148 deletions(-) diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index cf3d9f5c4c2c2..0a6f77fa4ed67 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -16,7 +16,7 @@ //! Generic utilities for epoch-based consensus engines. -use std::{sync::Arc, ops::Add}; +use std::{sync::Arc, ops::Add, collections::BTreeMap}; use parking_lot::Mutex; use codec::{Encode, Decode}; use fork_tree::ForkTree; @@ -67,97 +67,127 @@ impl<'a, H, Block> IsDescendentOfBuilder } /// Epoch data, distinguish whether it is genesis or not. +/// +/// Once an epoch is created, it must have a known `start_slot` and `end_slot`, which cannot be +/// changed. Consensus engine can modify and other data into the epoch, if needed. pub trait Epoch { /// Descriptor for the next epoch. type NextEpochDescriptor; /// Type of the slot number. - type SlotNumber: Ord; + type SlotNumber: Ord + Copy; + /// The starting slot of the epoch. + fn start_slot(&self) -> Self::SlotNumber; + /// The end slot of the epoch. + fn end_slot(&self) -> Self::SlotNumber; /// Increment the epoch data, using the next epoch descriptor. fn increment(&self, descriptor: Self::NextEpochDescriptor) -> Self; - - /// Produce the "end slot" of the epoch. This is NOT inclusive to the epoch, - /// i.e. the slots covered by the epoch are `self.start_slot() .. self.end_slot()`. - fn end_slot(&self) -> Self::SlotNumber; - /// Produce the "start slot" of the epoch. - fn start_slot(&self) -> Self::SlotNumber; } -/// An unimported genesis epoch. 
-pub struct UnimportedGenesisEpoch(Epoch); - -/// The viable epoch under which a block can be verified. -/// -/// If this is the first non-genesis block in the chain, then it will -/// hold an `UnimportedGenesis` epoch. -pub enum ViableEpoch { - /// Genesis viable epoch data. - Genesis(UnimportedGenesisEpoch), - /// Regular viable epoch data. - Regular(Epoch), +impl<'a, E: Epoch> From<&'a E> for EpochHeader { + fn from(epoch: &'a E) -> EpochHeader { + Self { + start_slot: epoch.start_slot(), + end_slot: epoch.end_slot(), + } + } } -impl From for ViableEpoch { - fn from(epoch: Epoch) -> ViableEpoch { - ViableEpoch::Regular(epoch) - } +/// Header of epoch data, consisting of start and end slot. +#[derive(Eq, PartialEq, Encode, Decode, Debug)] +pub struct EpochHeader { + /// The starting slot of the epoch. + pub start_slot: E::SlotNumber, + /// The end slot of the epoch. + pub end_slot: E::SlotNumber, } -impl AsRef for ViableEpoch { - fn as_ref(&self) -> &Epoch { - match *self { - ViableEpoch::Genesis(UnimportedGenesisEpoch(ref e)) => e, - ViableEpoch::Regular(ref e) => e, +impl Clone for EpochHeader { + fn clone(&self) -> Self { + Self { + start_slot: self.start_slot, + end_slot: self.end_slot, } } } -impl ViableEpoch where - Epoch: crate::Epoch + Clone, -{ - /// Extract the underlying epoch, disregarding the fact that a genesis - /// epoch may be unimported. - pub fn into_inner(self) -> Epoch { - match self { - ViableEpoch::Genesis(UnimportedGenesisEpoch(e)) => e, - ViableEpoch::Regular(e) => e, - } - } +/// Position of the epoch identifier. +#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Debug)] +pub enum EpochIdentifierPosition { + /// The identifier points to a genesis epoch `epoch_0`. + Genesis0, + /// The identifier points to a genesis epoch `epoch_1`. + Genesis1, + /// The identifier points to a regular epoch. + Regular, +} - /// Increment the epoch, yielding an `IncrementedEpoch` to be imported - /// into the fork-tree. 
- pub fn increment( - &self, - next_descriptor: Epoch::NextEpochDescriptor - ) -> IncrementedEpoch { - let next = self.as_ref().increment(next_descriptor); - let to_persist = match *self { - ViableEpoch::Genesis(UnimportedGenesisEpoch(ref epoch_0)) => - PersistedEpoch::Genesis(epoch_0.clone(), next), - ViableEpoch::Regular(_) => PersistedEpoch::Regular(next), - }; +/// Epoch identifier. +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug)] +pub struct EpochIdentifier { + /// Location of the epoch. + pub position: EpochIdentifierPosition, + /// Hash of the block when the epoch is signaled. + pub hash: Hash, + /// Number of the block when the epoch is signaled. + pub number: Number, +} - IncrementedEpoch(to_persist) - } +/// Descriptor for a viable epoch. +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum ViableEpochDescriptor { + /// The epoch is an unimported genesis, with given slot number. + UnimportedGenesis(E::SlotNumber), + /// The epoch is signaled and has been imported, with given identifier and header. + Signaled(EpochIdentifier, EpochHeader) } -/// The datatype encoded on disk. -#[derive(Clone, Encode, Decode)] -pub enum PersistedEpoch { +/// Persisted epoch stored in EpochChanges. +#[derive(Clone)] +pub enum PersistedEpoch { /// Genesis persisted epoch data. epoch_0, epoch_1. - Genesis(Epoch, Epoch), + Genesis(E, E), /// Regular persisted epoch data. epoch_n. - Regular(Epoch), + Regular(E), +} + +impl<'a, E: Epoch> From<&'a PersistedEpoch> for PersistedEpochHeader { + fn from(epoch: &'a PersistedEpoch) -> Self { + match epoch { + PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => + PersistedEpochHeader::Genesis(epoch_0.into(), epoch_1.into()), + PersistedEpoch::Regular(ref epoch_n) => + PersistedEpochHeader::Regular(epoch_n.into()), + } + } +} + +/// Persisted epoch header stored in ForkTree. +#[derive(Encode, Decode)] +pub enum PersistedEpochHeader { + /// Genesis persisted epoch header. epoch_0, epoch_1. 
+ Genesis(EpochHeader, EpochHeader), + /// Regular persisted epoch header. epoch_n. + Regular(EpochHeader), +} + +impl Clone for PersistedEpochHeader { + fn clone(&self) -> Self { + match self { + Self::Genesis(epoch_0, epoch_1) => Self::Genesis(epoch_0.clone(), epoch_1.clone()), + Self::Regular(epoch_n) => Self::Regular(epoch_n.clone()), + } + } } /// A fresh, incremented epoch to import into the underlying fork-tree. /// /// Create this with `ViableEpoch::increment`. #[must_use = "Freshly-incremented epoch must be imported with `EpochChanges::import`"] -pub struct IncrementedEpoch(PersistedEpoch); +pub struct IncrementedEpoch(PersistedEpoch); -impl AsRef for IncrementedEpoch { - fn as_ref(&self) -> &Epoch { +impl AsRef for IncrementedEpoch { + fn as_ref(&self) -> &E { match self.0 { PersistedEpoch::Genesis(_, ref epoch_1) => epoch_1, PersistedEpoch::Regular(ref epoch_n) => epoch_n, @@ -181,8 +211,9 @@ impl AsRef for IncrementedEpoch { /// /// Further epochs (epoch_2, ..., epoch_n) each get their own entry. #[derive(Clone, Encode, Decode)] -pub struct EpochChanges { - inner: ForkTree>, +pub struct EpochChanges { + inner: ForkTree>, + epochs: BTreeMap<(Hash, Number), PersistedEpoch>, } // create a fake header hash which hasn't been included in the chain. @@ -194,19 +225,18 @@ fn fake_head_hash + AsMut<[u8]> + Clone>(parent_hash: &H) -> H { h } -impl Default for EpochChanges where - Hash: PartialEq, +impl Default for EpochChanges where + Hash: PartialEq + Ord, Number: Ord, { fn default() -> Self { - EpochChanges { inner: ForkTree::new() } + EpochChanges { inner: ForkTree::new(), epochs: BTreeMap::new() } } } -impl EpochChanges where - Hash: PartialEq + AsRef<[u8]> + AsMut<[u8]> + Copy, +impl EpochChanges where + Hash: PartialEq + Ord + AsRef<[u8]> + AsMut<[u8]> + Copy, Number: Ord + One + Zero + Add + Copy, - Epoch: crate::Epoch + Clone, { /// Create a new epoch change. 
pub fn new() -> Self { @@ -227,45 +257,143 @@ impl EpochChanges where descendent_of_builder: D, hash: &Hash, number: Number, - slot: Epoch::SlotNumber, + slot: E::SlotNumber, ) -> Result<(), fork_tree::Error> { let is_descendent_of = descendent_of_builder .build_is_descendent_of(None); - let predicate = |epoch: &PersistedEpoch| match *epoch { - PersistedEpoch::Genesis(_, ref epoch_1) => - slot >= epoch_1.end_slot(), - PersistedEpoch::Regular(ref epoch_n) => - slot >= epoch_n.end_slot(), + let predicate = |epoch: &PersistedEpochHeader| match *epoch { + PersistedEpochHeader::Genesis(_, ref epoch_1) => + slot >= epoch_1.end_slot, + PersistedEpochHeader::Regular(ref epoch_n) => + slot >= epoch_n.end_slot, }; // prune any epochs which could not be _live_ as of the children of the // finalized block, i.e. re-root the fork tree to the oldest ancestor of // (hash, number) where epoch.end_slot() >= finalized_slot - self.inner.prune( + let removed = self.inner.prune( hash, &number, &is_descendent_of, &predicate, )?; + for (hash, number, _) in removed { + self.epochs.remove(&(hash, number)); + } + Ok(()) } + pub fn epoch(&self, id: &EpochIdentifier) -> Option<&E> { + self.epochs.get(&(id.hash, id.number)) + .and_then(|v| { + match v { + PersistedEpoch::Genesis(ref epoch_0, _) + if id.position == EpochIdentifierPosition::Genesis0 => Some(epoch_0), + PersistedEpoch::Genesis(_, ref epoch_1) + if id.position == EpochIdentifierPosition::Genesis1 => Some(epoch_1), + PersistedEpoch::Regular(ref epoch_n) + if id.position == EpochIdentifierPosition::Regular => Some(epoch_n), + _ => None, + } + }) + } + + pub fn epoch_mut(&mut self, id: &EpochIdentifier) -> Option<&mut E> { + self.epochs.get_mut(&(id.hash, id.number)) + .and_then(|v| { + match v { + PersistedEpoch::Genesis(ref mut epoch_0, _) + if id.position == EpochIdentifierPosition::Genesis0 => Some(epoch_0), + PersistedEpoch::Genesis(_, ref mut epoch_1) + if id.position == EpochIdentifierPosition::Genesis1 => Some(epoch_1), + 
PersistedEpoch::Regular(ref mut epoch_n) + if id.position == EpochIdentifierPosition::Regular => Some(epoch_n), + _ => None, + } + }) + } + + /// Given an epoch descriptor, increment it using the next epoch descriptor. + pub fn increment( + &self, + descriptor: &ViableEpochDescriptor, + next_descriptor: E::NextEpochDescriptor, + make_genesis: G, + ) -> Option> where + G: FnOnce(E::SlotNumber) -> E, + { + match descriptor { + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + let epoch_0 = make_genesis(*slot_number); + let epoch_1 = epoch_0.increment(next_descriptor); + Some(IncrementedEpoch(PersistedEpoch::Genesis(epoch_0, epoch_1))) + }, + ViableEpochDescriptor::Signaled(identifier, _) => { + let epoch_n_plus_1 = self.epoch(identifier)? + .increment(next_descriptor); + Some(IncrementedEpoch(PersistedEpoch::Regular(epoch_n_plus_1))) + }, + } + } + + pub fn epoch_data( + &self, + descriptor: &ViableEpochDescriptor, + make_genesis: G + ) -> Option where + G: FnOnce(E::SlotNumber) -> E, + E: Clone, + { + match descriptor { + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + Some(make_genesis(*slot_number)) + }, + ViableEpochDescriptor::Signaled(identifier, _) => { + self.epoch(&identifier).cloned() + }, + } + } + + /// Finds the epoch data for a child of the given block. Similar to + /// `epoch_descriptor_for_child_of` but returns the full data. + /// + /// Note that this function ignores the fact that an genesis epoch might need to be imported. + /// Mostly useful for testing. 
+ pub fn epoch_data_for_child_of, G>( + &self, + descendent_of_builder: D, + parent_hash: &Hash, + parent_number: Number, + slot_number: E::SlotNumber, + make_genesis: G, + ) -> Result, fork_tree::Error> where + G: FnOnce(E::SlotNumber) -> E, + E: Clone, + { + let descriptor = self.epoch_descriptor_for_child_of( + descendent_of_builder, + parent_hash, + parent_number, + slot_number + )?; + + Ok(descriptor.and_then(|des| self.epoch_data(&des, make_genesis))) + } + /// Finds the epoch for a child of the given block, assuming the given slot number. /// /// If the returned epoch is an `UnimportedGenesis` epoch, it should be imported into the /// tree. - pub fn epoch_for_child_of, G>( + pub fn epoch_descriptor_for_child_of>( &self, descendent_of_builder: D, parent_hash: &Hash, parent_number: Number, - slot_number: Epoch::SlotNumber, - make_genesis: G, - ) -> Result>, fork_tree::Error> - where G: FnOnce(Epoch::SlotNumber) -> Epoch - { + slot_number: E::SlotNumber, + ) -> Result>, fork_tree::Error> { // find_node_where will give you the node in the fork-tree which is an ancestor // of the `parent_hash` by default. if the last epoch was signalled at the parent_hash, // then it won't be returned. we need to create a new fake chain head hash which @@ -277,8 +405,7 @@ impl EpochChanges where if parent_number == Zero::zero() { // need to insert the genesis epoch. - let genesis_epoch = make_genesis(slot_number); - return Ok(Some(ViableEpoch::Genesis(UnimportedGenesisEpoch(genesis_epoch)))); + return Ok(Some(ViableEpochDescriptor::UnimportedGenesis(slot_number))) } // We want to find the deepest node in the tree which is an ancestor @@ -286,11 +413,11 @@ impl EpochChanges where // slot of our block. The genesis special-case doesn't need to look // at epoch_1 -- all we're doing here is figuring out which node // we need. 
- let predicate = |epoch: &PersistedEpoch| match *epoch { - PersistedEpoch::Genesis(ref epoch_0, _) => - epoch_0.start_slot() <= slot_number, - PersistedEpoch::Regular(ref epoch_n) => - epoch_n.start_slot() <= slot_number, + let predicate = |epoch: &PersistedEpochHeader| match *epoch { + PersistedEpochHeader::Genesis(ref epoch_0, _) => + epoch_0.start_slot <= slot_number, + PersistedEpochHeader::Regular(ref epoch_n) => + epoch_n.start_slot <= slot_number, }; self.inner.find_node_where( @@ -299,18 +426,27 @@ impl EpochChanges where &is_descendent_of, &predicate, ) - .map(|n| n.map(|node| ViableEpoch::Regular(match node.data { - // Ok, we found our node. - // and here we figure out which of the internal epochs - // of a genesis node to use based on their start slot. - PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => - if epoch_1.start_slot() <= slot_number { - epoch_1.clone() - } else { - epoch_0.clone() - }, - PersistedEpoch::Regular(ref epoch_n) => epoch_n.clone(), - }))) + .map(|n| { + n.map(|node| (match node.data { + // Ok, we found our node. + // and here we figure out which of the internal epochs + // of a genesis node to use based on their start slot. + PersistedEpochHeader::Genesis(ref epoch_0, ref epoch_1) => + if epoch_1.start_slot <= slot_number { + (EpochIdentifierPosition::Genesis1, epoch_1.clone()) + } else { + (EpochIdentifierPosition::Genesis0, epoch_0.clone()) + }, + PersistedEpochHeader::Regular(ref epoch_n) => + (EpochIdentifierPosition::Regular, epoch_n.clone()), + }, node)).map(|((position, header), node)| { + ViableEpochDescriptor::Signaled(EpochIdentifier { + position, + hash: node.hash, + number: node.number + }, header) + }) + }) } /// Import a new epoch-change, signalled at the given block. 
@@ -324,26 +460,30 @@ impl EpochChanges where hash: Hash, number: Number, parent_hash: Hash, - epoch: IncrementedEpoch, + epoch: IncrementedEpoch, ) -> Result<(), fork_tree::Error> { let is_descendent_of = descendent_of_builder .build_is_descendent_of(Some((hash, parent_hash))); + let header = PersistedEpochHeader::::from(&epoch.0); let res = self.inner.import( hash, number, - epoch.0, + header, &is_descendent_of, ); match res { - Ok(_) | Err(fork_tree::Error::Duplicate) => Ok(()), + Ok(_) | Err(fork_tree::Error::Duplicate) => { + self.epochs.insert((hash, number), epoch.0); + Ok(()) + }, Err(e) => Err(e), } } /// Return the inner fork tree. - pub fn tree(&self) -> &ForkTree> { + pub fn tree(&self) -> &ForkTree> { &self.inner } } @@ -443,39 +583,34 @@ mod tests { } }; - let make_genesis = |slot| Epoch { - start_slot: slot, - duration: 100, - }; - - let epoch_changes = EpochChanges::new(); - let genesis_epoch = epoch_changes.epoch_for_child_of( + let epoch_changes = EpochChanges::<_, _, Epoch>::new(); + let genesis_epoch = epoch_changes.epoch_descriptor_for_child_of( &is_descendent_of, b"0", 0, 10101, - &make_genesis, ).unwrap().unwrap(); match genesis_epoch { - ViableEpoch::Genesis(_) => {}, + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + assert_eq!(slot_number, 10101u64); + }, _ => panic!("should be unimported genesis"), }; - assert_eq!(genesis_epoch.as_ref(), &make_genesis(10101)); - let genesis_epoch_2 = epoch_changes.epoch_for_child_of( + let genesis_epoch_2 = epoch_changes.epoch_descriptor_for_child_of( &is_descendent_of, b"0", 0, 10102, - &make_genesis, ).unwrap().unwrap(); match genesis_epoch_2 { - ViableEpoch::Genesis(_) => {}, + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + assert_eq!(slot_number, 10102u64); + }, _ => panic!("should be unimported genesis"), }; - assert_eq!(genesis_epoch_2.as_ref(), &make_genesis(10102)); } #[test] @@ -499,18 +634,17 @@ mod tests { duration: 100, }; - let mut epoch_changes = 
EpochChanges::new(); - let genesis_epoch = epoch_changes.epoch_for_child_of( + let mut epoch_changes = EpochChanges::<_, _, Epoch>::new(); + let genesis_epoch = epoch_changes.epoch_descriptor_for_child_of( &is_descendent_of, b"0", 0, 100, - &make_genesis, ).unwrap().unwrap(); - assert_eq!(genesis_epoch.as_ref(), &make_genesis(100)); + assert_eq!(genesis_epoch, ViableEpochDescriptor::UnimportedGenesis(100)); - let import_epoch_1 = genesis_epoch.increment(()); + let import_epoch_1 = epoch_changes.increment(&genesis_epoch, (), &make_genesis).unwrap(); let epoch_1 = import_epoch_1.as_ref().clone(); epoch_changes.import( @@ -520,7 +654,7 @@ mod tests { *b"0", import_epoch_1, ).unwrap(); - let genesis_epoch = genesis_epoch.into_inner(); + let genesis_epoch = epoch_changes.epoch_data(&genesis_epoch, &make_genesis).unwrap(); assert!(is_descendent_of(b"0", b"A").unwrap()); @@ -529,13 +663,13 @@ mod tests { { // x is still within the genesis epoch. - let x = epoch_changes.epoch_for_child_of( + let x = epoch_changes.epoch_data_for_child_of( &is_descendent_of, b"A", 1, end_slot - 1, &make_genesis, - ).unwrap().unwrap().into_inner(); + ).unwrap().unwrap(); assert_eq!(x, genesis_epoch); } @@ -543,13 +677,13 @@ mod tests { { // x is now at the next epoch, because the block is now at the // start slot of epoch 1. - let x = epoch_changes.epoch_for_child_of( + let x = epoch_changes.epoch_data_for_child_of( &is_descendent_of, b"A", 1, end_slot, &make_genesis, - ).unwrap().unwrap().into_inner(); + ).unwrap().unwrap(); assert_eq!(x, epoch_1); } @@ -557,13 +691,13 @@ mod tests { { // x is now at the next epoch, because the block is now after // start slot of epoch 1. 
- let x = epoch_changes.epoch_for_child_of( + let x = epoch_changes.epoch_data_for_child_of( &is_descendent_of, b"A", 1, epoch_1.end_slot() - 1, &make_genesis, - ).unwrap().unwrap().into_inner(); + ).unwrap().unwrap(); assert_eq!(x, epoch_1); } @@ -596,47 +730,56 @@ mod tests { // insert genesis epoch for A { - let genesis_epoch_a = epoch_changes.epoch_for_child_of( + let genesis_epoch_a_descriptor = epoch_changes.epoch_descriptor_for_child_of( &is_descendent_of, b"0", 0, 100, - &make_genesis, ).unwrap().unwrap(); + let incremented_epoch = epoch_changes.increment( + &genesis_epoch_a_descriptor, + next_descriptor.clone(), + &make_genesis, + ).unwrap(); + epoch_changes.import( &is_descendent_of, *b"A", 1, *b"0", - genesis_epoch_a.increment(next_descriptor.clone()), + incremented_epoch, ).unwrap(); - } // insert genesis epoch for X { - let genesis_epoch_x = epoch_changes.epoch_for_child_of( + let genesis_epoch_x_descriptor = epoch_changes.epoch_descriptor_for_child_of( &is_descendent_of, b"0", 0, 1000, - &make_genesis, ).unwrap().unwrap(); + let incremented_epoch = epoch_changes.increment( + &genesis_epoch_x_descriptor, + next_descriptor.clone(), + &make_genesis, + ).unwrap(); + epoch_changes.import( &is_descendent_of, *b"X", 1, *b"0", - genesis_epoch_x.increment(next_descriptor.clone()), + incremented_epoch, ).unwrap(); } // now check that the genesis epochs for our respective block 1s // respect the chain structure. 
{ - let epoch_for_a_child = epoch_changes.epoch_for_child_of( + let epoch_for_a_child = epoch_changes.epoch_data_for_child_of( &is_descendent_of, b"A", 1, @@ -644,9 +787,9 @@ mod tests { &make_genesis, ).unwrap().unwrap(); - assert_eq!(epoch_for_a_child.into_inner(), make_genesis(100)); + assert_eq!(epoch_for_a_child, make_genesis(100)); - let epoch_for_x_child = epoch_changes.epoch_for_child_of( + let epoch_for_x_child = epoch_changes.epoch_data_for_child_of( &is_descendent_of, b"X", 1, @@ -654,9 +797,9 @@ mod tests { &make_genesis, ).unwrap().unwrap(); - assert_eq!(epoch_for_x_child.into_inner(), make_genesis(1000)); + assert_eq!(epoch_for_x_child, make_genesis(1000)); - let epoch_for_x_child_before_genesis = epoch_changes.epoch_for_child_of( + let epoch_for_x_child_before_genesis = epoch_changes.epoch_data_for_child_of( &is_descendent_of, b"X", 1, diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index 0b738ecdd1a98..89c161942869f 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -101,7 +101,7 @@ impl ForkTree where number: &N, is_descendent_of: &F, predicate: &P, - ) -> Result, Error> + ) -> Result, Error> where E: std::error::Error, F: Fn(&H, &H) -> Result, P: Fn(&V) -> bool, @@ -126,7 +126,7 @@ impl ForkTree where if index == top_index { found = Some(child); } else { - removed.push(child.data); + removed.push((child.hash, child.number, child.data)); } } @@ -140,7 +140,7 @@ impl ForkTree where if index == cur_index { found = Some(child); } else { - removed.push(child.data); + removed.push((child.hash, child.number, child.data)); } } @@ -160,7 +160,7 @@ impl ForkTree where root.children.push(child); is_first = false; } else { - removed.push(child.data); + removed.push((child.hash, child.number, child.data)); } } From 9049a7b352bb5c916e971550b1d3b9818b489aad Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 10 Feb 2020 17:24:30 +0100 Subject: [PATCH 22/75] EpochChanges::viable_epoch and add missing comments --- 
client/consensus/epochs/src/lib.rs | 140 ++++++++++++++++++++++++----- 1 file changed, 116 insertions(+), 24 deletions(-) diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index 0a6f77fa4ed67..4fd1b34607077 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -16,7 +16,7 @@ //! Generic utilities for epoch-based consensus engines. -use std::{sync::Arc, ops::Add, collections::BTreeMap}; +use std::{sync::Arc, ops::Add, collections::BTreeMap, borrow::{Borrow, BorrowMut}}; use parking_lot::Mutex; use codec::{Encode, Decode}; use fork_tree::ForkTree; @@ -133,6 +133,78 @@ pub struct EpochIdentifier { pub number: Number, } +/// The viable epoch under which a block can be verified. +/// +/// If this is the first non-genesis block in the chain, then it will +/// hold an `UnimportedGenesis` epoch. +pub enum ViableEpoch { + /// Unimported genesis viable epoch data. + UnimportedGenesis(E), + /// Regular viable epoch data. + Signaled(ERef), +} + +impl AsRef for ViableEpoch where + ERef: Borrow, +{ + fn as_ref(&self) -> &E { + match *self { + ViableEpoch::UnimportedGenesis(ref e) => e, + ViableEpoch::Signaled(ref e) => e.borrow(), + } + } +} + +impl AsMut for ViableEpoch where + ERef: BorrowMut, +{ + fn as_mut(&mut self) -> &mut E { + match *self { + ViableEpoch::UnimportedGenesis(ref mut e) => e, + ViableEpoch::Signaled(ref mut e) => e.borrow_mut(), + } + } +} + +impl ViableEpoch where + E: Epoch + Clone, + ERef: Borrow, +{ + /// Extract the underlying epoch, disregarding the fact that a genesis + /// epoch may be unimported. + pub fn into_cloned_inner(self) -> E { + match self { + ViableEpoch::UnimportedGenesis(e) => e, + ViableEpoch::Signaled(e) => e.borrow().clone(), + } + } + + /// Get cloned value for the viable epoch. 
+ pub fn into_cloned(self) -> ViableEpoch { + match self { + ViableEpoch::UnimportedGenesis(e) => + ViableEpoch::UnimportedGenesis(e), + ViableEpoch::Signaled(e) => ViableEpoch::Signaled(e.borrow().clone()), + } + } + + /// Increment the epoch, yielding an `IncrementedEpoch` to be imported + /// into the fork-tree. + pub fn increment( + &self, + next_descriptor: E::NextEpochDescriptor + ) -> IncrementedEpoch { + let next = self.as_ref().increment(next_descriptor); + let to_persist = match *self { + ViableEpoch::UnimportedGenesis(ref epoch_0) => + PersistedEpoch::Genesis(epoch_0.clone(), next), + ViableEpoch::Signaled(_) => PersistedEpoch::Regular(next), + }; + + IncrementedEpoch(to_persist) + } +} + /// Descriptor for a viable epoch. #[derive(PartialEq, Eq, Clone, Debug)] pub enum ViableEpochDescriptor { @@ -286,6 +358,7 @@ impl EpochChanges where Ok(()) } + /// Get a reference to an epoch with given identifier. pub fn epoch(&self, id: &EpochIdentifier) -> Option<&E> { self.epochs.get(&(id.hash, id.number)) .and_then(|v| { @@ -301,6 +374,25 @@ impl EpochChanges where }) } + /// Get a reference to a viable epoch with given descriptor. + pub fn viable_epoch( + &self, + descriptor: &ViableEpochDescriptor, + make_genesis: G, + ) -> Option> where + G: FnOnce(E::SlotNumber) -> E + { + match descriptor { + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot_number))) + }, + ViableEpochDescriptor::Signaled(identifier, _) => { + self.epoch(&identifier).map(ViableEpoch::Signaled) + }, + } + } + + /// Get a mutable reference to an epoch with given identifier. pub fn epoch_mut(&mut self, id: &EpochIdentifier) -> Option<&mut E> { self.epochs.get_mut(&(id.hash, id.number)) .and_then(|v| { @@ -316,29 +408,28 @@ impl EpochChanges where }) } - /// Given an epoch descriptor, increment it using the next epoch descriptor. - pub fn increment( - &self, + /// Get a mutable reference to a viable epoch with given descriptor. 
+ pub fn viable_epoch_mut( + &mut self, descriptor: &ViableEpochDescriptor, - next_descriptor: E::NextEpochDescriptor, make_genesis: G, - ) -> Option> where - G: FnOnce(E::SlotNumber) -> E, + ) -> Option> where + G: FnOnce(E::SlotNumber) -> E { match descriptor { ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - let epoch_0 = make_genesis(*slot_number); - let epoch_1 = epoch_0.increment(next_descriptor); - Some(IncrementedEpoch(PersistedEpoch::Genesis(epoch_0, epoch_1))) + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot_number))) }, ViableEpochDescriptor::Signaled(identifier, _) => { - let epoch_n_plus_1 = self.epoch(identifier)? - .increment(next_descriptor); - Some(IncrementedEpoch(PersistedEpoch::Regular(epoch_n_plus_1))) + self.epoch_mut(&identifier).map(ViableEpoch::Signaled) }, } } + /// Get the epoch data from an epoch descriptor. + /// + /// Note that this function ignores the fact that an genesis epoch might need to be imported. + /// Mostly useful for testing. 
pub fn epoch_data( &self, descriptor: &ViableEpochDescriptor, @@ -644,7 +735,10 @@ mod tests { assert_eq!(genesis_epoch, ViableEpochDescriptor::UnimportedGenesis(100)); - let import_epoch_1 = epoch_changes.increment(&genesis_epoch, (), &make_genesis).unwrap(); + let import_epoch_1 = epoch_changes + .viable_epoch(&genesis_epoch, &make_genesis) + .unwrap() + .increment(()); let epoch_1 = import_epoch_1.as_ref().clone(); epoch_changes.import( @@ -737,11 +831,10 @@ mod tests { 100, ).unwrap().unwrap(); - let incremented_epoch = epoch_changes.increment( - &genesis_epoch_a_descriptor, - next_descriptor.clone(), - &make_genesis, - ).unwrap(); + let incremented_epoch = epoch_changes + .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) + .unwrap() + .increment(next_descriptor.clone()); epoch_changes.import( &is_descendent_of, @@ -761,11 +854,10 @@ mod tests { 1000, ).unwrap().unwrap(); - let incremented_epoch = epoch_changes.increment( - &genesis_epoch_x_descriptor, - next_descriptor.clone(), - &make_genesis, - ).unwrap(); + let incremented_epoch = epoch_changes + .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) + .unwrap() + .increment(next_descriptor.clone()); epoch_changes.import( &is_descendent_of, From 7a82b6d6fde81a5862c502143703fa69cc8c5e9d Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 10 Feb 2020 18:00:51 +0100 Subject: [PATCH 23/75] Incoperate the new epoch_changes interface for BABE --- client/consensus/babe/src/lib.rs | 90 ++++++++++++++++++------------ client/consensus/epochs/src/lib.rs | 14 ++++- client/consensus/slots/src/lib.rs | 9 ++- 3 files changed, 72 insertions(+), 41 deletions(-) diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index f9e3ef98d6735..c9b547144d418 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -78,7 +78,7 @@ use sp_runtime::{ generic::{BlockId, OpaqueDigestItemId}, Justification, traits::{Block as BlockT, Header, DigestItemFor, Zero}, }; -use 
sp_api::ProvideRuntimeApi; +use sp_api::{ProvideRuntimeApi, NumberFor}; use sc_keystore::KeyStorePtr; use parking_lot::Mutex; use sp_core::Pair; @@ -107,7 +107,7 @@ use sc_consensus_slots::{ SlotWorker, SlotInfo, SlotCompatible, StorageChanges, CheckedHeader, check_equivocation, }; use sc_consensus_epochs::{ - descendent_query, ViableEpoch, SharedEpochChanges, EpochChangesFor, Epoch as EpochT + descendent_query, SharedEpochChanges, EpochChangesFor, Epoch as EpochT, ViableEpochDescriptor, }; use sp_blockchain::{ Result as ClientResult, Error as ClientError, @@ -234,9 +234,9 @@ macro_rules! babe_info { /// Intermediate value passed to block importer. -pub struct BabeIntermediate { - /// The epoch data, if available. - pub epoch: ViableEpoch, +pub struct BabeIntermediate { + /// The epoch descriptor. + pub epoch_descriptor: ViableEpochDescriptor, Epoch>, } /// Intermediate key for Babe engine. @@ -405,7 +405,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork SO: SyncOracle + Send + Clone, Error: std::error::Error + Send + From + From + 'static, { - type EpochData = ViableEpoch; + type EpochData = ViableEpochDescriptor, Epoch>; type Claim = (PreDigest, AuthorityPair); type SyncOracle = SO; type CreateProposer = Pin sc_consensus_slots::SimpleSlotWorker for BabeWork parent: &B::Header, slot_number: u64, ) -> Result { - self.epoch_changes.lock().epoch_for_child_of( + self.epoch_changes.lock().epoch_descriptor_for_child_of( descendent_query(&*self.client), &parent.hash(), parent.number().clone(), slot_number, - |slot| self.config.genesis_epoch(slot) ) .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? 
.ok_or(sp_consensus::Error::InvalidAuthoritiesSet) } - fn authorities_len(&self, epoch_data: &Self::EpochData) -> usize { - epoch_data.as_ref().authorities.len() + fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { + self.epoch_changes.lock() + .viable_epoch(&epoch_descriptor, |slot| self.config.genesis_epoch(slot)) + .map(|epoch| epoch.as_ref().authorities.len()) } fn claim_slot( &self, _parent_header: &B::Header, slot_number: SlotNumber, - epoch_data: &ViableEpoch, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, ) -> Option { debug!(target: "babe", "Attempting to claim slot {}", slot_number); let s = authorship::claim_slot( slot_number, - epoch_data.as_ref(), + self.epoch_changes.lock().viable_epoch( + &epoch_descriptor, + |slot| self.config.genesis_epoch(slot) + )?.as_ref(), &*self.config, &self.keystore, ); @@ -481,7 +485,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork Self::Claim, Self::EpochData, ) -> sp_consensus::BlockImportParams + Send> { - Box::new(|header, header_hash, body, storage_changes, (_, pair), epoch| { + Box::new(|header, header_hash, body, storage_changes, (_, pair), epoch_descriptor| { // sign the pre-sealed hash of the block and then // add it to a digest item. 
let signature = pair.sign(header_hash.as_ref()); @@ -500,7 +504,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork let mut intermediates = HashMap::new(); intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate { epoch }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, ); intermediates }, @@ -788,18 +792,19 @@ impl Verifier for BabeVerifier::FetchParentHeader)?; let pre_digest = find_pre_digest::(&header)?; - let epoch = { - let epoch_changes = self.epoch_changes.lock(); - epoch_changes.epoch_for_child_of( - descendent_query(&*self.client), - &parent_hash, - parent_header_metadata.number, - pre_digest.slot_number(), - |slot| self.config.genesis_epoch(slot), - ) - .map_err(|e| Error::::ForkTree(Box::new(e)))? - .ok_or_else(|| Error::::FetchEpoch(parent_hash))? - }; + let epoch_changes = self.epoch_changes.lock(); + let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_header_metadata.number, + pre_digest.slot_number(), + ) + .map_err(|e| Error::::ForkTree(Box::new(e)))? + .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + let viable_epoch = epoch_changes.viable_epoch( + &epoch_descriptor, + |slot| self.config.genesis_epoch(slot) + ).ok_or_else(|| Error::::FetchEpoch(parent_hash))?; // We add one to the current slot to allow for some small drift. 
// FIXME #1019 in the future, alter this queue to allow deferring of headers @@ -807,7 +812,7 @@ impl Verifier for BabeVerifier Verifier for BabeVerifier { + epoch_descriptor, }) as Box, ); @@ -1025,7 +1030,7 @@ impl BlockImport for BabeBlockImport BlockImport for BabeBlockImport( + let intermediate = block.take_intermediate::>( INTERMEDIATE_KEY )?; - let epoch = intermediate.epoch; - let first_in_epoch = parent_slot < epoch.as_ref().start_slot; - (epoch, first_in_epoch, parent_weight) + let epoch_descriptor = intermediate.epoch_descriptor; + let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); + (epoch_descriptor, first_in_epoch, parent_weight) }; let total_weight = parent_weight + pre_digest.added_weight(); @@ -1073,12 +1078,23 @@ impl BlockImport for BabeBlockImport::FetchEpoch(parent_hash).into()) + })?; + babe_info!("New epoch {} launching at block {} (block slot {} >= start slot {}).", - epoch.as_ref().epoch_index, hash, slot_number, epoch.as_ref().start_slot); + viable_epoch.as_ref().epoch_index, + hash, + slot_number, + viable_epoch.as_ref().start_slot); + + let next_epoch = viable_epoch.increment(next_epoch_descriptor); + babe_info!("Next epoch starts at slot {}", next_epoch.as_ref().start_slot); // prune the tree of epochs not part of the finalized chain or diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index 4fd1b34607077..6e35958bda282 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -208,14 +208,24 @@ impl ViableEpoch where /// Descriptor for a viable epoch. #[derive(PartialEq, Eq, Clone, Debug)] pub enum ViableEpochDescriptor { - /// The epoch is an unimported genesis, with given slot number. + /// The epoch is an unimported genesis, with given start slot number. UnimportedGenesis(E::SlotNumber), /// The epoch is signaled and has been imported, with given identifier and header. 
Signaled(EpochIdentifier, EpochHeader) } +impl ViableEpochDescriptor { + /// Start slot of the descriptor. + pub fn start_slot(&self) -> E::SlotNumber { + match self { + Self::UnimportedGenesis(start_slot) => *start_slot, + Self::Signaled(_, header) => header.start_slot, + } + } +} + /// Persisted epoch stored in EpochChanges. -#[derive(Clone)] +#[derive(Clone, Encode, Decode)] pub enum PersistedEpoch { /// Genesis persisted epoch data. epoch_0, epoch_1. Genesis(E, E), diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 8bc2547a49e39..c010dadecd376 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -94,7 +94,8 @@ pub trait SimpleSlotWorker { fn epoch_data(&self, header: &B::Header, slot_number: u64) -> Result; /// Returns the number of authorities given the epoch data. - fn authorities_len(&self, epoch_data: &Self::EpochData) -> usize; + /// None indicate that the authorities information is incomplete. + fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option; /// Tries to claim the given slot, returning an object with claim data if successful. fn claim_slot( @@ -194,7 +195,11 @@ pub trait SimpleSlotWorker { let authorities_len = self.authorities_len(&epoch_data); - if !self.force_authoring() && self.sync_oracle().is_offline() && authorities_len > 1 { + if authorities_len.is_none() || + (!self.force_authoring() && + self.sync_oracle().is_offline() && + authorities_len.map(|a| a > 1).unwrap_or(true)) + { debug!(target: self.logging_target(), "Skipping proposal slot. 
Waiting for the network."); telemetry!( CONSENSUS_DEBUG; From e366d8955e079d8e2a5b651086df671e877bb489 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 10 Feb 2020 18:07:19 +0100 Subject: [PATCH 24/75] Fix BABE tests --- client/consensus/babe/src/tests.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 687f23e646f66..0f0c2f2e471b1 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -123,7 +123,7 @@ impl DummyProposer { // figure out if we should add a consensus digest, since the test runtime // doesn't. let epoch_changes = self.factory.epoch_changes.lock(); - let epoch = epoch_changes.epoch_for_child_of( + let epoch = epoch_changes.epoch_data_for_child_of( descendent_query(&*self.factory.client), &self.parent_hash, self.parent_number, @@ -131,8 +131,7 @@ impl DummyProposer { |slot| self.factory.config.genesis_epoch(slot), ) .expect("client has data to find epoch") - .expect("can compute epoch for baked block") - .into_inner(); + .expect("can compute epoch for baked block"); let first_in_epoch = self.parent_slot < epoch.start_slot; if first_in_epoch { @@ -569,12 +568,11 @@ fn propose_and_import_block( let mut block = futures::executor::block_on(proposer.propose_with(pre_digest)).unwrap().block; - let epoch = proposer_factory.epoch_changes.lock().epoch_for_child_of( + let epoch_descriptor = proposer_factory.epoch_changes.lock().epoch_descriptor_for_child_of( descendent_query(&*proposer_factory.client), &parent_hash, *parent.number(), slot_number, - |slot| proposer_factory.config.genesis_epoch(slot) ).unwrap().unwrap(); let seal = { @@ -607,7 +605,7 @@ fn propose_and_import_block( let mut intermediates = HashMap::new(); intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate { epoch }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, ); intermediates }, @@ -655,13 +653,13 @@ 
fn importing_block_one_sets_genesis_epoch() { let genesis_epoch = data.link.config.genesis_epoch(999); let epoch_changes = data.link.epoch_changes.lock(); - let epoch_for_second_block = epoch_changes.epoch_for_child_of( + let epoch_for_second_block = epoch_changes.epoch_data_for_child_of( descendent_query(&*client), &block_hash, 1, 1000, |slot| data.link.config.genesis_epoch(slot), - ).unwrap().unwrap().into_inner(); + ).unwrap().unwrap(); assert_eq!(epoch_for_second_block, genesis_epoch); } From 7dba1ca956edd42e8e88bbc372c965ee45221470 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 10 Feb 2020 18:49:55 +0100 Subject: [PATCH 25/75] Fix fork-tree pruning issue --- utils/fork-tree/src/lib.rs | 44 +++++++++++++++++--------------------- 1 file changed, 20 insertions(+), 24 deletions(-) diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index 89c161942869f..78d89a7bc0142 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -106,47 +106,43 @@ impl ForkTree where F: Fn(&H, &H) -> Result, P: Fn(&V) -> bool, { + let mut removed = Vec::new(); + let new_root_index = self.find_node_index_where( hash, number, is_descendent_of, predicate, )?; - let old_roots = std::mem::replace(&mut self.roots, Vec::new()); - - let mut removed = Vec::new(); if let Some(mut root_index) = new_root_index { - let mut root = { - let mut found = None; - let top_index = root_index.pop() - .expect("find_node_index_where will return array with at least one index; qed"); - - for (index, child) in old_roots.into_iter().enumerate() { - if index == top_index { - found = Some(child); - } else { - removed.push((child.hash, child.number, child.data)); - } - } + let old_roots = std::mem::replace(&mut self.roots, Vec::new()); - found.expect("find_node_index_where returns indexes that exist in the tree; qed") - }; + let mut cur_children = old_roots; while let Some(cur_index) = root_index.pop() { let mut found = None; - - for (index, child) in 
root.children.into_iter().enumerate() { + for (index, child) in cur_children.into_iter().enumerate() { if index == cur_index { - found = Some(child); - } else { - removed.push((child.hash, child.number, child.data)); + found = Some(child.children); } + removed.push((child.hash, child.number, child.data)); } - - root = found.expect("find_node_index_where returns indexes that exist in the tree; qed") + cur_children = found + .expect("find_node_index_where always return valid index; qed"); } + let (root_hash, root_number, root_data) = removed.pop() + .expect("find_node_index_where will return array with at least one index; \ + this results in at least one item in removed; qed"); + + let mut root = Node { + hash: root_hash, + number: root_number, + data: root_data, + children: cur_children + }; + // we found the deepest ancestor of the finalized block, so we prune // out any children that don't include the finalized block. let root_children = std::mem::replace(&mut root.children, Vec::new()); From 417e6a1de4a0c3e82ef46679832130729919ae86 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 10 Feb 2020 19:02:39 +0100 Subject: [PATCH 26/75] Fix tests --- bin/node/cli/src/service.rs | 5 ++--- client/consensus/aura/src/lib.rs | 4 ++-- client/consensus/babe/src/lib.rs | 5 +++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 2c500c6a1c1ed..d504d7196e1ef 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -509,12 +509,11 @@ mod tests { transaction_pool: service.transaction_pool(), }; - let epoch = babe_link.epoch_changes().lock().epoch_for_child_of( + let epoch_descriptor = babe_link.epoch_changes().lock().epoch_descriptor_for_child_of( descendent_query(&*service.client()), &parent_hash, parent_number, slot_num, - |slot| babe_link.config().genesis_epoch(slot) ).unwrap().unwrap(); let mut digest = Digest::::default(); @@ -572,7 +571,7 @@ mod tests { let mut intermediates = 
HashMap::new(); intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate { epoch }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, ); intermediates }, diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 434314a85353e..43ccbeed41ff3 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -240,8 +240,8 @@ impl sc_consensus_slots::SimpleSlotWorker for AuraW authorities(self.client.as_ref(), &BlockId::Hash(header.hash())) } - fn authorities_len(&self, epoch_data: &Self::EpochData) -> usize { - epoch_data.len() + fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option { + Some(epoch_data.len()) } fn claim_slot( diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index c9b547144d418..af6adbf2f39cd 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -1333,7 +1333,8 @@ pub mod test_helpers { HeaderMetadata, C::Api: BabeApi, { - let epoch = link.epoch_changes.lock().epoch_for_child_of( + let epoch_changes = link.epoch_changes.lock(); + let epoch = epoch_changes.epoch_data_for_child_of( descendent_query(client), &parent.hash(), parent.number().clone(), @@ -1343,7 +1344,7 @@ pub mod test_helpers { authorship::claim_slot( slot_number, - epoch.as_ref(), + &epoch, &link.config, keystore, ).map(|(digest, _)| digest) From a93dc2e3f50b737f347775f0ea7bcb7f8f895003 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 10 Feb 2020 20:00:17 +0100 Subject: [PATCH 27/75] Fix pruning algorithm --- utils/fork-tree/src/lib.rs | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index 78d89a7bc0142..6bee48de56821 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -118,31 +118,29 @@ impl ForkTree where if let Some(mut root_index) = new_root_index { let old_roots = std::mem::replace(&mut 
self.roots, Vec::new()); - let mut cur_children = old_roots; + let mut root = None; + let mut cur_children = Some(old_roots); while let Some(cur_index) = root_index.pop() { - let mut found = None; - for (index, child) in cur_children.into_iter().enumerate() { - if index == cur_index { - found = Some(child.children); + if let Some(children) = cur_children.take() { + for (index, child) in children.into_iter().enumerate() { + if index == cur_index { + if root_index.is_empty() { + root = Some(child); + } else { + cur_children = Some(child.children); + } + } else { + removed.push((child.hash, child.number, child.data)); + } } - removed.push((child.hash, child.number, child.data)); } - cur_children = found - .expect("find_node_index_where always return valid index; qed"); } - let (root_hash, root_number, root_data) = removed.pop() + let mut root = root .expect("find_node_index_where will return array with at least one index; \ this results in at least one item in removed; qed"); - let mut root = Node { - hash: root_hash, - number: root_number, - data: root_data, - children: cur_children - }; - // we found the deepest ancestor of the finalized block, so we prune // out any children that don't include the finalized block. let root_children = std::mem::replace(&mut root.children, Vec::new()); From 47fc9e95b3cd7e320390ff450d002dd04de5d767 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sat, 15 Feb 2020 18:36:58 +0100 Subject: [PATCH 28/75] Add license header --- client/consensus/sassafras/src/lib.rs | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 17487428e706b..03324294cb5d4 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -1,3 +1,21 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! # Sassafras Consensus + mod aux_schema; use std::{ From 8322425f3ba8fe46c5d21ffd9d94636a6c0c3ee9 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sat, 15 Feb 2020 20:13:56 +0100 Subject: [PATCH 29/75] Use new descriptor interface --- client/consensus/sassafras/src/aux_schema.rs | 5 --- client/consensus/sassafras/src/lib.rs | 38 ++++++++++---------- 2 files changed, 19 insertions(+), 24 deletions(-) diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs index 3549d77edd836..465fa2fe9bb43 100644 --- a/client/consensus/sassafras/src/aux_schema.rs +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -2,11 +2,6 @@ use std::sync::Arc; use parking_lot::Mutex; use codec::{Encode, Decode}; use log::info; -use sp_core::H256; -use sp_consensus_sassafras::{ - EpochNumber, SlotNumber, SassafrasBlockWeight, SassafrasAuthorityWeight, - VRFProof, Randomness, AuthorityId -}; use sp_runtime::traits::Block as BlockT; use sp_blockchain::{Result as ClientResult, Error as ClientError}; use sc_client_api::AuxStore; diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 03324294cb5d4..cc2aa9e1c66a7 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -19,19 +19,19 @@ mod aux_schema; use std::{ - sync::Arc, marker::PhantomData, time::{Duration, 
Instant}, collections::HashMap, + sync::Arc, time::{Duration, Instant}, collections::HashMap, }; use log::trace; use codec::{Encode, Decode}; use parking_lot::Mutex; use merlin::Transcript; -use sp_core::{Blake2Hasher, H256, crypto::{Pair, Public}}; -use sp_blockchain::{Result as ClientResult, ProvideCache, HeaderMetadata}; +use sp_core::{H256, crypto::{Pair, Public}}; +use sp_blockchain::{ProvideCache, HeaderMetadata}; use sp_inherents::InherentData; use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; use sp_consensus::{ - Error as ConsensusError, BlockImportParams, BlockOrigin, ForkChoiceStrategy, - ImportResult, BlockImport, BlockCheckParams, + Error as ConsensusError, BlockImportParams, BlockOrigin, ImportResult, + BlockImport, BlockCheckParams, }; use sp_consensus::import_queue::{Verifier, CacheKeyId, BasicQueue}; use sp_consensus_sassafras::{ @@ -42,7 +42,7 @@ use sp_consensus_sassafras::digest::{ NextEpochDescriptor, PostBlockDescriptor, PreDigest, CompatibleDigestItem }; use sp_consensus_sassafras::inherents::SassafrasInherentData; -use sp_runtime::{generic::BlockId, Justification}; +use sp_runtime::Justification; use sp_runtime::traits::{Block as BlockT, Header}; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_block_builder::BlockBuilder as BlockBuilderApi; @@ -50,7 +50,6 @@ use sc_client::{Client, CallExecutor}; use sc_client_api::backend::{AuxStore, Backend}; use sc_consensus_epochs::{descendent_query, Epoch as EpochT, SharedEpochChanges}; use sc_consensus_slots::SlotCompatible; -use crate::aux_schema::{load_epoch_changes, write_epoch_changes}; /// Validator set of a particular epoch, can be either publishing or validating. 
#[derive(Debug, Clone, Encode, Decode)] @@ -194,18 +193,19 @@ impl SassafrasVerifier where } let mut epoch_changes = self.epoch_changes.lock(); - let epoch_data = { - epoch_changes.epoch_for_child_of_mut( - descendent_query(&*self.client), - &parent_hash, - parent_header_metadata.number, - unimplemented!(), // TODO - |slot| unimplemented!() // TODO - ) - .map_err(|_| Error::InvalidEpochData)? - .ok_or(Error::InvalidEpochData)? - }; - let epoch = epoch_data.as_mut(); + let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_header_metadata.number, + unimplemented!(), // TODO + ) + .map_err(|_| Error::InvalidEpochData)? + .ok_or_else(|| Error::InvalidEpochData)?; + let viable_epoch = epoch_changes.viable_epoch_mut( + &epoch_descriptor, + |_| unimplemented!(), + ).ok_or_else(|| Error::InvalidEpochData)?; + let epoch = viable_epoch.as_mut(); // Check the signature. let (author, block_weight) = epoch.validating.authorities From 7a784becfccfea5cbe37fcee738037b2f6b6f940 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 16 Feb 2020 15:06:31 +0100 Subject: [PATCH 30/75] [WIP] Duplicate some code --- Cargo.lock | 2 + client/consensus/sassafras/src/lib.rs | 407 +++++++++++++----- primitives/consensus/sassafras/Cargo.toml | 4 + .../sassafras/src/{digest.rs => digests.rs} | 0 primitives/consensus/sassafras/src/lib.rs | 44 +- primitives/consensus/sassafras/src/vrf.rs | 55 ++- 6 files changed, 390 insertions(+), 122 deletions(-) rename primitives/consensus/sassafras/src/{digest.rs => digests.rs} (100%) diff --git a/Cargo.lock b/Cargo.lock index 012bb551702d2..e8efe61094c34 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6986,7 +6986,9 @@ version = "2.0.0" dependencies = [ "parity-scale-codec", "schnorrkel", + "sp-api", "sp-application-crypto", + "sp-consensus", "sp-core", "sp-inherents", "sp-runtime", diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 
cc2aa9e1c66a7..3590fe6fbec2c 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -19,14 +19,14 @@ mod aux_schema; use std::{ - sync::Arc, time::{Duration, Instant}, collections::HashMap, + sync::Arc, time::{Duration, Instant}, collections::HashMap, convert::TryInto, }; use log::trace; use codec::{Encode, Decode}; use parking_lot::Mutex; use merlin::Transcript; use sp_core::{H256, crypto::{Pair, Public}}; -use sp_blockchain::{ProvideCache, HeaderMetadata}; +use sp_blockchain::{ProvideCache, HeaderMetadata, Result as ClientResult}; use sp_inherents::InherentData; use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; use sp_consensus::{ @@ -36,19 +36,22 @@ use sp_consensus::{ use sp_consensus::import_queue::{Verifier, CacheKeyId, BasicQueue}; use sp_consensus_sassafras::{ SASSAFRAS_ENGINE_ID, AuthorityPair, AuthorityId, Randomness, VRFProof, - SassafrasAuthorityWeight, SlotNumber, + SassafrasAuthorityWeight, SlotNumber, SassafrasConfiguration, + SassafrasApi, }; -use sp_consensus_sassafras::digest::{ +use sp_consensus_sassafras::digests::{ NextEpochDescriptor, PostBlockDescriptor, PreDigest, CompatibleDigestItem }; use sp_consensus_sassafras::inherents::SassafrasInherentData; use sp_runtime::Justification; use sp_runtime::traits::{Block as BlockT, Header}; -use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_api::{ApiExt, ProvideRuntimeApi, NumberFor}; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sc_client::{Client, CallExecutor}; use sc_client_api::backend::{AuxStore, Backend}; -use sc_consensus_epochs::{descendent_query, Epoch as EpochT, SharedEpochChanges}; +use sc_consensus_epochs::{ + descendent_query, Epoch as EpochT, SharedEpochChanges, ViableEpochDescriptor +}; use sc_consensus_slots::SlotCompatible; /// Validator set of a particular epoch, can be either publishing or validating. 
@@ -139,12 +142,176 @@ impl std::convert::From> for String { } } +/// Intermediate value passed to block importer. +pub struct SassafrasIntermediate { + /// The epoch descriptor. + pub epoch_descriptor: ViableEpochDescriptor, Epoch>, +} + +/// Intermediate key for Sassafras engine. +pub static INTERMEDIATE_KEY: &[u8] = b"sassafras1"; + +/// Configuration for Sassafras. +#[derive(Clone)] +pub struct Config(sc_consensus_slots::SlotDuration); + +impl Config { + /// Either fetch the slot duration from disk or compute it from the genesis + /// state. + pub fn get_or_compute(client: &C) -> ClientResult where + C: AuxStore + ProvideRuntimeApi, C::Api: SassafrasApi, + { + sc_consensus_slots::SlotDuration::get_or_compute(client, |a, b| a.configuration(b)).map(Self) + } + + /// Create the genesis epoch (epoch #0) + pub fn genesis_epoch(&self, slot_number: SlotNumber) -> Epoch { + let proofs = self.genesis_proofs.clone() + .into_iter() + .map(|p| p.try_into().expect("Genesis proofs are invalid")) + .collect::>(); + + Epoch { + epoch_index: 0, + start_slot: slot_number, + duration: self.epoch_length, + + validating: ValidatorSet { + proofs: proofs.clone(), + authorities: self.genesis_authorities.clone(), + randomness: self.randomness.clone(), + }, + publishing: ValidatorSet { + proofs, + authorities: self.genesis_authorities.clone(), + randomness: self.randomness.clone(), + }, + } + } +} + +impl std::ops::Deref for Config { + type Target = SassafrasConfiguration; + + fn deref(&self) -> &SassafrasConfiguration { + &*self.0 + } +} + +#[derive(Default, Clone)] +struct TimeSource(Arc, Vec<(Instant, u64)>)>>); + +impl SlotCompatible for TimeSource { + fn extract_timestamp_and_slot( + &self, + data: &InherentData, + ) -> Result<(TimestampInherent, u64, std::time::Duration), sp_consensus::Error> { + trace!(target: "sassafras", "extract timestamp"); + data.timestamp_inherent_data() + .and_then(|t| data.sassafras_inherent_data().map(|a| (t, a))) + .map_err(Into::into) + 
.map_err(sp_consensus::Error::InherentData) + .map(|(x, y)| (x, y, self.0.lock().0.take().unwrap_or_default())) + } +} + +/// State that must be shared between the import queue and the authoring logic. +#[derive(Clone)] +pub struct SassafrasLink { + time_source: TimeSource, + epoch_changes: SharedEpochChanges, + config: Config, +} + +impl SassafrasLink { + /// Get the epoch changes of this link. + pub fn epoch_changes(&self) -> &SharedEpochChanges { + &self.epoch_changes + } + + /// Get the config of this link. + pub fn config(&self) -> &Config { + &self.config + } +} + pub struct SassafrasVerifier { client: Arc>, api: Arc, - epoch_changes: SharedEpochChanges, inherent_data_providers: sp_inherents::InherentDataProviders, - time_source: TimeSource, + link: SassafrasLink, +} + +impl BabeVerifier { + fn check_inherents( + &self, + block: Block, + block_id: BlockId, + inherent_data: InherentData, + ) -> Result<(), Error> + where + PRA: ProvideRuntimeApi, + PRA::Api: BlockBuilderApi + { + let inherent_res = self.api.runtime_api().check_inherents( + &block_id, + block, + inherent_data, + ).map_err(Error::Client)?; + + if !inherent_res.ok() { + inherent_res + .into_errors() + .try_for_each(|(i, e)| { + Err(Error::CheckInherents(self.inherent_data_providers.error_to_string(&i, &e))) + }) + } else { + Ok(()) + } + } +} + +#[allow(dead_code)] +fn median_algorithm( + median_required_blocks: u64, + slot_duration: u64, + slot_number: u64, + slot_now: u64, + time_source: &mut (Option, Vec<(Instant, u64)>), +) { + let num_timestamps = time_source.1.len(); + if num_timestamps as u64 >= median_required_blocks && median_required_blocks > 0 { + let mut new_list: Vec<_> = time_source.1.iter().map(|&(t, sl)| { + let offset: u128 = u128::from(slot_duration) + .checked_mul(1_000_000u128) // self.config.slot_duration returns milliseconds + .and_then(|x| { + x.checked_mul(u128::from(slot_number).saturating_sub(u128::from(sl))) + }) + .expect("we cannot have timespans long enough for this 
to overflow; qed"); + + const NANOS_PER_SEC: u32 = 1_000_000_000; + let nanos = (offset % u128::from(NANOS_PER_SEC)) as u32; + let secs = (offset / u128::from(NANOS_PER_SEC)) as u64; + + t + Duration::new(secs, nanos) + }).collect(); + + // Use a partial sort to move the median timestamp to the middle of the list + pdqselect::select(&mut new_list, num_timestamps / 2); + + let &median = new_list + .get(num_timestamps / 2) + .expect("we have at least one timestamp, so this is a valid index; qed"); + + let now = Instant::now(); + if now >= median { + time_source.0.replace(now - median); + } + + time_source.1.clear(); + } else { + time_source.1.push((Instant::now(), slot_now)) + } } impl SassafrasVerifier where @@ -153,12 +320,12 @@ impl SassafrasVerifier where E: CallExecutor + 'static + Clone + Send + Sync, RA: Send + Sync, PRA: ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, - PRA::Api: BlockBuilderApi, + PRA::Api: BlockBuilderApi + SassafrasApi, { fn verify( &mut self, origin: BlockOrigin, - mut header: Block::Header, + header: Block::Header, justification: Option, mut body: Option>, ) -> Result<(BlockImportParams, Option)>>), Error> { @@ -187,107 +354,111 @@ impl SassafrasVerifier where // First, Verify pre-runtime digest. let pre_digest = find_pre_digest::(&header)?; - - if pre_digest.slot > slot_now { - return Err(Error::SlotInFuture.into()) - } - let mut epoch_changes = self.epoch_changes.lock(); let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( descendent_query(&*self.client), &parent_hash, parent_header_metadata.number, - unimplemented!(), // TODO + pre_digest.slot_number(), ) - .map_err(|_| Error::InvalidEpochData)? - .ok_or_else(|| Error::InvalidEpochData)?; + .map_err(|e| Error::ForkTree(Box::new(e)))? 
+ .ok_or_else(|| Error::FetchEpoch(parent_hash))?; let viable_epoch = epoch_changes.viable_epoch_mut( &epoch_descriptor, - |_| unimplemented!(), - ).ok_or_else(|| Error::InvalidEpochData)?; - let epoch = viable_epoch.as_mut(); - - // Check the signature. - let (author, block_weight) = epoch.validating.authorities - .get(pre_digest.authority_index as usize) - .cloned() - .ok_or(Error::InvalidAuthorityId)?; - let seal = header.digest_mut().pop() - .ok_or(Error::HeaderUnsealed(header.hash()))?; - let signature = seal.as_sassafras_seal().ok_or(Error::InvalidSeal)?; - let pre_hash = header.hash(); - if !AuthorityPair::verify(&signature, pre_hash, &author) { - return Err(Error::InvalidSeal.into()) - } - - // Check that the ticket VRF is of a valid index in auxiliary.validating. - let ticket_vrf_proof = epoch.validating.proofs - .get(pre_digest.ticket_vrf_index as usize) - .cloned() - .ok_or(Error::InvalidTicketVRFIndex)?; - - // Check that the ticket VRF is valid. - let ticket_transcript = make_ticket_transcript( - &epoch.validating.randomness, - pre_digest.slot, - epoch.epoch_index, - ); - schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { - p.vrf_verify(ticket_transcript, &pre_digest.ticket_vrf_output, &ticket_vrf_proof) - }).map_err(|_| Error::TicketVRFVerificationFailed)?; - - // Check that the post-block VRF is valid. - let post_transcript = make_post_transcript( - &epoch.validating.randomness, - pre_digest.slot, - epoch.epoch_index, - ); - schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { - p.vrf_verify(post_transcript, &pre_digest.post_vrf_output, &pre_digest.post_vrf_proof) - }).map_err(|_| Error::PostVRFVerificationFailed)?; - - // Second, push in any commitments of ticket VRF. - if let Some(post_block_desc) = find_post_block_descriptor::(&header)? { - // TODO: verify that proofs are below threshold. 
- - epoch.publishing.proofs.append(&mut post_block_desc.commitments.clone()); - } - - // Finally, if we are switching epoch, move publishing to validating, and sort the proofs. - if let Some(next_epoch_desc) = find_next_epoch_descriptor::(&header)? { - // TODO: check descriptor validity. - - std::mem::swap(&mut epoch.publishing, &mut epoch.validating); - epoch.publishing = ValidatorSet { - proofs: Vec::new(), - authorities: next_epoch_desc.authorities, - randomness: next_epoch_desc.randomness, - }; - - // TODO: sort the validating proofs in "outside-in" order. - } - - let mut block_import_params = BlockImportParams { - origin, - header, - post_digests: vec![seal], - body, - finalized: false, - justification, - auxiliary: vec![], - fork_choice: None, - intermediates: Default::default(), - storage_changes: None, - allow_missing_state: false, - import_existing: false, + |slot| self.link.config.genesis_epoch(slot), + ).ok_or_else(|| Error::FetchEpoch(parent_hash))?; + + let v_params = verification::VerificationParams { + header: header.clone(), + pre_digest: Some(pre_digest.clone()), + slot_now: slot_now + 1, + epoch: viable_epoch.as_ref(), + config: &self.config, }; - crate::aux_schema::write_epoch_changes::( - &*epoch_changes, - |insert| block_import_params.auxiliary.extend( - insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ) - ); + match verification::check_header::(v_params)? 
{ + CheckedHeader::Checked(pre_header, verified_info) => { + let babe_pre_digest = verified_info.pre_digest.as_babe_pre_digest() + .expect("check_header always returns a pre-digest digest item; qed"); + + let slot_number = babe_pre_digest.slot_number(); + + let author = verified_info.author; + + // the header is valid but let's check if there was something else already + // proposed at the same slot by the given author + if let Some(equivocation_proof) = check_equivocation( + &*self.api, + slot_now, + babe_pre_digest.slot_number(), + &header, + &author, + ).map_err(|e| e.to_string())? { + info!( + "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", + author, + babe_pre_digest.slot_number(), + equivocation_proof.fst_header().hash(), + equivocation_proof.snd_header().hash(), + ); + } + + // if the body is passed through, we need to use the runtime + // to check that the internally-set timestamp in the inherents + // actually matches the slot set in the seal. + if let Some(inner_body) = body.take() { + inherent_data.babe_replace_inherent_data(slot_number); + let block = Block::new(pre_header.clone(), inner_body); + + self.check_inherents( + block.clone(), + BlockId::Hash(parent_hash), + inherent_data, + )?; + + let (_, inner_body) = block.deconstruct(); + body = Some(inner_body); + } + + trace!(target: "babe", "Checked {:?}; importing.", pre_header); + telemetry!( + CONSENSUS_TRACE; + "babe.checked_and_importing"; + "pre_header" => ?pre_header); + + let mut intermediates = HashMap::new(); + intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { + epoch_descriptor, + }) as Box, + ); + + let block_import_params = BlockImportParams { + origin, + header: pre_header, + post_digests: vec![verified_info.seal], + body, + storage_changes: None, + finalized: false, + justification, + auxiliary: Vec::new(), + intermediates, + fork_choice: None, + allow_missing_state: false, + import_existing: false, + }; + + 
Ok((block_import_params, Default::default())) + } + CheckedHeader::Deferred(a, b) => { + debug!(target: "babe", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); + telemetry!(CONSENSUS_DEBUG; "babe.header_too_far_in_future"; + "hash" => ?hash, "a" => ?a, "b" => ?b + ); + Err(Error::::TooFarInFuture(hash).into()) + } + } Ok((block_import_params, Default::default())) } @@ -314,10 +485,27 @@ impl Verifier for SassafrasVerifier = BasicQueue; +/// Register the Sassafras inherent data provider, if not registered already. +fn register_sassafras_inherent_data_provider( + inherent_data_providers: &InherentDataProviders, + slot_duration: u64, +) -> Result<(), sp_consensus::Error> { + debug!(target: "sassafras", "Registering"); + if !inherent_data_providers.has_provider(&sp_consensus_sassafras::inherents::INHERENT_IDENTIFIER) { + inherent_data_providers + .register_provider(sp_consensus_sassafras::inherents::InherentDataProvider::new(slot_duration)) + .map_err(Into::into) + .map_err(sp_consensus::Error::InherentData) + } else { + Ok(()) + } +} + pub struct SassafrasBlockImport { inner: I, client: Arc>, api: Arc, + link: } impl BlockImport for @@ -360,23 +548,6 @@ where } } -#[derive(Default, Clone)] -struct TimeSource(Arc, Vec<(Instant, u64)>)>>); - -impl SlotCompatible for TimeSource { - fn extract_timestamp_and_slot( - &self, - data: &InherentData, - ) -> Result<(TimestampInherent, u64, std::time::Duration), sp_consensus::Error> { - trace!(target: "babe", "extract timestamp"); - data.timestamp_inherent_data() - .and_then(|t| data.sassafras_inherent_data().map(|a| (t, a))) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - .map(|(x, y)| (x, y, self.0.lock().0.take().unwrap_or_default())) - } -} - fn find_pre_digest( header: &B::Header, ) -> Result> { diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index 1e7bbb5d6bf4a..8586b99478a5e 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ 
b/primitives/consensus/sassafras/Cargo.toml @@ -10,6 +10,8 @@ codec = { package = "parity-scale-codec", version = "1.0.0", default-features = schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"], optional = true } sp-std = { path = "../../std", default-features = false } sp-core = { path = "../../core", default-features = false } +sp-api = { version = "2.0.0", default-features = false, path = "../../api" } +sp-consensus = { version = "0.8", optional = true, path = "../common" } sp-inherents = { path = "../../inherents", default-features = false } sp-timestamp = { path = "../../timestamp", default-features = false } sp-runtime = { path = "../../runtime", default-features = false } @@ -22,6 +24,8 @@ std = [ "schnorrkel", "sp-std/std", "sp-core/std", + "sp-api/std", + "sp-consensus", "sp-inherents/std", "sp-timestamp/std", "sp-runtime/std", diff --git a/primitives/consensus/sassafras/src/digest.rs b/primitives/consensus/sassafras/src/digests.rs similarity index 100% rename from primitives/consensus/sassafras/src/digest.rs rename to primitives/consensus/sassafras/src/digests.rs diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 8b2becbf05c38..c55194f0f243a 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -20,7 +20,7 @@ // #![forbid(unsafe_code, missing_docs, unused_variables, unused_imports)] #![cfg_attr(not(feature = "std"), no_std)] -pub mod digest; +pub mod digests; pub mod inherents; mod vrf; @@ -29,7 +29,8 @@ pub use crate::vrf::{ RawVRFProof, VRFProof, Randomness, }; -use sp_runtime::ConsensusEngineId; +use sp_runtime::{ConsensusEngineId, RuntimeDebug}; +use codec::{Encode, Decode}; mod app { use sp_application_crypto::{app_crypto, key_types::SASSAFRAS, sr25519}; @@ -74,3 +75,42 @@ pub type SassafrasAuthorityWeight = u64; /// The weight of a Sassafras block. 
pub type SassafrasBlockWeight = u32; + +/// Configuration data used by the Sassafras consensus engine. +#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] +pub struct SassafrasConfiguration { + /// The slot duration in milliseconds for Sassafras. + pub slot_duration: u64, + + /// The duration of epochs in slots. + pub epoch_length: SlotNumber, + + /// The authorities for the genesis epoch. + pub genesis_authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + + /// The proofs for genesis epoch. + pub genesis_proofs: Vec, + + /// The randomness for the genesis epoch. + pub randomness: [u8; VRF_OUTPUT_LENGTH], +} + +#[cfg(feature = "std")] +impl sp_consensus::SlotData for SassafrasConfiguration { + fn slot_duration(&self) -> u64 { + self.slot_duration + } + + const SLOT_KEY: &'static [u8] = b"sassafras_configuration"; +} + +sp_api::decl_runtime_apis! { + /// API necessary for block authorship with Sassafras. + pub trait SassafrasApi { + /// Return the configuration for Sassafras. + fn configuration() -> SassafrasConfiguration; + + /// Return the proofs appended at the current block. 
+ fn proofs() -> Vec; + } +} diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs index 2e36b773f6080..93c160ae46e4d 100644 --- a/primitives/consensus/sassafras/src/vrf.rs +++ b/primitives/consensus/sassafras/src/vrf.rs @@ -1,10 +1,13 @@ +use core::convert::TryFrom; use codec::{Encode, Decode, EncodeLike}; use schnorrkel::{SignatureError, errors::MultiSignatureStage}; use sp_std::ops::{Deref, DerefMut}; +use sp_runtime::RuntimeDebug; pub use schnorrkel::vrf::{VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH}; -pub type RawVRFOutput = [u8; VRF_OUTPUT_LENGTH]; +#[derive(Clone, Eq, PartialEq, RuntimeDebug, Encode, Decode)] +pub struct RawVRFOutput(pub [u8; VRF_OUTPUT_LENGTH]); #[cfg(feature = "std")] #[derive(Clone, Debug)] @@ -42,7 +45,39 @@ impl Decode for VRFOutput { } } -pub type RawVRFProof = [u8; VRF_PROOF_LENGTH]; +#[cfg(feature = "std")] +impl TryFrom for VRFOutput { + type Error = SignatureError; + + fn try_from(raw: RawVRFOutput) -> Result { + schnorrkel::vrf::VRFOutput::from_bytes(&raw.0).map(VRFOutput) + } +} + +#[cfg(feature = "std")] +impl From for RawVRFOutput { + fn from(output: VRFOutput) -> RawVRFOutput { + RawVRFOutput(output.to_bytes()) + } +} + +#[derive(Clone, Encode, Decode)] +pub struct RawVRFProof(pub [u8; VRF_PROOF_LENGTH]); + +#[cfg(feature = "std")] +impl std::fmt::Debug for RawVRFProof { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", &self) + } +} + +impl core::cmp::PartialEq for RawVRFProof { + fn eq(&self, other: &Self) -> bool { + self == other + } +} + +impl core::cmp::Eq for RawVRFProof { } #[cfg(feature = "std")] #[derive(Clone, Debug)] @@ -80,6 +115,22 @@ impl Decode for VRFProof { } } +#[cfg(feature = "std")] +impl TryFrom for VRFProof { + type Error = SignatureError; + + fn try_from(raw: RawVRFProof) -> Result { + schnorrkel::vrf::VRFProof::from_bytes(&raw.0).map(VRFProof) + } +} + +#[cfg(feature = "std")] +impl From for RawVRFProof { + fn from(output: 
VRFProof) -> RawVRFProof { + RawVRFProof(output.to_bytes()) + } +} + #[cfg(feature = "std")] fn convert_error(e: SignatureError) -> codec::Error { use SignatureError::*; From 72e7d7d262523ba737f1ebcedce30f402ba181af Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 16 Feb 2020 15:08:05 +0100 Subject: [PATCH 31/75] [WIP] Reinit, use BABE as starting point --- client/consensus/sassafras/Cargo.toml | 67 +- client/consensus/sassafras/src/authorship.rs | 218 +++ client/consensus/sassafras/src/aux_schema.rs | 71 +- client/consensus/sassafras/src/lib.rs | 1167 ++++++++++++++--- client/consensus/sassafras/src/tests.rs | 812 ++++++++++++ .../consensus/sassafras/src/verification.rs | 217 +++ 6 files changed, 2306 insertions(+), 246 deletions(-) create mode 100644 client/consensus/sassafras/src/authorship.rs create mode 100644 client/consensus/sassafras/src/tests.rs create mode 100644 client/consensus/sassafras/src/verification.rs diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 88194e7c2c043..c36b5216c2d1f 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -1,27 +1,58 @@ [package] -name = "sc-consensus-sassafras" -version = "2.0.0" +name = "sc-consensus-babe" +version = "0.8.0" authors = ["Parity Technologies "] -description = "SASSAFRAS consensus algorithm for substrate" +description = "BABE consensus algorithm for substrate" edition = "2018" +license = "GPL-3.0" [dependencies] codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } +sp-consensus-babe = { version = "0.8", path = "../../../primitives/consensus/babe" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } +num-bigint = "0.2.3" +num-rational = "0.2.2" +num-traits = "0.2.8" +sp-version = { version = "2.0.0", path = "../../../primitives/version" } +sp-io = { version = "2.0.0", path 
= "../../../primitives/io" } +sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +sp-timestamp = { version = "2.0.0", path = "../../../primitives/timestamp" } +sc-telemetry = { version = "2.0.0", path = "../../telemetry" } +sc-keystore = { version = "2.0.0", path = "../../keystore" } +sc-client-api = { version = "2.0.0", path = "../../api" } +sc-client = { version = "0.8", path = "../../" } +sc-consensus-epochs = { version = "0.8", path = "../epochs" } +sp-api = { version = "2.0.0", path = "../../../primitives/api" } +sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } +sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } +sc-consensus-uncles = { version = "0.8", path = "../uncles" } +sc-consensus-slots = { version = "0.8", path = "../slots" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +fork-tree = { version = "2.0.0", path = "../../../utils/fork-tree" } +futures = "0.3.1" +futures-timer = "3.0.1" +parking_lot = "0.10.0" log = "0.4.8" schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"] } -derive_more = "0.99.2" -parking_lot = "0.10.0" +rand = "0.7.2" merlin = "1.2.1" -sp-core = { path = "../../../primitives/core" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { path = "../../../primitives/consensus/common" } -sp-consensus-sassafras = { path = "../../../primitives/consensus/sassafras" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-block-builder = { path = "../../../primitives/block-builder" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-timestamp = { path = "../../../primitives/timestamp" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sc-client = { path = "../../" } -sc-client-api = { path = "../../api" } -sc-consensus-slots = { path = "../slots" } -sc-consensus-epochs 
= { path = "../epochs" } +pdqselect = "0.1.0" +derive_more = "0.99.2" + +[dev-dependencies] +sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } +sc-executor = { version = "0.8", path = "../../executor" } +sc-network = { version = "0.8", path = "../../network" } +sc-network-test = { version = "0.8.0", path = "../../network/test" } +sc-service = { version = "0.8", path = "../../service" } +substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +sc-block-builder = { version = "0.8", path = "../../block-builder" } +tokio = "0.1.22" +env_logger = "0.7.0" +tempfile = "3.1.0" +futures01 = { package = "futures", version = "0.1" } + +[features] +test-helpers = [] diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs new file mode 100644 index 0000000000000..8b28aefa2f77a --- /dev/null +++ b/client/consensus/sassafras/src/authorship.rs @@ -0,0 +1,218 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! BABE authority selection and slot claiming. 
+ +use merlin::Transcript; +use sp_consensus_babe::{ + AuthorityId, BabeAuthorityWeight, BABE_ENGINE_ID, BABE_VRF_PREFIX, + SlotNumber, AuthorityPair, BabeConfiguration +}; +use sp_consensus_babe::digests::PreDigest; +use sp_core::{U256, blake2_256}; +use codec::Encode; +use schnorrkel::vrf::VRFInOut; +use sp_core::Pair; +use sc_keystore::KeyStorePtr; +use super::Epoch; + +/// Calculates the primary selection threshold for a given authority, taking +/// into account `c` (`1 - c` represents the probability of a slot being empty). +pub(super) fn calculate_primary_threshold( + c: (u64, u64), + authorities: &[(AuthorityId, BabeAuthorityWeight)], + authority_index: usize, +) -> u128 { + use num_bigint::BigUint; + use num_rational::BigRational; + use num_traits::{cast::ToPrimitive, identities::One}; + + let c = c.0 as f64 / c.1 as f64; + + let theta = + authorities[authority_index].1 as f64 / + authorities.iter().map(|(_, weight)| weight).sum::() as f64; + + let calc = || { + let p = BigRational::from_float(1f64 - (1f64 - c).powf(theta))?; + let numer = p.numer().to_biguint()?; + let denom = p.denom().to_biguint()?; + ((BigUint::one() << 128) * numer / denom).to_u128() + }; + + calc().unwrap_or(u128::max_value()) +} + +/// Returns true if the given VRF output is lower than the given threshold, +/// false otherwise. +pub(super) fn check_primary_threshold(inout: &VRFInOut, threshold: u128) -> bool { + u128::from_le_bytes(inout.make_bytes::<[u8; 16]>(BABE_VRF_PREFIX)) < threshold +} + +/// Get the expected secondary author for the given slot and with given +/// authorities. This should always assign the slot to some authority unless the +/// authorities list is empty. 
+pub(super) fn secondary_slot_author( + slot_number: u64, + authorities: &[(AuthorityId, BabeAuthorityWeight)], + randomness: [u8; 32], +) -> Option<&AuthorityId> { + if authorities.is_empty() { + return None; + } + + let rand = U256::from((randomness, slot_number).using_encoded(blake2_256)); + + let authorities_len = U256::from(authorities.len()); + let idx = rand % authorities_len; + + let expected_author = authorities.get(idx.as_u32() as usize) + .expect("authorities not empty; index constrained to list length; \ + this is a valid index; qed"); + + Some(&expected_author.0) +} + +#[allow(deprecated)] +pub(super) fn make_transcript( + randomness: &[u8], + slot_number: u64, + epoch: u64, +) -> Transcript { + let mut transcript = Transcript::new(&BABE_ENGINE_ID); + transcript.commit_bytes(b"slot number", &slot_number.to_le_bytes()); + transcript.commit_bytes(b"current epoch", &epoch.to_le_bytes()); + transcript.commit_bytes(b"chain randomness", randomness); + transcript +} + + +/// Claim a secondary slot if it is our turn to propose, returning the +/// pre-digest to use when authoring the block, or `None` if it is not our turn +/// to propose. +fn claim_secondary_slot( + slot_number: SlotNumber, + authorities: &[(AuthorityId, BabeAuthorityWeight)], + keystore: &KeyStorePtr, + randomness: [u8; 32], +) -> Option<(PreDigest, AuthorityPair)> { + if authorities.is_empty() { + return None; + } + + let expected_author = super::authorship::secondary_slot_author( + slot_number, + authorities, + randomness, + )?; + + let keystore = keystore.read(); + + for (pair, authority_index) in authorities.iter() + .enumerate() + .flat_map(|(i, a)| { + keystore.key_pair::(&a.0).ok().map(|kp| (kp, i)) + }) + { + if pair.public() == *expected_author { + let pre_digest = PreDigest::Secondary { + slot_number, + authority_index: authority_index as u32, + }; + + return Some((pre_digest, pair)); + } + } + + None +} + +/// Tries to claim the given slot number. 
This method starts by trying to claim +/// a primary VRF based slot. If we are not able to claim it, then if we have +/// secondary slots enabled for the given epoch, we will fallback to trying to +/// claim a secondary slot. +pub(super) fn claim_slot( + slot_number: SlotNumber, + epoch: &Epoch, + config: &BabeConfiguration, + keystore: &KeyStorePtr, +) -> Option<(PreDigest, AuthorityPair)> { + claim_primary_slot(slot_number, epoch, config.c, keystore) + .or_else(|| { + if config.secondary_slots { + claim_secondary_slot( + slot_number, + &epoch.authorities, + keystore, + epoch.randomness, + ) + } else { + None + } + }) +} + +fn get_keypair(q: &AuthorityPair) -> &schnorrkel::Keypair { + use sp_core::crypto::IsWrappedBy; + sp_core::sr25519::Pair::from_ref(q).as_ref() +} + +/// Claim a primary slot if it is our turn. Returns `None` if it is not our turn. +/// This hashes the slot number, epoch, genesis hash, and chain randomness into +/// the VRF. If the VRF produces a value less than `threshold`, it is our turn, +/// so it returns `Some(_)`. Otherwise, it returns `None`. +fn claim_primary_slot( + slot_number: SlotNumber, + epoch: &Epoch, + c: (u64, u64), + keystore: &KeyStorePtr, +) -> Option<(PreDigest, AuthorityPair)> { + let Epoch { authorities, randomness, epoch_index, .. } = epoch; + let keystore = keystore.read(); + + for (pair, authority_index) in authorities.iter() + .enumerate() + .flat_map(|(i, a)| { + keystore.key_pair::(&a.0).ok().map(|kp| (kp, i)) + }) + { + let transcript = super::authorship::make_transcript(randomness, slot_number, *epoch_index); + + // Compute the threshold we will use. + // + // We already checked that authorities contains `key.public()`, so it can't + // be empty. Therefore, this division in `calculate_threshold` is safe. 
+ let threshold = super::authorship::calculate_primary_threshold(c, authorities, authority_index); + + let pre_digest = get_keypair(&pair) + .vrf_sign_after_check(transcript, |inout| super::authorship::check_primary_threshold(inout, threshold)) + .map(|s| { + PreDigest::Primary { + slot_number, + vrf_output: s.0.to_output(), + vrf_proof: s.1, + authority_index: authority_index as u32, + } + }); + + // early exit on first successful claim + if let Some(pre_digest) = pre_digest { + return Some((pre_digest, pair)); + } + } + + None +} diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs index 465fa2fe9bb43..2f64157f22951 100644 --- a/client/consensus/sassafras/src/aux_schema.rs +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -1,14 +1,38 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Schema for BABE epoch changes in the aux-db. 
+ use std::sync::Arc; use parking_lot::Mutex; -use codec::{Encode, Decode}; use log::info; -use sp_runtime::traits::Block as BlockT; +use codec::{Decode, Encode}; + +use sc_client_api::backend::AuxStore; use sp_blockchain::{Result as ClientResult, Error as ClientError}; -use sc_client_api::AuxStore; -use sc_consensus_epochs::{SharedEpochChanges, EpochChangesFor}; +use sp_runtime::traits::Block as BlockT; +use sp_consensus_babe::BabeBlockWeight; +use sc_consensus_epochs::{EpochChangesFor, SharedEpochChanges}; use crate::Epoch; -const SASSAFRAS_EPOCH_CHANGES: &[u8] = b"sassafras_epoch_changes"; +const BABE_EPOCH_CHANGES: &[u8] = b"babe_epoch_changes"; + +fn block_weight_key(block_hash: H) -> Vec { + (b"block_weight", block_hash).encode() +} fn load_decode(backend: &B, key: &[u8]) -> ClientResult> where @@ -16,7 +40,7 @@ fn load_decode(backend: &B, key: &[u8]) -> ClientResult> T: Decode, { let corrupt = |e: codec::Error| { - ClientError::Backend(format!("Sassafras DB is corrupted. Decode error: {}", e.what())) + ClientError::Backend(format!("BABE DB is corrupted. Decode error: {}", e.what())) }; match backend.get_aux(key)? { None => Ok(None), @@ -28,13 +52,11 @@ fn load_decode(backend: &B, key: &[u8]) -> ClientResult> pub(crate) fn load_epoch_changes( backend: &B, ) -> ClientResult> { - let epoch_changes = load_decode::<_, EpochChangesFor>( - backend, SASSAFRAS_EPOCH_CHANGES - )? + let epoch_changes = load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES)? .map(|v| Arc::new(Mutex::new(v))) .unwrap_or_else(|| { - info!(target: "sassafras", - "Creating empty Sassafras epoch changes on what appears to be first startup." + info!(target: "babe", + "Creating empty BABE epoch changes on what appears to be first startup." 
); SharedEpochChanges::::default() }); @@ -57,6 +79,31 @@ pub(crate) fn write_epoch_changes( { let encoded_epoch_changes = epoch_changes.encode(); write_aux( - &[(SASSAFRAS_EPOCH_CHANGES, encoded_epoch_changes.as_slice())], + &[(BABE_EPOCH_CHANGES, encoded_epoch_changes.as_slice())], ) } + +/// Write the cumulative chain-weight of a block ot aux storage. +pub(crate) fn write_block_weight( + block_hash: H, + block_weight: &BabeBlockWeight, + write_aux: F, +) -> R where + F: FnOnce(&[(Vec, &[u8])]) -> R, +{ + + let key = block_weight_key(block_hash); + block_weight.using_encoded(|s| + write_aux( + &[(key, s)], + ) + ) +} + +/// Load the cumulative chain-weight associated with a block. +pub(crate) fn load_block_weight( + backend: &B, + block_hash: H, +) -> ClientResult> { + load_decode(backend, block_weight_key(block_hash).as_slice()) +} diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 3590fe6fbec2c..af6adbf2f39cd 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -14,89 +14,141 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! # Sassafras Consensus - -mod aux_schema; - +//! # BABE (Blind Assignment for Blockchain Extension) +//! +//! BABE is a slot-based block production mechanism which uses a VRF PRNG to +//! randomly perform the slot allocation. On every slot, all the authorities +//! generate a new random number with the VRF function and if it is lower than a +//! given threshold (which is proportional to their weight/stake) they have a +//! right to produce a block. The proof of the VRF function execution will be +//! used by other peer to validate the legitimacy of the slot claim. +//! +//! The engine is also responsible for collecting entropy on-chain which will be +//! used to seed the given VRF PRNG. An epoch is a contiguous number of slots +//! under which we will be using the same authority set. 
During an epoch all VRF +//! outputs produced as a result of block production will be collected on an +//! on-chain randomness pool. Epoch changes are announced one epoch in advance, +//! i.e. when ending epoch N, we announce the parameters (randomness, +//! authorities, etc.) for epoch N+2. +//! +//! Since the slot assignment is randomized, it is possible that a slot is +//! assigned to multiple validators in which case we will have a temporary fork, +//! or that a slot is assigned to no validator in which case no block is +//! produced. Which means that block times are not deterministic. +//! +//! The protocol has a parameter `c` [0, 1] for which `1 - c` is the probability +//! of a slot being empty. The choice of this parameter affects the security of +//! the protocol relating to maximum tolerable network delays. +//! +//! In addition to the VRF-based slot assignment described above, which we will +//! call primary slots, the engine also supports a deterministic secondary slot +//! assignment. Primary slots take precedence over secondary slots, when +//! authoring the node starts by trying to claim a primary slot and falls back +//! to a secondary slot claim attempt. The secondary slot assignment is done +//! by picking the authority at index: +//! +//! `blake2_256(epoch_randomness ++ slot_number) % authorities_len`. +//! +//! The fork choice rule is weight-based, where weight equals the number of +//! primary blocks in the chain. We will pick the heaviest chain (more primary +//! blocks) and will go with the longest one in case of a tie. +//! +//! An in-depth description and analysis of the protocol can be found here: +//! 
+ +#![forbid(unsafe_code)] +#![warn(missing_docs)] +pub use sp_consensus_babe::{ + BabeApi, ConsensusLog, BABE_ENGINE_ID, SlotNumber, BabeConfiguration, + AuthorityId, AuthorityPair, AuthoritySignature, + BabeAuthorityWeight, VRF_OUTPUT_LENGTH, + digests::{PreDigest, CompatibleDigestItem, NextEpochDescriptor}, +}; +pub use sp_consensus::SyncOracle; use std::{ - sync::Arc, time::{Duration, Instant}, collections::HashMap, convert::TryInto, + collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}, + any::Any, borrow::Cow }; -use log::trace; -use codec::{Encode, Decode}; +use sp_consensus_babe; +use sp_consensus::{ImportResult, CanAuthorWith}; +use sp_consensus::import_queue::{ + BoxJustificationImport, BoxFinalityProofImport, +}; +use sp_runtime::{ + generic::{BlockId, OpaqueDigestItemId}, Justification, + traits::{Block as BlockT, Header, DigestItemFor, Zero}, +}; +use sp_api::{ProvideRuntimeApi, NumberFor}; +use sc_keystore::KeyStorePtr; use parking_lot::Mutex; -use merlin::Transcript; -use sp_core::{H256, crypto::{Pair, Public}}; -use sp_blockchain::{ProvideCache, HeaderMetadata, Result as ClientResult}; -use sp_inherents::InherentData; -use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; +use sp_core::Pair; +use sp_inherents::{InherentDataProviders, InherentData}; +use sc_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG}; use sp_consensus::{ - Error as ConsensusError, BlockImportParams, BlockOrigin, ImportResult, - BlockImport, BlockCheckParams, -}; -use sp_consensus::import_queue::{Verifier, CacheKeyId, BasicQueue}; -use sp_consensus_sassafras::{ - SASSAFRAS_ENGINE_ID, AuthorityPair, AuthorityId, Randomness, VRFProof, - SassafrasAuthorityWeight, SlotNumber, SassafrasConfiguration, - SassafrasApi, + self, BlockImport, Environment, Proposer, BlockCheckParams, + ForkChoiceStrategy, BlockImportParams, BlockOrigin, Error as ConsensusError, + SelectChain, SlotData, }; -use sp_consensus_sassafras::digests::{ - 
NextEpochDescriptor, PostBlockDescriptor, PreDigest, CompatibleDigestItem +use sp_consensus_babe::inherents::BabeInherentData; +use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; +use sp_consensus::import_queue::{Verifier, BasicQueue, CacheKeyId}; +use sc_client_api::{ + backend::{AuxStore, Backend}, + call_executor::CallExecutor, + BlockchainEvents, ProvideUncles, }; -use sp_consensus_sassafras::inherents::SassafrasInherentData; -use sp_runtime::Justification; -use sp_runtime::traits::{Block as BlockT, Header}; -use sp_api::{ApiExt, ProvideRuntimeApi, NumberFor}; +use sc_client::Client; + use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sc_client::{Client, CallExecutor}; -use sc_client_api::backend::{AuxStore, Backend}; + +use futures::prelude::*; +use log::{warn, debug, info, trace}; +use sc_consensus_slots::{ + SlotWorker, SlotInfo, SlotCompatible, StorageChanges, CheckedHeader, check_equivocation, +}; use sc_consensus_epochs::{ - descendent_query, Epoch as EpochT, SharedEpochChanges, ViableEpochDescriptor + descendent_query, SharedEpochChanges, EpochChangesFor, Epoch as EpochT, ViableEpochDescriptor, }; -use sc_consensus_slots::SlotCompatible; +use sp_blockchain::{ + Result as ClientResult, Error as ClientError, + HeaderBackend, ProvideCache, HeaderMetadata +}; +use schnorrkel::SignatureError; +use codec::{Encode, Decode}; +use sp_api::ApiExt; -/// Validator set of a particular epoch, can be either publishing or validating. -#[derive(Debug, Clone, Encode, Decode)] -pub struct ValidatorSet { - /// Proofs of all VRFs collected. - pub proofs: Vec, - /// The authorities and their weights. - pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, - /// Randomness for this epoch. 
- pub randomness: Randomness, -} +mod aux_schema; +mod verification; +mod authorship; +#[cfg(test)] +mod tests; -/// Epoch data for Sassafras -#[derive(Debug, Clone, Encode, Decode)] +/// BABE epoch information +#[derive(Decode, Encode, Default, PartialEq, Eq, Clone, Debug)] pub struct Epoch { - /// Start slot of the epoch. + /// The epoch index + pub epoch_index: u64, + /// The starting slot of the epoch, pub start_slot: SlotNumber, - /// Duration of this epoch. + /// The duration of this epoch pub duration: SlotNumber, - /// Epoch index. - pub epoch_index: u64, - - /// Publishing validator set. The set will start validating block in the next epoch. - pub publishing: ValidatorSet, - /// Validating validator set. The set validates block in the current epoch. - pub validating: ValidatorSet, + /// The authorities and their weights + pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + /// Randomness for this epoch + pub randomness: [u8; VRF_OUTPUT_LENGTH], } impl EpochT for Epoch { - type SlotNumber = SlotNumber; type NextEpochDescriptor = NextEpochDescriptor; + type SlotNumber = SlotNumber; fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { Epoch { epoch_index: self.epoch_index + 1, start_slot: self.start_slot + self.duration, duration: self.duration, - - validating: self.publishing.clone(), - publishing: ValidatorSet { - proofs: Vec::new(), - authorities: descriptor.authorities, - randomness: descriptor.randomness, - }, + authorities: descriptor.authorities, + randomness: descriptor.randomness, } } @@ -111,29 +163,53 @@ impl EpochT for Epoch { #[derive(derive_more::Display, Debug)] enum Error { + #[display(fmt = "Multiple BABE pre-runtime digests, rejecting!")] + MultiplePreRuntimeDigests, + #[display(fmt = "No BABE pre-runtime digest found")] + NoPreRuntimeDigest, + #[display(fmt = "Multiple BABE epoch change digests, rejecting!")] + MultipleEpochChangeDigests, #[display(fmt = "Could not extract timestamp and slot: {:?}", _0)] 
Extraction(sp_consensus::Error), + #[display(fmt = "Could not fetch epoch at {:?}", _0)] + FetchEpoch(B::Hash), #[display(fmt = "Header {:?} rejected: too far in the future", _0)] TooFarInFuture(B::Hash), #[display(fmt = "Parent ({}) of {} unavailable. Cannot import", _0, _1)] ParentUnavailable(B::Hash, B::Hash), + #[display(fmt = "Slot number must increase: parent slot: {}, this slot: {}", _0, _1)] + SlotNumberMustIncrease(u64, u64), + #[display(fmt = "Header {:?} has a bad seal", _0)] + HeaderBadSeal(B::Hash), + #[display(fmt = "Header {:?} is unsealed", _0)] + HeaderUnsealed(B::Hash), + #[display(fmt = "Slot author not found")] + SlotAuthorNotFound, + #[display(fmt = "Secondary slot assignments are disabled for the current epoch.")] + SecondarySlotAssignmentsDisabled, + #[display(fmt = "Bad signature on {:?}", _0)] + BadSignature(B::Hash), + #[display(fmt = "Invalid author: Expected secondary author: {:?}, got: {:?}.", _0, _1)] + InvalidAuthor(AuthorityId, AuthorityId), + #[display(fmt = "No secondary author expected.")] + NoSecondaryAuthorExpected, + #[display(fmt = "VRF verification of block by author {:?} failed: threshold {} exceeded", _0, _1)] + VRFVerificationOfBlockFailed(AuthorityId, u128), + #[display(fmt = "VRF verification failed: {:?}", _0)] + VRFVerificationFailed(SignatureError), #[display(fmt = "Could not fetch parent header: {:?}", _0)] FetchParentHeader(sp_blockchain::Error), - InvalidEpochData, - MultiplePreRuntimeDigest, - NoPreRuntimeDigest, - MultipleNextEpochDescriptor, - MultiplePostBlockDescriptor, - InvalidTicketVRFIndex, - InvalidAuthorityId, - InvalidSeal, - HeaderUnsealed(B::Hash), - TicketVRFVerificationFailed, - PostVRFVerificationFailed, - SlotInPast, - SlotInFuture, - Runtime(sp_inherents::Error), + #[display(fmt = "Expected epoch change to happen at {:?}, s{}", _0, _1)] + ExpectedEpochChange(B::Hash, u64), + #[display(fmt = "Unexpected epoch change")] + UnexpectedEpochChange, + #[display(fmt = "Parent block of {} has no 
associated weight", _0)] + ParentBlockNoAssociatedWeight(B::Hash), + #[display(fmt = "Checking inherents failed: {}", _0)] + CheckInherents(String), Client(sp_blockchain::Error), + Runtime(sp_inherents::Error), + ForkTree(Box>), } impl std::convert::From> for String { @@ -142,62 +218,420 @@ impl std::convert::From> for String { } } +fn babe_err(error: Error) -> Error { + debug!(target: "babe", "{}", error); + error +} + +macro_rules! babe_info { + ($($i: expr),+) => { + { + info!(target: "babe", $($i),+); + format!($($i),+) + } + }; +} + + /// Intermediate value passed to block importer. -pub struct SassafrasIntermediate { +pub struct BabeIntermediate { /// The epoch descriptor. pub epoch_descriptor: ViableEpochDescriptor, Epoch>, } -/// Intermediate key for Sassafras engine. -pub static INTERMEDIATE_KEY: &[u8] = b"sassafras1"; +/// Intermediate key for Babe engine. +pub static INTERMEDIATE_KEY: &[u8] = b"babe1"; -/// Configuration for Sassafras. +/// A slot duration. Create with `get_or_compute`. +// FIXME: Once Rust has higher-kinded types, the duplication between this +// and `super::babe::Config` can be eliminated. +// https://github.com/paritytech/substrate/issues/2434 #[derive(Clone)] -pub struct Config(sc_consensus_slots::SlotDuration); +pub struct Config(sc_consensus_slots::SlotDuration); impl Config { /// Either fetch the slot duration from disk or compute it from the genesis /// state. 
pub fn get_or_compute(client: &C) -> ClientResult where - C: AuxStore + ProvideRuntimeApi, C::Api: SassafrasApi, + C: AuxStore + ProvideRuntimeApi, C::Api: BabeApi, { - sc_consensus_slots::SlotDuration::get_or_compute(client, |a, b| a.configuration(b)).map(Self) + trace!(target: "babe", "Getting slot duration"); + match sc_consensus_slots::SlotDuration::get_or_compute(client, |a, b| a.configuration(b)).map(Self) { + Ok(s) => Ok(s), + Err(s) => { + warn!(target: "babe", "Failed to get slot duration"); + Err(s) + } + } } - /// Create the genesis epoch (epoch #0) + /// Create the genesis epoch (epoch #0). This is defined to start at the slot of + /// the first block, so that has to be provided. pub fn genesis_epoch(&self, slot_number: SlotNumber) -> Epoch { - let proofs = self.genesis_proofs.clone() - .into_iter() - .map(|p| p.try_into().expect("Genesis proofs are invalid")) - .collect::>(); - Epoch { epoch_index: 0, start_slot: slot_number, duration: self.epoch_length, - - validating: ValidatorSet { - proofs: proofs.clone(), - authorities: self.genesis_authorities.clone(), - randomness: self.randomness.clone(), - }, - publishing: ValidatorSet { - proofs, - authorities: self.genesis_authorities.clone(), - randomness: self.randomness.clone(), - }, + authorities: self.genesis_authorities.clone(), + randomness: self.randomness.clone(), } } } impl std::ops::Deref for Config { - type Target = SassafrasConfiguration; + type Target = BabeConfiguration; - fn deref(&self) -> &SassafrasConfiguration { + fn deref(&self) -> &BabeConfiguration { &*self.0 } } +/// Parameters for BABE. +pub struct BabeParams { + /// The keystore that manages the keys of the node. + pub keystore: KeyStorePtr, + + /// The client to use + pub client: Arc, + + /// The SelectChain Strategy + pub select_chain: SC, + + /// The environment we are producing blocks for. + pub env: E, + + /// The underlying block-import object to supply our produced blocks to. 
+ /// This must be a `BabeBlockImport` or a wrapper of it, otherwise + /// critical consensus logic will be omitted. + pub block_import: I, + + /// A sync oracle + pub sync_oracle: SO, + + /// Providers for inherent data. + pub inherent_data_providers: InherentDataProviders, + + /// Force authoring of blocks even if we are offline + pub force_authoring: bool, + + /// The source of timestamps for relative slots + pub babe_link: BabeLink, + + /// Checks if the current native implementation can author with a runtime at a given block. + pub can_author_with: CAW, +} + +/// Start the babe worker. The returned future should be run in a tokio runtime. +pub fn start_babe(BabeParams { + keystore, + client, + select_chain, + env, + block_import, + sync_oracle, + inherent_data_providers, + force_authoring, + babe_link, + can_author_with, +}: BabeParams) -> Result< + impl futures::Future, + sp_consensus::Error, +> where + B: BlockT, + C: ProvideRuntimeApi + ProvideCache + ProvideUncles + BlockchainEvents + + HeaderBackend + HeaderMetadata + Send + Sync + 'static, + C::Api: BabeApi, + SC: SelectChain + 'static, + E: Environment + Send + Sync, + E::Proposer: Proposer>, + I: BlockImport> + Send + + Sync + 'static, + Error: std::error::Error + Send + From + From + 'static, + SO: SyncOracle + Send + Sync + Clone, + CAW: CanAuthorWith + Send, +{ + let config = babe_link.config; + let worker = BabeWorker { + client: client.clone(), + block_import: Arc::new(Mutex::new(block_import)), + env, + sync_oracle: sync_oracle.clone(), + force_authoring, + keystore, + epoch_changes: babe_link.epoch_changes.clone(), + config: config.clone(), + }; + + register_babe_inherent_data_provider(&inherent_data_providers, config.slot_duration())?; + sc_consensus_uncles::register_uncles_inherent_data_provider( + client.clone(), + select_chain.clone(), + &inherent_data_providers, + )?; + + babe_info!("Starting BABE Authorship worker"); + Ok(sc_consensus_slots::start_slot_worker( + config.0, + select_chain, + 
worker, + sync_oracle, + inherent_data_providers, + babe_link.time_source, + can_author_with, + )) +} + +struct BabeWorker { + client: Arc, + block_import: Arc>, + env: E, + sync_oracle: SO, + force_authoring: bool, + keystore: KeyStorePtr, + epoch_changes: SharedEpochChanges, + config: Config, +} + +impl sc_consensus_slots::SimpleSlotWorker for BabeWorker where + B: BlockT, + C: ProvideRuntimeApi + + ProvideCache + + HeaderBackend + + HeaderMetadata, + C::Api: BabeApi, + E: Environment, + E::Proposer: Proposer>, + I: BlockImport> + Send + Sync + 'static, + SO: SyncOracle + Send + Clone, + Error: std::error::Error + Send + From + From + 'static, +{ + type EpochData = ViableEpochDescriptor, Epoch>; + type Claim = (PreDigest, AuthorityPair); + type SyncOracle = SO; + type CreateProposer = Pin> + Send + 'static + >>; + type Proposer = E::Proposer; + type BlockImport = I; + + fn logging_target(&self) -> &'static str { + "babe" + } + + fn block_import(&self) -> Arc> { + self.block_import.clone() + } + + fn epoch_data( + &self, + parent: &B::Header, + slot_number: u64, + ) -> Result { + self.epoch_changes.lock().epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + slot_number, + ) + .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? 
+ .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) + } + + fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { + self.epoch_changes.lock() + .viable_epoch(&epoch_descriptor, |slot| self.config.genesis_epoch(slot)) + .map(|epoch| epoch.as_ref().authorities.len()) + } + + fn claim_slot( + &self, + _parent_header: &B::Header, + slot_number: SlotNumber, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) -> Option { + debug!(target: "babe", "Attempting to claim slot {}", slot_number); + let s = authorship::claim_slot( + slot_number, + self.epoch_changes.lock().viable_epoch( + &epoch_descriptor, + |slot| self.config.genesis_epoch(slot) + )?.as_ref(), + &*self.config, + &self.keystore, + ); + + if let Some(_) = s { + debug!(target: "babe", "Claimed slot {}", slot_number); + } + + s + } + + fn pre_digest_data( + &self, + _slot_number: u64, + claim: &Self::Claim, + ) -> Vec> { + vec![ + as CompatibleDigestItem>::babe_pre_digest(claim.0.clone()), + ] + } + + fn block_import_params(&self) -> Box, + StorageChanges, + Self::Claim, + Self::EpochData, + ) -> sp_consensus::BlockImportParams + Send> { + Box::new(|header, header_hash, body, storage_changes, (_, pair), epoch_descriptor| { + // sign the pre-sealed hash of the block and then + // add it to a digest item. + let signature = pair.sign(header_hash.as_ref()); + let digest_item = as CompatibleDigestItem>::babe_seal(signature); + + BlockImportParams { + origin: BlockOrigin::Own, + header, + justification: None, + post_digests: vec![digest_item], + body: Some(body), + storage_changes: Some(storage_changes), + finalized: false, + auxiliary: Vec::new(), // block-weight is written in block import. 
+ intermediates: { + let mut intermediates = HashMap::new(); + intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + ); + intermediates + }, + fork_choice: None, + allow_missing_state: false, + import_existing: false, + } + }) + } + + fn force_authoring(&self) -> bool { + self.force_authoring + } + + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { + &mut self.sync_oracle + } + + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + Box::pin(self.env.init(block).map_err(|e| { + sp_consensus::Error::ClientImport(format!("{:?}", e)) + })) + } + + fn proposing_remaining_duration( + &self, + head: &B::Header, + slot_info: &SlotInfo + ) -> Option { + // never give more than 2^this times the lenience. + const BACKOFF_CAP: u64 = 8; + + // how many slots it takes before we double the lenience. + const BACKOFF_STEP: u64 = 2; + + let slot_remaining = self.slot_remaining_duration(slot_info); + let parent_slot = match find_pre_digest::(head) { + Err(_) => return Some(slot_remaining), + Ok(d) => d.slot_number(), + }; + + // we allow a lenience of the number of slots since the head of the + // chain was produced, minus 1 (since there is always a difference of at least 1) + // + // exponential back-off. + // in normal cases we only attempt to issue blocks up to the end of the slot. + // when the chain has been stalled for a few slots, we give more lenience. + let slot_lenience = slot_info.number.saturating_sub(parent_slot + 1); + + let slot_lenience = std::cmp::min(slot_lenience, BACKOFF_CAP); + let slot_duration = slot_info.duration << (slot_lenience / BACKOFF_STEP); + + if slot_lenience >= 1 { + debug!(target: "babe", "No block for {} slots. 
Applying 2^({}/{}) lenience", + slot_lenience, slot_lenience, BACKOFF_STEP); + } + + let slot_lenience = Duration::from_secs(slot_duration); + Some(slot_lenience + slot_remaining) + } +} + +impl SlotWorker for BabeWorker where + B: BlockT, + C: ProvideRuntimeApi + + ProvideCache + + HeaderBackend + + HeaderMetadata + Send + Sync, + C::Api: BabeApi, + E: Environment + Send + Sync, + E::Proposer: Proposer>, + I: BlockImport> + Send + Sync + 'static, + SO: SyncOracle + Send + Sync + Clone, + Error: std::error::Error + Send + From + From + 'static, +{ + type OnSlot = Pin> + Send>>; + + fn on_slot(&mut self, chain_head: B::Header, slot_info: SlotInfo) -> Self::OnSlot { + >::on_slot(self, chain_head, slot_info) + } +} + +/// Extract the BABE pre digest from the given header. Pre-runtime digests are +/// mandatory, the function will return `Err` if none is found. +fn find_pre_digest(header: &B::Header) -> Result> +{ + // genesis block doesn't contain a pre digest so let's generate a + // dummy one to not break any invariants in the rest of the code + if header.number().is_zero() { + return Ok(PreDigest::Secondary { + slot_number: 0, + authority_index: 0, + }); + } + + let mut pre_digest: Option<_> = None; + for log in header.digest().logs() { + trace!(target: "babe", "Checking log {:?}, looking for pre runtime digest", log); + match (log.as_babe_pre_digest(), pre_digest.is_some()) { + (Some(_), true) => return Err(babe_err(Error::MultiplePreRuntimeDigests)), + (None, _) => trace!(target: "babe", "Ignoring digest not meant for us"), + (s, false) => pre_digest = s, + } + } + pre_digest.ok_or_else(|| babe_err(Error::NoPreRuntimeDigest)) +} + +/// Extract the BABE epoch change digest from the given header, if it exists. 
+fn find_next_epoch_digest(header: &B::Header) + -> Result, Error> + where DigestItemFor: CompatibleDigestItem, +{ + let mut epoch_digest: Option<_> = None; + for log in header.digest().logs() { + trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); + let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); + match (log, epoch_digest.is_some()) { + (Some(ConsensusLog::NextEpochData(_)), true) => return Err(babe_err(Error::MultipleEpochChangeDigests)), + (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch), + _ => trace!(target: "babe", "Ignoring digest not meant for us"), + } + } + + Ok(epoch_digest) +} + + #[derive(Default, Clone)] struct TimeSource(Arc, Vec<(Instant, u64)>)>>); @@ -206,9 +640,9 @@ impl SlotCompatible for TimeSource { &self, data: &InherentData, ) -> Result<(TimestampInherent, u64, std::time::Duration), sp_consensus::Error> { - trace!(target: "sassafras", "extract timestamp"); + trace!(target: "babe", "extract timestamp"); data.timestamp_inherent_data() - .and_then(|t| data.sassafras_inherent_data().map(|a| (t, a))) + .and_then(|t| data.babe_inherent_data().map(|a| (t, a))) .map_err(Into::into) .map_err(sp_consensus::Error::InherentData) .map(|(x, y)| (x, y, self.0.lock().0.take().unwrap_or_default())) @@ -217,13 +651,13 @@ impl SlotCompatible for TimeSource { /// State that must be shared between the import queue and the authoring logic. #[derive(Clone)] -pub struct SassafrasLink { +pub struct BabeLink { time_source: TimeSource, epoch_changes: SharedEpochChanges, config: Config, } -impl SassafrasLink { +impl BabeLink { /// Get the epoch changes of this link. pub fn epoch_changes(&self) -> &SharedEpochChanges { &self.epoch_changes @@ -235,11 +669,14 @@ impl SassafrasLink { } } -pub struct SassafrasVerifier { +/// A verifier for Babe blocks. 
+pub struct BabeVerifier { client: Arc>, api: Arc, inherent_data_providers: sp_inherents::InherentDataProviders, - link: SassafrasLink, + config: Config, + epoch_changes: SharedEpochChanges, + time_source: TimeSource, } impl BabeVerifier { @@ -314,13 +751,14 @@ fn median_algorithm( } } -impl SassafrasVerifier where - Block: BlockT, +impl Verifier for BabeVerifier where + Block: BlockT, B: Backend + 'static, E: CallExecutor + 'static + Clone + Send + Sync, RA: Send + Sync, PRA: ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, - PRA::Api: BlockBuilderApi + SassafrasApi, + PRA::Api: BlockBuilderApi + + BabeApi, { fn verify( &mut self, @@ -328,9 +766,9 @@ impl SassafrasVerifier where header: Block::Header, justification: Option, mut body: Option>, - ) -> Result<(BlockImportParams, Option)>>), Error> { + ) -> Result<(BlockImportParams, Option)>>), String> { trace!( - target: "sassafras", + target: "babe", "Verifying origin: {:?} header: {:?} justification: {:?} body: {:?}", origin, header, @@ -338,36 +776,38 @@ impl SassafrasVerifier where body, ); + debug!(target: "babe", "We have {:?} logs in this header", header.digest().logs().len()); let mut inherent_data = self .inherent_data_providers .create_inherent_data() - .map_err(Error::Runtime)?; + .map_err(Error::::Runtime)?; let (_, slot_now, _) = self.time_source.extract_timestamp_and_slot(&inherent_data) - .map_err(Error::Extraction)?; + .map_err(Error::::Extraction)?; let hash = header.hash(); let parent_hash = *header.parent_hash(); let parent_header_metadata = self.client.header_metadata(parent_hash) - .map_err(Error::FetchParentHeader)?; + .map_err(Error::::FetchParentHeader)?; - // First, Verify pre-runtime digest. 
let pre_digest = find_pre_digest::(&header)?; - let mut epoch_changes = self.epoch_changes.lock(); + let epoch_changes = self.epoch_changes.lock(); let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( descendent_query(&*self.client), &parent_hash, parent_header_metadata.number, pre_digest.slot_number(), ) - .map_err(|e| Error::ForkTree(Box::new(e)))? - .ok_or_else(|| Error::FetchEpoch(parent_hash))?; - let viable_epoch = epoch_changes.viable_epoch_mut( + .map_err(|e| Error::::ForkTree(Box::new(e)))? + .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + let viable_epoch = epoch_changes.viable_epoch( &epoch_descriptor, - |slot| self.link.config.genesis_epoch(slot), - ).ok_or_else(|| Error::FetchEpoch(parent_hash))?; + |slot| self.config.genesis_epoch(slot) + ).ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + // We add one to the current slot to allow for some small drift. + // FIXME #1019 in the future, alter this queue to allow deferring of headers let v_params = verification::VerificationParams { header: header.clone(), pre_digest: Some(pre_digest.clone()), @@ -459,41 +899,21 @@ impl SassafrasVerifier where Err(Error::::TooFarInFuture(hash).into()) } } - - Ok((block_import_params, Default::default())) - } -} - -impl Verifier for SassafrasVerifier where - Block: BlockT, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, - RA: Send + Sync, - PRA: ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, - PRA::Api: BlockBuilderApi, -{ - fn verify( - &mut self, - origin: BlockOrigin, - mut header: Block::Header, - justification: Option, - mut body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String> { - self.verify(origin, header, justification, body).map_err(Into::into) } } -pub type SassafrasImportQueue = BasicQueue; +/// The BABE import queue type. +pub type BabeImportQueue = BasicQueue; -/// Register the Sassafras inherent data provider, if not registered already. 
-fn register_sassafras_inherent_data_provider( +/// Register the babe inherent data provider, if not registered already. +fn register_babe_inherent_data_provider( inherent_data_providers: &InherentDataProviders, slot_duration: u64, ) -> Result<(), sp_consensus::Error> { - debug!(target: "sassafras", "Registering"); - if !inherent_data_providers.has_provider(&sp_consensus_sassafras::inherents::INHERENT_IDENTIFIER) { + debug!(target: "babe", "Registering"); + if !inherent_data_providers.has_provider(&sp_consensus_babe::inherents::INHERENT_IDENTIFIER) { inherent_data_providers - .register_provider(sp_consensus_sassafras::inherents::InherentDataProvider::new(slot_duration)) + .register_provider(sp_consensus_babe::inherents::InherentDataProvider::new(slot_duration)) .map_err(Into::into) .map_err(sp_consensus::Error::InherentData) } else { @@ -501,16 +921,54 @@ fn register_sassafras_inherent_data_provider( } } -pub struct SassafrasBlockImport { +/// A block-import handler for BABE. +/// +/// This scans each imported block for epoch change signals. The signals are +/// tracked in a tree (of all forks), and the import logic validates all epoch +/// change transitions, i.e. whether a given epoch change is expected or whether +/// it is missing. +/// +/// The epoch change tree should be pruned as blocks are finalized. 
+pub struct BabeBlockImport { inner: I, client: Arc>, api: Arc, - link: + epoch_changes: SharedEpochChanges, + config: Config, } -impl BlockImport for - SassafrasBlockImport -where +impl Clone for BabeBlockImport { + fn clone(&self) -> Self { + BabeBlockImport { + inner: self.inner.clone(), + client: self.client.clone(), + api: self.api.clone(), + epoch_changes: self.epoch_changes.clone(), + config: self.config.clone(), + } + } +} + +impl BabeBlockImport { + fn new( + client: Arc>, + api: Arc, + epoch_changes: SharedEpochChanges, + block_import: I, + config: Config, + ) -> Self { + BabeBlockImport { + client, + api, + inner: block_import, + epoch_changes, + config, + } + } +} + +impl BlockImport for BabeBlockImport where + Block: BlockT, I: BlockImport> + Send + Sync, I::Error: Into, B: Backend + 'static, @@ -518,7 +976,7 @@ where Client: AuxStore, RA: Send + Sync, PRA: ProvideRuntimeApi + ProvideCache, - PRA::Api: ApiExt, + PRA::Api: BabeApi + ApiExt, { type Error = ConsensusError; type Transaction = sp_api::TransactionFor; @@ -529,14 +987,199 @@ where new_cache: HashMap>, ) -> Result { let hash = block.post_header().hash(); - let parent_hash = *block.header.parent_hash(); let number = block.header.number().clone(); - // let pre_digest = find_pre_digest::(&block.header)?; - // TODO: Verify that the slot is increasing, and not in the future. 
+ // early exit if block already in chain, otherwise the check for + // epoch changes will error when trying to re-import an epoch change + match self.client.status(BlockId::Hash(hash)) { + Ok(sp_blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), + Ok(sp_blockchain::BlockStatus::Unknown) => {}, + Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), + } + + let pre_digest = find_pre_digest::(&block.header) + .expect("valid babe headers must contain a predigest; \ + header has been already verified; qed"); + let slot_number = pre_digest.slot_number(); + + let parent_hash = *block.header.parent_hash(); + let parent_header = self.client.header(&BlockId::Hash(parent_hash)) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? + .ok_or_else(|| ConsensusError::ChainLookup(babe_err( + Error::::ParentUnavailable(parent_hash, hash) + ).into()))?; + + let parent_slot = find_pre_digest::(&parent_header) + .map(|d| d.slot_number()) + .expect("parent is non-genesis; valid BABE headers contain a pre-digest; \ + header has already been verified; qed"); + + // make sure that slot number is strictly increasing + if slot_number <= parent_slot { + return Err( + ConsensusError::ClientImport(babe_err( + Error::::SlotNumberMustIncrease(parent_slot, slot_number) + ).into()) + ); + } + + let mut epoch_changes = self.epoch_changes.lock(); + + // check if there's any epoch change expected to happen at this slot. + // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true + // if this is the first block in its chain for that epoch. + // + // also provides the total weight of the chain, including the imported block. + let (epoch_descriptor, first_in_epoch, parent_weight) = { + let parent_weight = if *parent_header.number() == Zero::zero() { + 0 + } else { + aux_schema::load_block_weight(&*self.client, parent_hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
+ .ok_or_else(|| ConsensusError::ClientImport( + babe_err(Error::::ParentBlockNoAssociatedWeight(hash)).into() + ))? + }; + + let intermediate = block.take_intermediate::>( + INTERMEDIATE_KEY + )?; + + let epoch_descriptor = intermediate.epoch_descriptor; + let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); + (epoch_descriptor, first_in_epoch, parent_weight) + }; + + let total_weight = parent_weight + pre_digest.added_weight(); + + // search for this all the time so we can reject unexpected announcements. + let next_epoch_digest = find_next_epoch_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + match (first_in_epoch, next_epoch_digest.is_some()) { + (true, true) => {}, + (false, false) => {}, + (true, false) => { + return Err( + ConsensusError::ClientImport( + babe_err(Error::::ExpectedEpochChange(hash, slot_number)).into(), + ) + ); + }, + (false, true) => { + return Err(ConsensusError::ClientImport(Error::::UnexpectedEpochChange.into())); + }, + } + + // if there's a pending epoch we'll save the previous epoch changes here + // this way we can revert it if there's any error + let mut old_epoch_changes = None; + + let info = self.client.chain_info(); + + if let Some(next_epoch_descriptor) = next_epoch_digest { + old_epoch_changes = Some(epoch_changes.clone()); + + let viable_epoch = epoch_changes.viable_epoch( + &epoch_descriptor, + |slot| self.config.genesis_epoch(slot), + ).ok_or_else(|| { + ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) + })?; + + babe_info!("New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_index, + hash, + slot_number, + viable_epoch.as_ref().start_slot); + + let next_epoch = viable_epoch.increment(next_epoch_descriptor); + + babe_info!("Next epoch starts at slot {}", next_epoch.as_ref().start_slot); + + // prune the tree of epochs not part of the finalized chain or + // that are not live anymore, and then track 
the given epoch change + // in the tree. + // NOTE: it is important that these operations are done in this + // order, otherwise if pruning after import the `is_descendent_of` + // used by pruning may not know about the block that is being + // imported. + let prune_and_import = || { + prune_finalized( + &self.client, + &mut epoch_changes, + )?; + + epoch_changes.import( + descendent_query(&*self.client), + hash, + number, + *block.header.parent_hash(), + next_epoch, + ).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; + + Ok(()) + }; + + if let Err(e) = prune_and_import() { + debug!(target: "babe", "Failed to launch next epoch: {:?}", e); + *epoch_changes = old_epoch_changes.expect("set `Some` above and not taken; qed"); + return Err(e); + } + + crate::aux_schema::write_epoch_changes::( + &*epoch_changes, + |insert| block.auxiliary.extend( + insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) + ) + ); + } + + aux_schema::write_block_weight( + hash, + &total_weight, + |values| block.auxiliary.extend( + values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) + ), + ); + + // The fork choice rule is that we pick the heaviest chain (i.e. + // more primary blocks), if there's a tie we go with the longest + // chain. + block.fork_choice = { + let (last_best, last_best_number) = (info.best_hash, info.best_number); + + let last_best_weight = if &last_best == block.header.parent_hash() { + // the parent=genesis case is already covered for loading parent weight, + // so we don't need to cover again here. + parent_weight + } else { + aux_schema::load_block_weight(&*self.client, last_best) + .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? + .ok_or_else( + || ConsensusError::ChainLookup(format!("No block weight for parent header.")) + )? 
+ }; + + Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { + true + } else if total_weight == last_best_weight { + number > last_best_number + } else { + false + })) + }; let import_result = self.inner.import_block(block, new_cache); + // revert to the original epoch changes in case there's an error + // importing the block + if let Err(_) = import_result { + if let Some(old_epoch_changes) = old_epoch_changes { + *epoch_changes = old_epoch_changes; + } + } + import_result.map_err(Into::into) } @@ -548,70 +1191,162 @@ where } } -fn find_pre_digest( - header: &B::Header, -) -> Result> { - let mut pre_digest = None; - for log in header.digest().logs() { - match (log.as_sassafras_pre_digest(), pre_digest.is_some()) { - (Some(_), true) => return Err(Error::MultiplePreRuntimeDigest), - (None, _) => (), - (s, false) => pre_digest = s, - } - } - pre_digest.ok_or_else(|| Error::NoPreRuntimeDigest) +/// Gets the best finalized block and its slot, and prunes the given epoch tree. +fn prune_finalized( + client: &Client, + epoch_changes: &mut EpochChangesFor, +) -> Result<(), ConsensusError> where + Block: BlockT, + E: CallExecutor + Send + Sync, + B: Backend, + RA: Send + Sync, +{ + let info = client.chain_info(); + + let finalized_slot = { + let finalized_header = client.header(&BlockId::Hash(info.finalized_hash)) + .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))? 
+ .expect("best finalized hash was given by client; \ + finalized headers must exist in db; qed"); + + find_pre_digest::(&finalized_header) + .expect("finalized header must be valid; \ + valid blocks have a pre-digest; qed") + .slot_number() + }; + + epoch_changes.prune_finalized( + descendent_query(&*client), + &info.finalized_hash, + info.finalized_number, + finalized_slot, + ).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; + + Ok(()) } -fn find_post_block_descriptor( - header: &B::Header, -) -> Result, Error> { - let mut desc = None; - for log in header.digest().logs() { - match (log.as_sassafras_post_block_descriptor(), desc.is_some()) { - (Some(_), true) => return Err(Error::MultiplePostBlockDescriptor), - (None, _) => (), - (s, false) => desc = s, - } - } - Ok(desc) +/// Produce a BABE block-import object to be used later on in the construction of +/// an import-queue. +/// +/// Also returns a link object used to correctly instantiate the import queue +/// and background worker. +pub fn block_import( + config: Config, + wrapped_block_import: I, + client: Arc>, + api: Arc, +) -> ClientResult<(BabeBlockImport, BabeLink)> where + B: Backend, + E: CallExecutor + Send + Sync, + RA: Send + Sync, + Client: AuxStore, +{ + let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; + let link = BabeLink { + epoch_changes: epoch_changes.clone(), + time_source: Default::default(), + config: config.clone(), + }; + + // NOTE: this isn't entirely necessary, but since we didn't use to prune the + // epoch tree it is useful as a migration, so that nodes prune long trees on + // startup rather than waiting until importing the next epoch change block. 
+ prune_finalized( + &client, + &mut epoch_changes.lock(), + )?; + + let import = BabeBlockImport::new( + client, + api, + epoch_changes, + wrapped_block_import, + config, + ); + + Ok((import, link)) } -fn find_next_epoch_descriptor( - header: &B::Header, -) -> Result, Error> { - let mut desc = None; - for log in header.digest().logs() { - match (log.as_sassafras_next_epoch_descriptor(), desc.is_some()) { - (Some(_), true) => return Err(Error::MultipleNextEpochDescriptor), - (None, _) => (), - (s, false) => desc = s, - } - } - Ok(desc) +/// Start an import queue for the BABE consensus algorithm. +/// +/// This method returns the import queue, some data that needs to be passed to the block authoring +/// logic (`BabeLink`), and a future that must be run to +/// completion and is responsible for listening to finality notifications and +/// pruning the epoch changes tree. +/// +/// The block import object provided must be the `BabeBlockImport` or a wrapper +/// of it, otherwise crucial import logic will be omitted. 
+pub fn import_queue( + babe_link: BabeLink, + block_import: I, + justification_import: Option>, + finality_proof_import: Option>, + client: Arc>, + api: Arc, + inherent_data_providers: InherentDataProviders, +) -> ClientResult>> where + B: Backend + 'static, + I: BlockImport> + + Send + Sync + 'static, + E: CallExecutor + Clone + Send + Sync + 'static, + RA: Send + Sync + 'static, + PRA: ProvideRuntimeApi + ProvideCache + Send + Sync + AuxStore + 'static, + PRA::Api: BlockBuilderApi + BabeApi + ApiExt, +{ + register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration)?; + + let verifier = BabeVerifier { + client: client.clone(), + api, + inherent_data_providers, + config: babe_link.config, + epoch_changes: babe_link.epoch_changes, + time_source: babe_link.time_source, + }; + + Ok(BasicQueue::new( + verifier, + Box::new(block_import), + justification_import, + finality_proof_import, + )) } -fn make_ticket_transcript( - randomness: &[u8], - slot_number: u64, - epoch: u64, -) -> Transcript { - let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); - transcript.append_message(b"type", b"ticket"); - transcript.append_message(b"slot number", &slot_number.to_le_bytes()); - transcript.append_message(b"current epoch", &epoch.to_le_bytes()); - transcript.append_message(b"chain randomness", randomness); - transcript -} - -fn make_post_transcript( - randomness: &[u8], - slot_number: u64, - epoch: u64, -) -> Transcript { - let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); - transcript.append_message(b"type", b"post"); - transcript.append_message(b"slot number", &slot_number.to_le_bytes()); - transcript.append_message(b"current epoch", &epoch.to_le_bytes()); - transcript.append_message(b"chain randomness", randomness); - transcript +/// BABE test helpers. Utility methods for manually authoring blocks. 
+#[cfg(feature = "test-helpers")] +pub mod test_helpers { + use super::*; + + /// Try to claim the given slot and return a `BabePreDigest` if + /// successful. + pub fn claim_slot( + slot_number: u64, + parent: &B::Header, + client: &C, + keystore: &KeyStorePtr, + link: &BabeLink, + ) -> Option where + B: BlockT, + C: ProvideRuntimeApi + + ProvideCache + + HeaderBackend + + HeaderMetadata, + C::Api: BabeApi, + { + let epoch_changes = link.epoch_changes.lock(); + let epoch = epoch_changes.epoch_data_for_child_of( + descendent_query(client), + &parent.hash(), + parent.number().clone(), + slot_number, + |slot| link.config.genesis_epoch(slot), + ).unwrap().unwrap(); + + authorship::claim_slot( + slot_number, + &epoch, + &link.config, + keystore, + ).map(|(digest, _)| digest) + } } diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs new file mode 100644 index 0000000000000..0f0c2f2e471b1 --- /dev/null +++ b/client/consensus/sassafras/src/tests.rs @@ -0,0 +1,812 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! 
BABE testsuite + +// FIXME #2532: need to allow deprecated until refactor is done +// https://github.com/paritytech/substrate/issues/2532 +#![allow(deprecated)] +use super::*; +use authorship::claim_slot; + +use sp_consensus_babe::{AuthorityPair, SlotNumber}; +use sc_block_builder::BlockBuilder; +use sp_consensus::{ + NoNetwork as DummyOracle, Proposal, RecordProof, + import_queue::{BoxBlockImport, BoxJustificationImport, BoxFinalityProofImport}, +}; +use sc_network_test::*; +use sc_network_test::{Block as TestBlock, PeersClient}; +use sc_network::config::{BoxFinalityProofRequestBuilder, ProtocolConfig}; +use sp_runtime::{generic::DigestItem, traits::{Block as BlockT, DigestFor}}; +use tokio::runtime::current_thread; +use sc_client_api::{BlockchainEvents, backend::TransactionFor}; +use log::debug; +use std::{time::Duration, cell::RefCell}; + +type Item = DigestItem; + +type Error = sp_blockchain::Error; + +type TestClient = sc_client::Client< + substrate_test_runtime_client::Backend, + substrate_test_runtime_client::Executor, + TestBlock, + substrate_test_runtime_client::runtime::RuntimeApi, +>; + +#[derive(Copy, Clone, PartialEq)] +enum Stage { + PreSeal, + PostSeal, +} + +type Mutator = Arc; + +#[derive(Clone)] +struct DummyFactory { + client: Arc, + epoch_changes: SharedEpochChanges, + config: Config, + mutator: Mutator, +} + +struct DummyProposer { + factory: DummyFactory, + parent_hash: Hash, + parent_number: u64, + parent_slot: SlotNumber, +} + +impl Environment for DummyFactory { + type CreateProposer = future::Ready>; + type Proposer = DummyProposer; + type Error = Error; + + fn init(&mut self, parent_header: &::Header) + -> Self::CreateProposer + { + + let parent_slot = crate::find_pre_digest::(parent_header) + .expect("parent header has a pre-digest") + .slot_number(); + + future::ready(Ok(DummyProposer { + factory: self.clone(), + parent_hash: parent_header.hash(), + parent_number: *parent_header.number(), + parent_slot, + })) + } +} + +impl 
DummyProposer { + fn propose_with(&mut self, pre_digests: DigestFor) + -> future::Ready< + Result< + Proposal< + TestBlock, + sc_client_api::TransactionFor + >, + Error + > + > + { + let block_builder = self.factory.client.new_block_at( + &BlockId::Hash(self.parent_hash), + pre_digests, + false, + ).unwrap(); + + let mut block = match block_builder.build().map_err(|e| e.into()) { + Ok(b) => b.block, + Err(e) => return future::ready(Err(e)), + }; + + let this_slot = crate::find_pre_digest::(block.header()) + .expect("baked block has valid pre-digest") + .slot_number(); + + // figure out if we should add a consensus digest, since the test runtime + // doesn't. + let epoch_changes = self.factory.epoch_changes.lock(); + let epoch = epoch_changes.epoch_data_for_child_of( + descendent_query(&*self.factory.client), + &self.parent_hash, + self.parent_number, + this_slot, + |slot| self.factory.config.genesis_epoch(slot), + ) + .expect("client has data to find epoch") + .expect("can compute epoch for baked block"); + + let first_in_epoch = self.parent_slot < epoch.start_slot; + if first_in_epoch { + // push a `Consensus` digest signalling next change. + // we just reuse the same randomness and authorities as the prior + // epoch. this will break when we add light client support, since + // that will re-check the randomness logic off-chain. + let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { + authorities: epoch.authorities.clone(), + randomness: epoch.randomness.clone(), + }).encode(); + let digest = DigestItem::Consensus(BABE_ENGINE_ID, digest_data); + block.header.digest_mut().push(digest) + } + + // mutate the block header according to the mutator. 
+ (self.factory.mutator)(&mut block.header, Stage::PreSeal); + + future::ready(Ok(Proposal { block, proof: None, storage_changes: Default::default() })) + } +} + +impl Proposer for DummyProposer { + type Error = Error; + type Transaction = sc_client_api::TransactionFor; + type Proposal = future::Ready, Error>>; + + fn propose( + &mut self, + _: InherentData, + pre_digests: DigestFor, + _: Duration, + _: RecordProof, + ) -> Self::Proposal { + self.propose_with(pre_digests) + } +} + +thread_local! { + static MUTATOR: RefCell = RefCell::new(Arc::new(|_, _|())); +} + +#[derive(Clone)] +struct PanickingBlockImport(B); + +impl> BlockImport for PanickingBlockImport { + type Error = B::Error; + type Transaction = B::Transaction; + + fn import_block( + &mut self, + block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + Ok(self.0.import_block(block, new_cache).expect("importing block failed")) + } + + fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + Ok(self.0.check_block(block).expect("checking block failed")) + } +} + +pub struct BabeTestNet { + peers: Vec, DummySpecialization>>, +} + +type TestHeader = ::Header; +type TestExtrinsic = ::Extrinsic; + +pub struct TestVerifier { + inner: BabeVerifier< + substrate_test_runtime_client::Backend, + substrate_test_runtime_client::Executor, + TestBlock, + substrate_test_runtime_client::runtime::RuntimeApi, + PeersFullClient, + >, + mutator: Mutator, +} + +impl Verifier for TestVerifier { + /// Verify the given data and return the BlockImportParams and an optional + /// new set of validators to import. If not, err with an Error-Message + /// presented to the User in the logs. + fn verify( + &mut self, + origin: BlockOrigin, + mut header: TestHeader, + justification: Option, + body: Option>, + ) -> Result<(BlockImportParams, Option)>>), String> { + // apply post-sealing mutations (i.e. stripping seal, if desired). 
+ (self.mutator)(&mut header, Stage::PostSeal); + Ok(self.inner.verify(origin, header, justification, body).expect("verification failed!")) + } +} + +pub struct PeerData { + link: BabeLink, + inherent_data_providers: InherentDataProviders, + block_import: Mutex< + Option>> + >, +} + +impl TestNetFactory for BabeTestNet { + type Specialization = DummySpecialization; + type Verifier = TestVerifier; + type PeerData = Option; + + /// Create new test network with peers and given config. + fn from_config(_config: &ProtocolConfig) -> Self { + debug!(target: "babe", "Creating test network from config"); + BabeTestNet { + peers: Vec::new(), + } + } + + fn make_block_import(&self, client: PeersClient) + -> ( + BlockImportAdapter, + Option>, + Option>, + Option>, + Option, + ) + { + let client = client.as_full().expect("only full clients are tested"); + let inherent_data_providers = InherentDataProviders::new(); + + let config = Config::get_or_compute(&*client).expect("config available"); + let (block_import, link) = crate::block_import( + config, + client.clone(), + client.clone(), + client.clone(), + ).expect("can initialize block-import"); + + let block_import = PanickingBlockImport(block_import); + + let data_block_import = Mutex::new( + Some(Box::new(block_import.clone()) as BoxBlockImport<_, _>) + ); + ( + BlockImportAdapter::new_full(block_import), + None, + None, + None, + Some(PeerData { link, inherent_data_providers, block_import: data_block_import }), + ) + } + + fn make_verifier( + &self, + client: PeersClient, + _cfg: &ProtocolConfig, + maybe_link: &Option, + ) + -> Self::Verifier + { + let client = client.as_full().expect("only full clients are used in test"); + trace!(target: "babe", "Creating a verifier"); + + // ensure block import and verifier are linked correctly. 
+ let data = maybe_link.as_ref().expect("babe link always provided to verifier instantiation"); + + TestVerifier { + inner: BabeVerifier { + client: client.clone(), + api: client, + inherent_data_providers: data.inherent_data_providers.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + time_source: data.link.time_source.clone(), + }, + mutator: MUTATOR.with(|m| m.borrow().clone()), + } + } + + fn peer(&mut self, i: usize) -> &mut Peer { + trace!(target: "babe", "Retreiving a peer"); + &mut self.peers[i] + } + + fn peers(&self) -> &Vec> { + trace!(target: "babe", "Retreiving peers"); + &self.peers + } + + fn mut_peers>)>( + &mut self, + closure: F, + ) { + closure(&mut self.peers); + } +} + +#[test] +#[should_panic] +fn rejects_empty_block() { + env_logger::try_init().unwrap(); + let mut net = BabeTestNet::new(3); + let block_builder = |builder: BlockBuilder<_, _, _>| { + builder.build().unwrap().block + }; + net.mut_peers(|peer| { + peer[0].generate_blocks(1, BlockOrigin::NetworkInitialSync, block_builder); + }) +} + +fn run_one_test( + mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static, +) { + let _ = env_logger::try_init(); + let mutator = Arc::new(mutator) as Mutator; + + MUTATOR.with(|m| *m.borrow_mut() = mutator.clone()); + let net = BabeTestNet::new(3); + + let peers = &[ + (0, "//Alice"), + (1, "//Bob"), + (2, "//Charlie"), + ]; + + let net = Arc::new(Mutex::new(net)); + let mut import_notifications = Vec::new(); + let mut runtime = current_thread::Runtime::new().unwrap(); + let mut keystore_paths = Vec::new(); + + for (peer_id, seed) in peers { + let mut net = net.lock(); + let peer = net.peer(*peer_id); + let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); + let select_chain = peer.select_chain().expect("Full client has select_chain"); + + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = 
sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); + keystore.write().insert_ephemeral_from_seed::(seed).expect("Generates authority key"); + keystore_paths.push(keystore_path); + + let mut got_own = false; + let mut got_other = false; + + let data = peer.data.as_ref().expect("babe link set up during initialization"); + + let environ = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: mutator.clone(), + }; + + import_notifications.push( + // run each future until we get one of our own blocks with number higher than 5 + // that was produced locally. + client.import_notification_stream() + .take_while(move |n| future::ready(n.header.number() < &5 || { + if n.origin == BlockOrigin::Own { + got_own = true; + } else { + got_other = true; + } + + // continue until we have at least one block of our own + // and one of another peer. + !(got_own && got_other) + })) + .for_each(|_| future::ready(()) ) + ); + + + runtime.spawn(start_babe(BabeParams { + block_import: data.block_import.lock().take().expect("import set up during init"), + select_chain, + client, + env: environ, + sync_oracle: DummyOracle, + inherent_data_providers: data.inherent_data_providers.clone(), + force_authoring: false, + babe_link: data.link.clone(), + keystore, + can_author_with: sp_consensus::AlwaysCanAuthor, + }).expect("Starts babe").unit_error().compat()); + } + + runtime.spawn(futures01::future::poll_fn(move || { + net.lock().poll(); + Ok::<_, ()>(futures01::Async::NotReady::<()>) + })); + + runtime.block_on(future::join_all(import_notifications) + .unit_error().compat()).unwrap(); +} + +#[test] +fn authoring_blocks() { + run_one_test(|_, _| ()) +} + +#[test] +#[should_panic] +fn rejects_missing_inherent_digest() { + run_one_test(|header: &mut TestHeader, stage| { + let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); + header.digest_mut().logs = v.into_iter() + 
.filter(|v| stage == Stage::PostSeal || v.as_babe_pre_digest().is_none()) + .collect() + }) +} + +#[test] +#[should_panic] +fn rejects_missing_seals() { + run_one_test(|header: &mut TestHeader, stage| { + let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); + header.digest_mut().logs = v.into_iter() + .filter(|v| stage == Stage::PreSeal || v.as_babe_seal().is_none()) + .collect() + }) +} + +#[test] +#[should_panic] +fn rejects_missing_consensus_digests() { + run_one_test(|header: &mut TestHeader, stage| { + let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); + header.digest_mut().logs = v.into_iter() + .filter(|v| stage == Stage::PostSeal || v.as_next_epoch_descriptor().is_none()) + .collect() + }); +} + +#[test] +fn wrong_consensus_engine_id_rejected() { + let _ = env_logger::try_init(); + let sig = AuthorityPair::generate().0.sign(b""); + let bad_seal: Item = DigestItem::Seal([0; 4], sig.to_vec()); + assert!(bad_seal.as_babe_pre_digest().is_none()); + assert!(bad_seal.as_babe_seal().is_none()) +} + +#[test] +fn malformed_pre_digest_rejected() { + let _ = env_logger::try_init(); + let bad_seal: Item = DigestItem::Seal(BABE_ENGINE_ID, [0; 64].to_vec()); + assert!(bad_seal.as_babe_pre_digest().is_none()); +} + +#[test] +fn sig_is_not_pre_digest() { + let _ = env_logger::try_init(); + let sig = AuthorityPair::generate().0.sign(b""); + let bad_seal: Item = DigestItem::Seal(BABE_ENGINE_ID, sig.to_vec()); + assert!(bad_seal.as_babe_pre_digest().is_none()); + assert!(bad_seal.as_babe_seal().is_some()) +} + +#[test] +fn can_author_block() { + let _ = env_logger::try_init(); + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); + let pair = keystore.write().insert_ephemeral_from_seed::("//Alice") + .expect("Generates authority pair"); + + let mut i = 0; + let epoch = Epoch { + start_slot: 0, + authorities: vec![(pair.public(), 
1)], + randomness: [0; 32], + epoch_index: 1, + duration: 100, + }; + + let mut config = crate::BabeConfiguration { + slot_duration: 1000, + epoch_length: 100, + c: (3, 10), + genesis_authorities: Vec::new(), + randomness: [0; 32], + secondary_slots: true, + }; + + // with secondary slots enabled it should never be empty + match claim_slot(i, &epoch, &config, &keystore) { + None => i += 1, + Some(s) => debug!(target: "babe", "Authored block {:?}", s.0), + } + + // otherwise with only vrf-based primary slots we might need to try a couple + // of times. + config.secondary_slots = false; + loop { + match claim_slot(i, &epoch, &config, &keystore) { + None => i += 1, + Some(s) => { + debug!(target: "babe", "Authored block {:?}", s.0); + break; + } + } + } +} + +// Propose and import a new BABE block on top of the given parent. +fn propose_and_import_block( + parent: &TestHeader, + slot_number: Option, + proposer_factory: &mut DummyFactory, + block_import: &mut BoxBlockImport, +) -> sp_core::H256 { + let mut proposer = futures::executor::block_on(proposer_factory.init(parent)).unwrap(); + + let slot_number = slot_number.unwrap_or_else(|| { + let parent_pre_digest = find_pre_digest::(parent).unwrap(); + parent_pre_digest.slot_number() + 1 + }); + + let pre_digest = sp_runtime::generic::Digest { + logs: vec![ + Item::babe_pre_digest( + PreDigest::Secondary { + authority_index: 0, + slot_number, + }, + ), + ], + }; + + let parent_hash = parent.hash(); + + let mut block = futures::executor::block_on(proposer.propose_with(pre_digest)).unwrap().block; + + let epoch_descriptor = proposer_factory.epoch_changes.lock().epoch_descriptor_for_child_of( + descendent_query(&*proposer_factory.client), + &parent_hash, + *parent.number(), + slot_number, + ).unwrap().unwrap(); + + let seal = { + // sign the pre-sealed hash of the block and then + // add it to a digest item. 
+ let pair = AuthorityPair::from_seed(&[1; 32]); + let pre_hash = block.header.hash(); + let signature = pair.sign(pre_hash.as_ref()); + Item::babe_seal(signature) + }; + + let post_hash = { + block.header.digest_mut().push(seal.clone()); + let h = block.header.hash(); + block.header.digest_mut().pop(); + h + }; + + let import_result = block_import.import_block( + BlockImportParams { + origin: BlockOrigin::Own, + header: block.header, + justification: None, + post_digests: vec![seal], + body: Some(block.extrinsics), + storage_changes: None, + finalized: false, + auxiliary: Vec::new(), + intermediates: { + let mut intermediates = HashMap::new(); + intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + ); + intermediates + }, + fork_choice: Some(ForkChoiceStrategy::LongestChain), + allow_missing_state: false, + import_existing: false, + }, + Default::default(), + ).unwrap(); + + match import_result { + ImportResult::Imported(_) => {}, + _ => panic!("expected block to be imported"), + } + + post_hash +} + +#[test] +fn importing_block_one_sets_genesis_epoch() { + let mut net = BabeTestNet::new(1); + + let peer = net.peer(0); + let data = peer.data.as_ref().expect("babe link set up during initialization"); + let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); + + let mut proposer_factory = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: Arc::new(|_, _| ()), + }; + + let mut block_import = data.block_import.lock().take().expect("import set up during init"); + + let genesis_header = client.header(&BlockId::Number(0)).unwrap().unwrap(); + + let block_hash = propose_and_import_block( + &genesis_header, + Some(999), + &mut proposer_factory, + &mut block_import, + ); + + let genesis_epoch = data.link.config.genesis_epoch(999); + + let epoch_changes = data.link.epoch_changes.lock(); + let 
epoch_for_second_block = epoch_changes.epoch_data_for_child_of( + descendent_query(&*client), + &block_hash, + 1, + 1000, + |slot| data.link.config.genesis_epoch(slot), + ).unwrap().unwrap(); + + assert_eq!(epoch_for_second_block, genesis_epoch); +} + +#[test] +fn importing_epoch_change_block_prunes_tree() { + use sc_client_api::Finalizer; + + let mut net = BabeTestNet::new(1); + + let peer = net.peer(0); + let data = peer.data.as_ref().expect("babe link set up during initialization"); + + let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); + let mut block_import = data.block_import.lock().take().expect("import set up during init"); + let epoch_changes = data.link.epoch_changes.clone(); + + let mut proposer_factory = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: Arc::new(|_, _| ()), + }; + + // This is just boilerplate code for proposing and importing n valid BABE + // blocks that are built on top of the given parent. The proposer takes care + // of producing epoch change digests according to the epoch duration (which + // is set to 6 slots in the test runtime). + let mut propose_and_import_blocks = |parent_id, n| { + let mut hashes = Vec::new(); + let mut parent_header = client.header(&parent_id).unwrap().unwrap(); + + for _ in 0..n { + let block_hash = propose_and_import_block( + &parent_header, + None, + &mut proposer_factory, + &mut block_import, + ); + hashes.push(block_hash); + parent_header = client.header(&BlockId::Hash(block_hash)).unwrap().unwrap(); + } + + hashes + }; + + // This is the block tree that we're going to use in this test. Each node + // represents an epoch change block, the epoch duration is 6 slots. 
+ // + // *---- F (#7) + // / *------ G (#19) - H (#25) + // / / + // A (#1) - B (#7) - C (#13) - D (#19) - E (#25) + // \ + // *------ I (#25) + + // Create and import the canon chain and keep track of fork blocks (A, C, D) + // from the diagram above. + let canon_hashes = propose_and_import_blocks(BlockId::Number(0), 30); + + // Create the forks + let fork_1 = propose_and_import_blocks(BlockId::Hash(canon_hashes[0]), 10); + let fork_2 = propose_and_import_blocks(BlockId::Hash(canon_hashes[12]), 15); + let fork_3 = propose_and_import_blocks(BlockId::Hash(canon_hashes[18]), 10); + + // We should be tracking a total of 9 epochs in the fork tree + assert_eq!( + epoch_changes.lock().tree().iter().count(), + 9, + ); + + // And only one root + assert_eq!( + epoch_changes.lock().tree().roots().count(), + 1, + ); + + // We finalize block #13 from the canon chain, so on the next epoch + // change the tree should be pruned, to not contain F (#7). + client.finalize_block(BlockId::Hash(canon_hashes[12]), None, false).unwrap(); + propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 7); + + // at this point no hashes from the first fork must exist on the tree + assert!( + !epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_1.contains(h)), + ); + + // but the epoch changes from the other forks must still exist + assert!( + epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)) + ); + + assert!( + epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), + ); + + // finalizing block #25 from the canon chain should prune out the second fork + client.finalize_block(BlockId::Hash(canon_hashes[24]), None, false).unwrap(); + propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 8); + + // at this point no hashes from the second fork must exist on the tree + assert!( + !epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)), + ); + + // while epoch changes 
from the last fork should still exist + assert!( + epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), + ); +} + +#[test] +#[should_panic] +fn verify_slots_are_strictly_increasing() { + let mut net = BabeTestNet::new(1); + + let peer = net.peer(0); + let data = peer.data.as_ref().expect("babe link set up during initialization"); + + let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); + let mut block_import = data.block_import.lock().take().expect("import set up during init"); + + let mut proposer_factory = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: Arc::new(|_, _| ()), + }; + + let genesis_header = client.header(&BlockId::Number(0)).unwrap().unwrap(); + + // we should have no issue importing this block + let b1 = propose_and_import_block( + &genesis_header, + Some(999), + &mut proposer_factory, + &mut block_import, + ); + + let b1 = client.header(&BlockId::Hash(b1)).unwrap().unwrap(); + + // we should fail to import this block since the slot number didn't increase. + // we will panic due to the `PanickingBlockImport` defined above. + propose_and_import_block( + &b1, + Some(999), + &mut proposer_factory, + &mut block_import, + ); +} diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs new file mode 100644 index 0000000000000..70418b8aea1e3 --- /dev/null +++ b/client/consensus/sassafras/src/verification.rs @@ -0,0 +1,217 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Verification for BABE headers. +use schnorrkel::vrf::{VRFOutput, VRFProof}; +use sp_runtime::{traits::Header, traits::DigestItemFor}; +use sp_core::{Pair, Public}; +use sp_consensus_babe::{AuthoritySignature, SlotNumber, AuthorityIndex, AuthorityPair, AuthorityId}; +use sp_consensus_babe::digests::{PreDigest, CompatibleDigestItem}; +use sc_consensus_slots::CheckedHeader; +use log::{debug, trace}; +use super::{find_pre_digest, babe_err, Epoch, BlockT, Error}; +use super::authorship::{make_transcript, calculate_primary_threshold, check_primary_threshold, secondary_slot_author}; + +/// BABE verification parameters +pub(super) struct VerificationParams<'a, B: 'a + BlockT> { + /// the header being verified. + pub(super) header: B::Header, + /// the pre-digest of the header being verified. this is optional - if prior + /// verification code had to read it, it can be included here to avoid duplicate + /// work. + pub(super) pre_digest: Option, + /// the slot number of the current time. + pub(super) slot_now: SlotNumber, + /// epoch descriptor of the epoch this block _should_ be under, if it's valid. + pub(super) epoch: &'a Epoch, + /// genesis config of this BABE chain. + pub(super) config: &'a super::Config, +} + +/// Check a header has been signed by the right key. If the slot is too far in +/// the future, an error will be returned. If successful, returns the pre-header +/// and the digest item containing the seal. +/// +/// The seal must be the last digest. Otherwise, the whole header is considered +/// unsigned. This is required for security and must not be changed. 
+/// +/// This digest item will always return `Some` when used with `as_babe_pre_digest`. +/// +/// The given header can either be from a primary or secondary slot assignment, +/// with each having different validation logic. +pub(super) fn check_header( + params: VerificationParams, +) -> Result>, Error> where + DigestItemFor: CompatibleDigestItem, +{ + let VerificationParams { + mut header, + pre_digest, + slot_now, + epoch, + config, + } = params; + + let authorities = &epoch.authorities; + let pre_digest = pre_digest.map(Ok).unwrap_or_else(|| find_pre_digest::(&header))?; + + trace!(target: "babe", "Checking header"); + let seal = match header.digest_mut().pop() { + Some(x) => x, + None => return Err(babe_err(Error::HeaderUnsealed(header.hash()))), + }; + + let sig = seal.as_babe_seal().ok_or_else(|| { + babe_err(Error::HeaderBadSeal(header.hash())) + })?; + + // the pre-hash of the header doesn't include the seal + // and that's what we sign + let pre_hash = header.hash(); + + if pre_digest.slot_number() > slot_now { + header.digest_mut().push(seal); + return Ok(CheckedHeader::Deferred(header, pre_digest.slot_number())); + } + + let author = match authorities.get(pre_digest.authority_index() as usize) { + Some(author) => author.0.clone(), + None => return Err(babe_err(Error::SlotAuthorNotFound)), + }; + + match &pre_digest { + PreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { + debug!(target: "babe", "Verifying Primary block"); + + let digest = (vrf_output, vrf_proof, *authority_index, *slot_number); + + check_primary_header::( + pre_hash, + digest, + sig, + &epoch, + config.c, + )?; + }, + PreDigest::Secondary { authority_index, slot_number } if config.secondary_slots => { + debug!(target: "babe", "Verifying Secondary block"); + + let digest = (*authority_index, *slot_number); + + check_secondary_header::( + pre_hash, + digest, + sig, + &epoch, + )?; + }, + _ => { + return Err(babe_err(Error::SecondarySlotAssignmentsDisabled)); + 
} + } + + let info = VerifiedHeaderInfo { + pre_digest: CompatibleDigestItem::babe_pre_digest(pre_digest), + seal, + author, + }; + Ok(CheckedHeader::Checked(header, info)) +} + +pub(super) struct VerifiedHeaderInfo { + pub(super) pre_digest: DigestItemFor, + pub(super) seal: DigestItemFor, + pub(super) author: AuthorityId, +} + +/// Check a primary slot proposal header. We validate that the given header is +/// properly signed by the expected authority, and that the contained VRF proof +/// is valid. Additionally, the weight of this block must increase compared to +/// its parent since it is a primary block. +fn check_primary_header( + pre_hash: B::Hash, + pre_digest: (&VRFOutput, &VRFProof, AuthorityIndex, SlotNumber), + signature: AuthoritySignature, + epoch: &Epoch, + c: (u64, u64), +) -> Result<(), Error> { + let (vrf_output, vrf_proof, authority_index, slot_number) = pre_digest; + + let author = &epoch.authorities[authority_index as usize].0; + + if AuthorityPair::verify(&signature, pre_hash, &author) { + let (inout, _) = { + let transcript = make_transcript( + &epoch.randomness, + slot_number, + epoch.epoch_index, + ); + + schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { + p.vrf_verify(transcript, vrf_output, vrf_proof) + }).map_err(|s| { + babe_err(Error::VRFVerificationFailed(s)) + })? + }; + + let threshold = calculate_primary_threshold( + c, + &epoch.authorities, + authority_index as usize, + ); + + if !check_primary_threshold(&inout, threshold) { + return Err(babe_err(Error::VRFVerificationOfBlockFailed(author.clone(), threshold))); + } + + Ok(()) + } else { + Err(babe_err(Error::BadSignature(pre_hash))) + } +} + +/// Check a secondary slot proposal header. We validate that the given header is +/// properly signed by the expected authority, which we have a deterministic way +/// of computing. Additionally, the weight of this block must stay the same +/// compared to its parent since it is a secondary block. 
+fn check_secondary_header( + pre_hash: B::Hash, + pre_digest: (AuthorityIndex, SlotNumber), + signature: AuthoritySignature, + epoch: &Epoch, +) -> Result<(), Error> { + let (authority_index, slot_number) = pre_digest; + + // check the signature is valid under the expected authority and + // chain state. + let expected_author = secondary_slot_author( + slot_number, + &epoch.authorities, + epoch.randomness, + ).ok_or_else(|| Error::NoSecondaryAuthorExpected)?; + + let author = &epoch.authorities[authority_index as usize].0; + + if expected_author != author { + return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); + } + + if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { + Ok(()) + } else { + Err(Error::BadSignature(pre_hash)) + } +} From eb4332e11c2c0a4d24635191c9a5cd0e6ca27e0e Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 16 Feb 2020 15:28:44 +0100 Subject: [PATCH 32/75] [WIP] Remove BABE-specific verification code and rename --- client/consensus/sassafras/Cargo.toml | 6 +- client/consensus/sassafras/src/authorship.rs | 187 +--- client/consensus/sassafras/src/aux_schema.rs | 20 +- client/consensus/sassafras/src/lib.rs | 358 +++----- client/consensus/sassafras/src/tests.rs | 812 ------------------ .../consensus/sassafras/src/verification.rs | 155 +--- 6 files changed, 159 insertions(+), 1379 deletions(-) delete mode 100644 client/consensus/sassafras/src/tests.rs diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index c36b5216c2d1f..285ef35ef08aa 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -1,14 +1,14 @@ [package] -name = "sc-consensus-babe" +name = "sc-consensus-sassafras" version = "0.8.0" authors = ["Parity Technologies "] -description = "BABE consensus algorithm for substrate" +description = "Sassafras consensus algorithm for substrate" edition = "2018" license = "GPL-3.0" [dependencies] codec = { package = 
"parity-scale-codec", version = "1.0.0", features = ["derive"] } -sp-consensus-babe = { version = "0.8", path = "../../../primitives/consensus/babe" } +sp-consensus-sassafras = { version = "0.8", path = "../../../primitives/consensus/sassafras" } sp-core = { version = "2.0.0", path = "../../../primitives/core" } sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } num-bigint = "0.2.3" diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 8b28aefa2f77a..09493f029777d 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -14,14 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! BABE authority selection and slot claiming. +//! Sassafras authority selection and slot claiming. use merlin::Transcript; -use sp_consensus_babe::{ - AuthorityId, BabeAuthorityWeight, BABE_ENGINE_ID, BABE_VRF_PREFIX, - SlotNumber, AuthorityPair, BabeConfiguration +use sp_consensus_sassafras::{ + AuthorityId, SassafrasAuthorityWeight, SASSAFRAS_ENGINE_ID, SASSAFRAS_VRF_PREFIX, + SlotNumber, AuthorityPair, SassafrasConfiguration }; -use sp_consensus_babe::digests::PreDigest; +use sp_consensus_sassafras::digests::PreDigest; use sp_core::{U256, blake2_256}; use codec::Encode; use schnorrkel::vrf::VRFInOut; @@ -29,117 +29,6 @@ use sp_core::Pair; use sc_keystore::KeyStorePtr; use super::Epoch; -/// Calculates the primary selection threshold for a given authority, taking -/// into account `c` (`1 - c` represents the probability of a slot being empty). 
-pub(super) fn calculate_primary_threshold( - c: (u64, u64), - authorities: &[(AuthorityId, BabeAuthorityWeight)], - authority_index: usize, -) -> u128 { - use num_bigint::BigUint; - use num_rational::BigRational; - use num_traits::{cast::ToPrimitive, identities::One}; - - let c = c.0 as f64 / c.1 as f64; - - let theta = - authorities[authority_index].1 as f64 / - authorities.iter().map(|(_, weight)| weight).sum::() as f64; - - let calc = || { - let p = BigRational::from_float(1f64 - (1f64 - c).powf(theta))?; - let numer = p.numer().to_biguint()?; - let denom = p.denom().to_biguint()?; - ((BigUint::one() << 128) * numer / denom).to_u128() - }; - - calc().unwrap_or(u128::max_value()) -} - -/// Returns true if the given VRF output is lower than the given threshold, -/// false otherwise. -pub(super) fn check_primary_threshold(inout: &VRFInOut, threshold: u128) -> bool { - u128::from_le_bytes(inout.make_bytes::<[u8; 16]>(BABE_VRF_PREFIX)) < threshold -} - -/// Get the expected secondary author for the given slot and with given -/// authorities. This should always assign the slot to some authority unless the -/// authorities list is empty. 
-pub(super) fn secondary_slot_author( - slot_number: u64, - authorities: &[(AuthorityId, BabeAuthorityWeight)], - randomness: [u8; 32], -) -> Option<&AuthorityId> { - if authorities.is_empty() { - return None; - } - - let rand = U256::from((randomness, slot_number).using_encoded(blake2_256)); - - let authorities_len = U256::from(authorities.len()); - let idx = rand % authorities_len; - - let expected_author = authorities.get(idx.as_u32() as usize) - .expect("authorities not empty; index constrained to list length; \ - this is a valid index; qed"); - - Some(&expected_author.0) -} - -#[allow(deprecated)] -pub(super) fn make_transcript( - randomness: &[u8], - slot_number: u64, - epoch: u64, -) -> Transcript { - let mut transcript = Transcript::new(&BABE_ENGINE_ID); - transcript.commit_bytes(b"slot number", &slot_number.to_le_bytes()); - transcript.commit_bytes(b"current epoch", &epoch.to_le_bytes()); - transcript.commit_bytes(b"chain randomness", randomness); - transcript -} - - -/// Claim a secondary slot if it is our turn to propose, returning the -/// pre-digest to use when authoring the block, or `None` if it is not our turn -/// to propose. -fn claim_secondary_slot( - slot_number: SlotNumber, - authorities: &[(AuthorityId, BabeAuthorityWeight)], - keystore: &KeyStorePtr, - randomness: [u8; 32], -) -> Option<(PreDigest, AuthorityPair)> { - if authorities.is_empty() { - return None; - } - - let expected_author = super::authorship::secondary_slot_author( - slot_number, - authorities, - randomness, - )?; - - let keystore = keystore.read(); - - for (pair, authority_index) in authorities.iter() - .enumerate() - .flat_map(|(i, a)| { - keystore.key_pair::(&a.0).ok().map(|kp| (kp, i)) - }) - { - if pair.public() == *expected_author { - let pre_digest = PreDigest::Secondary { - slot_number, - authority_index: authority_index as u32, - }; - - return Some((pre_digest, pair)); - } - } - - None -} - /// Tries to claim the given slot number. 
This method starts by trying to claim /// a primary VRF based slot. If we are not able to claim it, then if we have /// secondary slots enabled for the given epoch, we will fallback to trying to @@ -147,72 +36,8 @@ fn claim_secondary_slot( pub(super) fn claim_slot( slot_number: SlotNumber, epoch: &Epoch, - config: &BabeConfiguration, - keystore: &KeyStorePtr, -) -> Option<(PreDigest, AuthorityPair)> { - claim_primary_slot(slot_number, epoch, config.c, keystore) - .or_else(|| { - if config.secondary_slots { - claim_secondary_slot( - slot_number, - &epoch.authorities, - keystore, - epoch.randomness, - ) - } else { - None - } - }) -} - -fn get_keypair(q: &AuthorityPair) -> &schnorrkel::Keypair { - use sp_core::crypto::IsWrappedBy; - sp_core::sr25519::Pair::from_ref(q).as_ref() -} - -/// Claim a primary slot if it is our turn. Returns `None` if it is not our turn. -/// This hashes the slot number, epoch, genesis hash, and chain randomness into -/// the VRF. If the VRF produces a value less than `threshold`, it is our turn, -/// so it returns `Some(_)`. Otherwise, it returns `None`. -fn claim_primary_slot( - slot_number: SlotNumber, - epoch: &Epoch, - c: (u64, u64), + config: &SassafrasConfiguration, keystore: &KeyStorePtr, ) -> Option<(PreDigest, AuthorityPair)> { - let Epoch { authorities, randomness, epoch_index, .. } = epoch; - let keystore = keystore.read(); - - for (pair, authority_index) in authorities.iter() - .enumerate() - .flat_map(|(i, a)| { - keystore.key_pair::(&a.0).ok().map(|kp| (kp, i)) - }) - { - let transcript = super::authorship::make_transcript(randomness, slot_number, *epoch_index); - - // Compute the threshold we will use. - // - // We already checked that authorities contains `key.public()`, so it can't - // be empty. Therefore, this division in `calculate_threshold` is safe. 
- let threshold = super::authorship::calculate_primary_threshold(c, authorities, authority_index); - - let pre_digest = get_keypair(&pair) - .vrf_sign_after_check(transcript, |inout| super::authorship::check_primary_threshold(inout, threshold)) - .map(|s| { - PreDigest::Primary { - slot_number, - vrf_output: s.0.to_output(), - vrf_proof: s.1, - authority_index: authority_index as u32, - } - }); - - // early exit on first successful claim - if let Some(pre_digest) = pre_digest { - return Some((pre_digest, pair)); - } - } - None } diff --git a/client/consensus/sassafras/src/aux_schema.rs b/client/consensus/sassafras/src/aux_schema.rs index 2f64157f22951..26c7b9b61a2f5 100644 --- a/client/consensus/sassafras/src/aux_schema.rs +++ b/client/consensus/sassafras/src/aux_schema.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! Schema for BABE epoch changes in the aux-db. +//! Schema for Sassafras epoch changes in the aux-db. use std::sync::Arc; use parking_lot::Mutex; @@ -24,11 +24,11 @@ use codec::{Decode, Encode}; use sc_client_api::backend::AuxStore; use sp_blockchain::{Result as ClientResult, Error as ClientError}; use sp_runtime::traits::Block as BlockT; -use sp_consensus_babe::BabeBlockWeight; +use sp_consensus_sassafras::SassafrasBlockWeight; use sc_consensus_epochs::{EpochChangesFor, SharedEpochChanges}; use crate::Epoch; -const BABE_EPOCH_CHANGES: &[u8] = b"babe_epoch_changes"; +const SASSAFRAS_EPOCH_CHANGES: &[u8] = b"sassafras_epoch_changes"; fn block_weight_key(block_hash: H) -> Vec { (b"block_weight", block_hash).encode() @@ -40,7 +40,7 @@ fn load_decode(backend: &B, key: &[u8]) -> ClientResult> T: Decode, { let corrupt = |e: codec::Error| { - ClientError::Backend(format!("BABE DB is corrupted. Decode error: {}", e.what())) + ClientError::Backend(format!("Sassafras DB is corrupted. Decode error: {}", e.what())) }; match backend.get_aux(key)? 
{ None => Ok(None), @@ -52,11 +52,11 @@ fn load_decode(backend: &B, key: &[u8]) -> ClientResult> pub(crate) fn load_epoch_changes( backend: &B, ) -> ClientResult> { - let epoch_changes = load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES)? + let epoch_changes = load_decode::<_, EpochChangesFor>(backend, SASSAFRAS_EPOCH_CHANGES)? .map(|v| Arc::new(Mutex::new(v))) .unwrap_or_else(|| { - info!(target: "babe", - "Creating empty BABE epoch changes on what appears to be first startup." + info!(target: "sassafras", + "Creating empty Sassafras epoch changes on what appears to be first startup." ); SharedEpochChanges::::default() }); @@ -79,14 +79,14 @@ pub(crate) fn write_epoch_changes( { let encoded_epoch_changes = epoch_changes.encode(); write_aux( - &[(BABE_EPOCH_CHANGES, encoded_epoch_changes.as_slice())], + &[(SASSAFRAS_EPOCH_CHANGES, encoded_epoch_changes.as_slice())], ) } /// Write the cumulative chain-weight of a block ot aux storage. pub(crate) fn write_block_weight( block_hash: H, - block_weight: &BabeBlockWeight, + block_weight: &SassafrasBlockWeight, write_aux: F, ) -> R where F: FnOnce(&[(Vec, &[u8])]) -> R, @@ -104,6 +104,6 @@ pub(crate) fn write_block_weight( pub(crate) fn load_block_weight( backend: &B, block_hash: H, -) -> ClientResult> { +) -> ClientResult> { load_decode(backend, block_weight_key(block_hash).as_slice()) } diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index af6adbf2f39cd..2ce037b35b408 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -14,54 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! # BABE (Blind Assignment for Blockchain Extension) -//! -//! BABE is a slot-based block production mechanism which uses a VRF PRNG to -//! randomly perform the slot allocation. On every slot, all the authorities -//! 
generate a new random number with the VRF function and if it is lower than a -//! given threshold (which is proportional to their weight/stake) they have a -//! right to produce a block. The proof of the VRF function execution will be -//! used by other peer to validate the legitimacy of the slot claim. -//! -//! The engine is also responsible for collecting entropy on-chain which will be -//! used to seed the given VRF PRNG. An epoch is a contiguous number of slots -//! under which we will be using the same authority set. During an epoch all VRF -//! outputs produced as a result of block production will be collected on an -//! on-chain randomness pool. Epoch changes are announced one epoch in advance, -//! i.e. when ending epoch N, we announce the parameters (randomness, -//! authorities, etc.) for epoch N+2. -//! -//! Since the slot assignment is randomized, it is possible that a slot is -//! assigned to multiple validators in which case we will have a temporary fork, -//! or that a slot is assigned to no validator in which case no block is -//! produced. Which means that block times are not deterministic. -//! -//! The protocol has a parameter `c` [0, 1] for which `1 - c` is the probability -//! of a slot being empty. The choice of this parameter affects the security of -//! the protocol relating to maximum tolerable network delays. -//! -//! In addition to the VRF-based slot assignment described above, which we will -//! call primary slots, the engine also supports a deterministic secondary slot -//! assignment. Primary slots take precedence over secondary slots, when -//! authoring the node starts by trying to claim a primary slot and falls back -//! to a secondary slot claim attempt. The secondary slot assignment is done -//! by picking the authority at index: -//! -//! `blake2_256(epoch_randomness ++ slot_number) % authorities_len`. -//! -//! The fork choice rule is weight-based, where weight equals the number of -//! primary blocks in the chain. 
We will pick the heaviest chain (more primary -//! blocks) and will go with the longest one in case of a tie. -//! -//! An in-depth description and analysis of the protocol can be found here: -//! - -#![forbid(unsafe_code)] -#![warn(missing_docs)] -pub use sp_consensus_babe::{ - BabeApi, ConsensusLog, BABE_ENGINE_ID, SlotNumber, BabeConfiguration, +//! # Sassafras + +pub use sp_consensus_sassafras::{ + SassafrasApi, ConsensusLog, SASSAFRAS_ENGINE_ID, SlotNumber, SassafrasConfiguration, AuthorityId, AuthorityPair, AuthoritySignature, - BabeAuthorityWeight, VRF_OUTPUT_LENGTH, + SassafrasAuthorityWeight, VRF_OUTPUT_LENGTH, digests::{PreDigest, CompatibleDigestItem, NextEpochDescriptor}, }; pub use sp_consensus::SyncOracle; @@ -69,7 +27,7 @@ use std::{ collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}, any::Any, borrow::Cow }; -use sp_consensus_babe; +use sp_consensus_sassafras; use sp_consensus::{ImportResult, CanAuthorWith}; use sp_consensus::import_queue::{ BoxJustificationImport, BoxFinalityProofImport, @@ -89,7 +47,7 @@ use sp_consensus::{ ForkChoiceStrategy, BlockImportParams, BlockOrigin, Error as ConsensusError, SelectChain, SlotData, }; -use sp_consensus_babe::inherents::BabeInherentData; +use sp_consensus_sassafras::inherents::SassafrasInherentData; use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; use sp_consensus::import_queue::{Verifier, BasicQueue, CacheKeyId}; use sc_client_api::{ @@ -123,7 +81,7 @@ mod authorship; #[cfg(test)] mod tests; -/// BABE epoch information +/// Sassafras epoch information #[derive(Decode, Encode, Default, PartialEq, Eq, Clone, Debug)] pub struct Epoch { /// The epoch index @@ -133,7 +91,7 @@ pub struct Epoch { /// The duration of this epoch pub duration: SlotNumber, /// The authorities and their weights - pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, /// Randomness for this epoch pub randomness: [u8; 
VRF_OUTPUT_LENGTH], } @@ -163,11 +121,11 @@ impl EpochT for Epoch { #[derive(derive_more::Display, Debug)] enum Error { - #[display(fmt = "Multiple BABE pre-runtime digests, rejecting!")] + #[display(fmt = "Multiple Sassafras pre-runtime digests, rejecting!")] MultiplePreRuntimeDigests, - #[display(fmt = "No BABE pre-runtime digest found")] + #[display(fmt = "No Sassafras pre-runtime digest found")] NoPreRuntimeDigest, - #[display(fmt = "Multiple BABE epoch change digests, rejecting!")] + #[display(fmt = "Multiple Sassafras epoch change digests, rejecting!")] MultipleEpochChangeDigests, #[display(fmt = "Could not extract timestamp and slot: {:?}", _0)] Extraction(sp_consensus::Error), @@ -218,48 +176,30 @@ impl std::convert::From> for String { } } -fn babe_err(error: Error) -> Error { - debug!(target: "babe", "{}", error); - error -} - -macro_rules! babe_info { - ($($i: expr),+) => { - { - info!(target: "babe", $($i),+); - format!($($i),+) - } - }; -} - - /// Intermediate value passed to block importer. -pub struct BabeIntermediate { +pub struct SassafrasIntermediate { /// The epoch descriptor. pub epoch_descriptor: ViableEpochDescriptor, Epoch>, } -/// Intermediate key for Babe engine. -pub static INTERMEDIATE_KEY: &[u8] = b"babe1"; +/// Intermediate key for Sassafras engine. +pub static INTERMEDIATE_KEY: &[u8] = b"sassafras1"; /// A slot duration. Create with `get_or_compute`. -// FIXME: Once Rust has higher-kinded types, the duplication between this -// and `super::babe::Config` can be eliminated. -// https://github.com/paritytech/substrate/issues/2434 #[derive(Clone)] -pub struct Config(sc_consensus_slots::SlotDuration); +pub struct Config(sc_consensus_slots::SlotDuration); impl Config { /// Either fetch the slot duration from disk or compute it from the genesis /// state. 
pub fn get_or_compute(client: &C) -> ClientResult where - C: AuxStore + ProvideRuntimeApi, C::Api: BabeApi, + C: AuxStore + ProvideRuntimeApi, C::Api: SassafrasApi, { - trace!(target: "babe", "Getting slot duration"); + trace!(target: "sassafras", "Getting slot duration"); match sc_consensus_slots::SlotDuration::get_or_compute(client, |a, b| a.configuration(b)).map(Self) { Ok(s) => Ok(s), Err(s) => { - warn!(target: "babe", "Failed to get slot duration"); + warn!(target: "sassafras", "Failed to get slot duration"); Err(s) } } @@ -279,15 +219,15 @@ impl Config { } impl std::ops::Deref for Config { - type Target = BabeConfiguration; + type Target = SassafrasConfiguration; - fn deref(&self) -> &BabeConfiguration { + fn deref(&self) -> &SassafrasConfiguration { &*self.0 } } -/// Parameters for BABE. -pub struct BabeParams { +/// Parameters for Sassafras. +pub struct SassafrasParams { /// The keystore that manages the keys of the node. pub keystore: KeyStorePtr, @@ -301,7 +241,7 @@ pub struct BabeParams { pub env: E, /// The underlying block-import object to supply our produced blocks to. - /// This must be a `BabeBlockImport` or a wrapper of it, otherwise + /// This must be a `SassafrasBlockImport` or a wrapper of it, otherwise /// critical consensus logic will be omitted. pub block_import: I, @@ -315,14 +255,14 @@ pub struct BabeParams { pub force_authoring: bool, /// The source of timestamps for relative slots - pub babe_link: BabeLink, + pub sassafras_link: SassafrasLink, /// Checks if the current native implementation can author with a runtime at a given block. pub can_author_with: CAW, } -/// Start the babe worker. The returned future should be run in a tokio runtime. -pub fn start_babe(BabeParams { +/// Start the sassafras worker. The returned future should be run in a tokio runtime. 
+pub fn start_sassafras(SassafrasParams { keystore, client, select_chain, @@ -331,16 +271,16 @@ pub fn start_babe(BabeParams { sync_oracle, inherent_data_providers, force_authoring, - babe_link, + sassafras_link, can_author_with, -}: BabeParams) -> Result< +}: SassafrasParams) -> Result< impl futures::Future, sp_consensus::Error, > where B: BlockT, C: ProvideRuntimeApi + ProvideCache + ProvideUncles + BlockchainEvents + HeaderBackend + HeaderMetadata + Send + Sync + 'static, - C::Api: BabeApi, + C::Api: SassafrasApi, SC: SelectChain + 'static, E: Environment + Send + Sync, E::Proposer: Proposer>, @@ -350,38 +290,38 @@ pub fn start_babe(BabeParams { SO: SyncOracle + Send + Sync + Clone, CAW: CanAuthorWith + Send, { - let config = babe_link.config; - let worker = BabeWorker { + let config = sassafras_link.config; + let worker = SassafrasWorker { client: client.clone(), block_import: Arc::new(Mutex::new(block_import)), env, sync_oracle: sync_oracle.clone(), force_authoring, keystore, - epoch_changes: babe_link.epoch_changes.clone(), + epoch_changes: sassafras_link.epoch_changes.clone(), config: config.clone(), }; - register_babe_inherent_data_provider(&inherent_data_providers, config.slot_duration())?; + register_sassafras_inherent_data_provider(&inherent_data_providers, config.slot_duration())?; sc_consensus_uncles::register_uncles_inherent_data_provider( client.clone(), select_chain.clone(), &inherent_data_providers, )?; - babe_info!("Starting BABE Authorship worker"); + info!(target: "sassafras", "Starting Sassafras authorship worker"); Ok(sc_consensus_slots::start_slot_worker( config.0, select_chain, worker, sync_oracle, inherent_data_providers, - babe_link.time_source, + sassafras_link.time_source, can_author_with, )) } -struct BabeWorker { +struct SassafrasWorker { client: Arc, block_import: Arc>, env: E, @@ -392,13 +332,13 @@ struct BabeWorker { config: Config, } -impl sc_consensus_slots::SimpleSlotWorker for BabeWorker where +impl 
sc_consensus_slots::SimpleSlotWorker for SassafrasWorker where B: BlockT, C: ProvideRuntimeApi + ProvideCache + HeaderBackend + HeaderMetadata, - C::Api: BabeApi, + C::Api: SassafrasApi, E: Environment, E::Proposer: Proposer>, I: BlockImport> + Send + Sync + 'static, @@ -415,7 +355,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork type BlockImport = I; fn logging_target(&self) -> &'static str { - "babe" + "sassafras" } fn block_import(&self) -> Arc> { @@ -449,7 +389,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork slot_number: SlotNumber, epoch_descriptor: &ViableEpochDescriptor, Epoch>, ) -> Option { - debug!(target: "babe", "Attempting to claim slot {}", slot_number); + debug!(target: "sassafras", "Attempting to claim slot {}", slot_number); let s = authorship::claim_slot( slot_number, self.epoch_changes.lock().viable_epoch( @@ -461,7 +401,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork ); if let Some(_) = s { - debug!(target: "babe", "Claimed slot {}", slot_number); + debug!(target: "sassafras", "Claimed slot {}", slot_number); } s @@ -473,7 +413,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork claim: &Self::Claim, ) -> Vec> { vec![ - as CompatibleDigestItem>::babe_pre_digest(claim.0.clone()), + as CompatibleDigestItem>::sassafras_pre_digest(claim.0.clone()), ] } @@ -489,7 +429,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork // sign the pre-sealed hash of the block and then // add it to a digest item. 
let signature = pair.sign(header_hash.as_ref()); - let digest_item = as CompatibleDigestItem>::babe_seal(signature); + let digest_item = as CompatibleDigestItem>::sassafras_seal(signature); BlockImportParams { origin: BlockOrigin::Own, @@ -504,7 +444,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork let mut intermediates = HashMap::new(); intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box, ); intermediates }, @@ -558,7 +498,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork let slot_duration = slot_info.duration << (slot_lenience / BACKOFF_STEP); if slot_lenience >= 1 { - debug!(target: "babe", "No block for {} slots. Applying 2^({}/{}) lenience", + debug!(target: "sassafras", "No block for {} slots. Applying 2^({}/{}) lenience", slot_lenience, slot_lenience, BACKOFF_STEP); } @@ -567,13 +507,13 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork } } -impl SlotWorker for BabeWorker where +impl SlotWorker for SassafrasWorker where B: BlockT, C: ProvideRuntimeApi + ProvideCache + HeaderBackend + HeaderMetadata + Send + Sync, - C::Api: BabeApi, + C::Api: SassafrasApi, E: Environment + Send + Sync, E::Proposer: Proposer>, I: BlockImport> + Send + Sync + 'static, @@ -587,7 +527,7 @@ impl SlotWorker for BabeWorker where } } -/// Extract the BABE pre digest from the given header. Pre-runtime digests are +/// Extract the Sassafras pre digest from the given header. Pre-runtime digests are /// mandatory, the function will return `Err` if none is found. 
fn find_pre_digest(header: &B::Header) -> Result> { @@ -602,29 +542,29 @@ fn find_pre_digest(header: &B::Header) -> Result> let mut pre_digest: Option<_> = None; for log in header.digest().logs() { - trace!(target: "babe", "Checking log {:?}, looking for pre runtime digest", log); - match (log.as_babe_pre_digest(), pre_digest.is_some()) { - (Some(_), true) => return Err(babe_err(Error::MultiplePreRuntimeDigests)), - (None, _) => trace!(target: "babe", "Ignoring digest not meant for us"), + trace!(target: "sassafras", "Checking log {:?}, looking for pre runtime digest", log); + match (log.as_sassafras_pre_digest(), pre_digest.is_some()) { + (Some(_), true) => return Err(Error::MultiplePreRuntimeDigests), + (None, _) => trace!(target: "sassafras", "Ignoring digest not meant for us"), (s, false) => pre_digest = s, } } - pre_digest.ok_or_else(|| babe_err(Error::NoPreRuntimeDigest)) + pre_digest.ok_or_else(|| Error::NoPreRuntimeDigest) } -/// Extract the BABE epoch change digest from the given header, if it exists. +/// Extract the Sassafras epoch change digest from the given header, if it exists. 
fn find_next_epoch_digest(header: &B::Header) -> Result, Error> where DigestItemFor: CompatibleDigestItem, { let mut epoch_digest: Option<_> = None; for log in header.digest().logs() { - trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); - let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); + trace!(target: "sassafras", "Checking log {:?}, looking for epoch change digest.", log); + let log = log.try_to::(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID)); match (log, epoch_digest.is_some()) { - (Some(ConsensusLog::NextEpochData(_)), true) => return Err(babe_err(Error::MultipleEpochChangeDigests)), + (Some(ConsensusLog::NextEpochData(_)), true) => return Err(Error::MultipleEpochChangeDigests), (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch), - _ => trace!(target: "babe", "Ignoring digest not meant for us"), + _ => trace!(target: "sassafras", "Ignoring digest not meant for us"), } } @@ -640,9 +580,9 @@ impl SlotCompatible for TimeSource { &self, data: &InherentData, ) -> Result<(TimestampInherent, u64, std::time::Duration), sp_consensus::Error> { - trace!(target: "babe", "extract timestamp"); + trace!(target: "sassafras", "extract timestamp"); data.timestamp_inherent_data() - .and_then(|t| data.babe_inherent_data().map(|a| (t, a))) + .and_then(|t| data.sassafras_inherent_data().map(|a| (t, a))) .map_err(Into::into) .map_err(sp_consensus::Error::InherentData) .map(|(x, y)| (x, y, self.0.lock().0.take().unwrap_or_default())) @@ -651,13 +591,13 @@ impl SlotCompatible for TimeSource { /// State that must be shared between the import queue and the authoring logic. #[derive(Clone)] -pub struct BabeLink { +pub struct SassafrasLink { time_source: TimeSource, epoch_changes: SharedEpochChanges, config: Config, } -impl BabeLink { +impl SassafrasLink { /// Get the epoch changes of this link. 
pub fn epoch_changes(&self) -> &SharedEpochChanges { &self.epoch_changes @@ -669,8 +609,8 @@ impl BabeLink { } } -/// A verifier for Babe blocks. -pub struct BabeVerifier { +/// A verifier for Sassafras blocks. +pub struct SassafrasVerifier { client: Arc>, api: Arc, inherent_data_providers: sp_inherents::InherentDataProviders, @@ -679,7 +619,7 @@ pub struct BabeVerifier { time_source: TimeSource, } -impl BabeVerifier { +impl SassafrasVerifier { fn check_inherents( &self, block: Block, @@ -751,14 +691,14 @@ fn median_algorithm( } } -impl Verifier for BabeVerifier where +impl Verifier for SassafrasVerifier where Block: BlockT, B: Backend + 'static, E: CallExecutor + 'static + Clone + Send + Sync, RA: Send + Sync, PRA: ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, PRA::Api: BlockBuilderApi - + BabeApi, + + SassafrasApi, { fn verify( &mut self, @@ -768,7 +708,7 @@ impl Verifier for BabeVerifier>, ) -> Result<(BlockImportParams, Option)>>), String> { trace!( - target: "babe", + target: "sassafras", "Verifying origin: {:?} header: {:?} justification: {:?} body: {:?}", origin, header, @@ -776,7 +716,7 @@ impl Verifier for BabeVerifier Verifier for BabeVerifier(v_params)? 
{ CheckedHeader::Checked(pre_header, verified_info) => { - let babe_pre_digest = verified_info.pre_digest.as_babe_pre_digest() + let sassafras_pre_digest = verified_info.pre_digest.as_sassafras_pre_digest() .expect("check_header always returns a pre-digest digest item; qed"); - let slot_number = babe_pre_digest.slot_number(); + let slot_number = sassafras_pre_digest.slot_number(); let author = verified_info.author; @@ -830,14 +770,14 @@ impl Verifier for BabeVerifier Verifier for BabeVerifier Verifier for BabeVerifier ?pre_header); let mut intermediates = HashMap::new(); intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { + Box::new(SassafrasIntermediate:: { epoch_descriptor, }) as Box, ); @@ -892,8 +832,8 @@ impl Verifier for BabeVerifier { - debug!(target: "babe", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); - telemetry!(CONSENSUS_DEBUG; "babe.header_too_far_in_future"; + debug!(target: "sassafras", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); + telemetry!(CONSENSUS_DEBUG; "sassafras.header_too_far_in_future"; "hash" => ?hash, "a" => ?a, "b" => ?b ); Err(Error::::TooFarInFuture(hash).into()) @@ -902,18 +842,18 @@ impl Verifier for BabeVerifier = BasicQueue; +/// The Sassafras import queue type. +pub type SassafrasImportQueue = BasicQueue; -/// Register the babe inherent data provider, if not registered already. -fn register_babe_inherent_data_provider( +/// Register the Sassafras inherent data provider, if not registered already. 
+fn register_sassafras_inherent_data_provider( inherent_data_providers: &InherentDataProviders, slot_duration: u64, ) -> Result<(), sp_consensus::Error> { - debug!(target: "babe", "Registering"); - if !inherent_data_providers.has_provider(&sp_consensus_babe::inherents::INHERENT_IDENTIFIER) { + debug!(target: "sassafras", "Registering"); + if !inherent_data_providers.has_provider(&sp_consensus_sassafras::inherents::INHERENT_IDENTIFIER) { inherent_data_providers - .register_provider(sp_consensus_babe::inherents::InherentDataProvider::new(slot_duration)) + .register_provider(sp_consensus_sassafras::inherents::InherentDataProvider::new(slot_duration)) .map_err(Into::into) .map_err(sp_consensus::Error::InherentData) } else { @@ -921,7 +861,7 @@ fn register_babe_inherent_data_provider( } } -/// A block-import handler for BABE. +/// A block-import handler for Sassafras. /// /// This scans each imported block for epoch change signals. The signals are /// tracked in a tree (of all forks), and the import logic validates all epoch @@ -929,7 +869,7 @@ fn register_babe_inherent_data_provider( /// it is missing. /// /// The epoch change tree should be pruned as blocks are finalized. 
-pub struct BabeBlockImport { +pub struct SassafrasBlockImport { inner: I, client: Arc>, api: Arc, @@ -937,9 +877,9 @@ pub struct BabeBlockImport { config: Config, } -impl Clone for BabeBlockImport { +impl Clone for SassafrasBlockImport { fn clone(&self) -> Self { - BabeBlockImport { + SassafrasBlockImport { inner: self.inner.clone(), client: self.client.clone(), api: self.api.clone(), @@ -949,7 +889,7 @@ impl Clone for BabeBlockImport BabeBlockImport { +impl SassafrasBlockImport { fn new( client: Arc>, api: Arc, @@ -957,7 +897,7 @@ impl BabeBlockImport { block_import: I, config: Config, ) -> Self { - BabeBlockImport { + SassafrasBlockImport { client, api, inner: block_import, @@ -967,7 +907,7 @@ impl BabeBlockImport { } } -impl BlockImport for BabeBlockImport where +impl BlockImport for SassafrasBlockImport where Block: BlockT, I: BlockImport> + Send + Sync, I::Error: Into, @@ -976,7 +916,7 @@ impl BlockImport for BabeBlockImport: AuxStore, RA: Send + Sync, PRA: ProvideRuntimeApi + ProvideCache, - PRA::Api: BabeApi + ApiExt, + PRA::Api: SassafrasApi + ApiExt, { type Error = ConsensusError; type Transaction = sp_api::TransactionFor; @@ -998,28 +938,28 @@ impl BlockImport for BabeBlockImport(&block.header) - .expect("valid babe headers must contain a predigest; \ + .expect("valid sassafras headers must contain a predigest; \ header has been already verified; qed"); let slot_number = pre_digest.slot_number(); let parent_hash = *block.header.parent_hash(); let parent_header = self.client.header(&BlockId::Hash(parent_hash)) .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
- .ok_or_else(|| ConsensusError::ChainLookup(babe_err( - Error::::ParentUnavailable(parent_hash, hash) - ).into()))?; + .ok_or_else(|| ConsensusError::ChainLookup( + Error::::ParentUnavailable(parent_hash, hash).into() + ))?; let parent_slot = find_pre_digest::(&parent_header) .map(|d| d.slot_number()) - .expect("parent is non-genesis; valid BABE headers contain a pre-digest; \ + .expect("parent is non-genesis; valid Sassafras headers contain a pre-digest; \ header has already been verified; qed"); // make sure that slot number is strictly increasing if slot_number <= parent_slot { return Err( - ConsensusError::ClientImport(babe_err( - Error::::SlotNumberMustIncrease(parent_slot, slot_number) - ).into()) + ConsensusError::ClientImport( + Error::::SlotNumberMustIncrease(parent_slot, slot_number).into() + ) ); } @@ -1037,11 +977,11 @@ impl BlockImport for BabeBlockImport::ParentBlockNoAssociatedWeight(hash)).into() + Error::::ParentBlockNoAssociatedWeight(hash).into() ))? }; - let intermediate = block.take_intermediate::>( + let intermediate = block.take_intermediate::>( INTERMEDIATE_KEY )?; @@ -1062,7 +1002,7 @@ impl BlockImport for BabeBlockImport { return Err( ConsensusError::ClientImport( - babe_err(Error::::ExpectedEpochChange(hash, slot_number)).into(), + Error::::ExpectedEpochChange(hash, slot_number).into(), ) ); }, @@ -1087,15 +1027,18 @@ impl BlockImport for BabeBlockImport::FetchEpoch(parent_hash).into()) })?; - babe_info!("New epoch {} launching at block {} (block slot {} >= start slot {}).", - viable_epoch.as_ref().epoch_index, - hash, - slot_number, - viable_epoch.as_ref().start_slot); + info!(target: "sassafras", + "New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_index, + hash, + slot_number, + viable_epoch.as_ref().start_slot); let next_epoch = viable_epoch.increment(next_epoch_descriptor); - babe_info!("Next epoch starts at slot {}", next_epoch.as_ref().start_slot); + info!(target: "sassafras", + 
"Next epoch starts at slot {}", + next_epoch.as_ref().start_slot); // prune the tree of epochs not part of the finalized chain or // that are not live anymore, and then track the given epoch change @@ -1122,7 +1065,7 @@ impl BlockImport for BabeBlockImport( Ok(()) } -/// Produce a BABE block-import object to be used later on in the construction of +/// Produce a Sassafras block-import object to be used later on in the construction of /// an import-queue. /// /// Also returns a link object used to correctly instantiate the import queue @@ -1235,14 +1178,14 @@ pub fn block_import( wrapped_block_import: I, client: Arc>, api: Arc, -) -> ClientResult<(BabeBlockImport, BabeLink)> where +) -> ClientResult<(SassafrasBlockImport, SassafrasLink)> where B: Backend, E: CallExecutor + Send + Sync, RA: Send + Sync, Client: AuxStore, { let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; - let link = BabeLink { + let link = SassafrasLink { epoch_changes: epoch_changes.clone(), time_source: Default::default(), config: config.clone(), @@ -1256,7 +1199,7 @@ pub fn block_import( &mut epoch_changes.lock(), )?; - let import = BabeBlockImport::new( + let import = SassafrasBlockImport::new( client, api, epoch_changes, @@ -1267,41 +1210,41 @@ pub fn block_import( Ok((import, link)) } -/// Start an import queue for the BABE consensus algorithm. +/// Start an import queue for the Sassafras consensus algorithm. /// /// This method returns the import queue, some data that needs to be passed to the block authoring -/// logic (`BabeLink`), and a future that must be run to +/// logic (`SassafrasLink`), and a future that must be run to /// completion and is responsible for listening to finality notifications and /// pruning the epoch changes tree. /// -/// The block import object provided must be the `BabeBlockImport` or a wrapper +/// The block import object provided must be the `SassafrasBlockImport` or a wrapper /// of it, otherwise crucial import logic will be omitted. 
pub fn import_queue( - babe_link: BabeLink, + sassafras_link: SassafrasLink, block_import: I, justification_import: Option>, finality_proof_import: Option>, client: Arc>, api: Arc, inherent_data_providers: InherentDataProviders, -) -> ClientResult>> where +) -> ClientResult>> where B: Backend + 'static, I: BlockImport> + Send + Sync + 'static, E: CallExecutor + Clone + Send + Sync + 'static, RA: Send + Sync + 'static, PRA: ProvideRuntimeApi + ProvideCache + Send + Sync + AuxStore + 'static, - PRA::Api: BlockBuilderApi + BabeApi + ApiExt, + PRA::Api: BlockBuilderApi + SassafrasApi + ApiExt, { - register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration)?; + register_sassafras_inherent_data_provider(&inherent_data_providers, sassafras_link.config.slot_duration)?; - let verifier = BabeVerifier { + let verifier = SassafrasVerifier { client: client.clone(), api, inherent_data_providers, - config: babe_link.config, - epoch_changes: babe_link.epoch_changes, - time_source: babe_link.time_source, + config: sassafras_link.config, + epoch_changes: sassafras_link.epoch_changes, + time_source: sassafras_link.time_source, }; Ok(BasicQueue::new( @@ -1311,42 +1254,3 @@ pub fn import_queue( finality_proof_import, )) } - -/// BABE test helpers. Utility methods for manually authoring blocks. -#[cfg(feature = "test-helpers")] -pub mod test_helpers { - use super::*; - - /// Try to claim the given slot and return a `BabePreDigest` if - /// successful. 
- pub fn claim_slot( - slot_number: u64, - parent: &B::Header, - client: &C, - keystore: &KeyStorePtr, - link: &BabeLink, - ) -> Option where - B: BlockT, - C: ProvideRuntimeApi + - ProvideCache + - HeaderBackend + - HeaderMetadata, - C::Api: BabeApi, - { - let epoch_changes = link.epoch_changes.lock(); - let epoch = epoch_changes.epoch_data_for_child_of( - descendent_query(client), - &parent.hash(), - parent.number().clone(), - slot_number, - |slot| link.config.genesis_epoch(slot), - ).unwrap().unwrap(); - - authorship::claim_slot( - slot_number, - &epoch, - &link.config, - keystore, - ).map(|(digest, _)| digest) - } -} diff --git a/client/consensus/sassafras/src/tests.rs b/client/consensus/sassafras/src/tests.rs deleted file mode 100644 index 0f0c2f2e471b1..0000000000000 --- a/client/consensus/sassafras/src/tests.rs +++ /dev/null @@ -1,812 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! 
BABE testsuite - -// FIXME #2532: need to allow deprecated until refactor is done -// https://github.com/paritytech/substrate/issues/2532 -#![allow(deprecated)] -use super::*; -use authorship::claim_slot; - -use sp_consensus_babe::{AuthorityPair, SlotNumber}; -use sc_block_builder::BlockBuilder; -use sp_consensus::{ - NoNetwork as DummyOracle, Proposal, RecordProof, - import_queue::{BoxBlockImport, BoxJustificationImport, BoxFinalityProofImport}, -}; -use sc_network_test::*; -use sc_network_test::{Block as TestBlock, PeersClient}; -use sc_network::config::{BoxFinalityProofRequestBuilder, ProtocolConfig}; -use sp_runtime::{generic::DigestItem, traits::{Block as BlockT, DigestFor}}; -use tokio::runtime::current_thread; -use sc_client_api::{BlockchainEvents, backend::TransactionFor}; -use log::debug; -use std::{time::Duration, cell::RefCell}; - -type Item = DigestItem; - -type Error = sp_blockchain::Error; - -type TestClient = sc_client::Client< - substrate_test_runtime_client::Backend, - substrate_test_runtime_client::Executor, - TestBlock, - substrate_test_runtime_client::runtime::RuntimeApi, ->; - -#[derive(Copy, Clone, PartialEq)] -enum Stage { - PreSeal, - PostSeal, -} - -type Mutator = Arc; - -#[derive(Clone)] -struct DummyFactory { - client: Arc, - epoch_changes: SharedEpochChanges, - config: Config, - mutator: Mutator, -} - -struct DummyProposer { - factory: DummyFactory, - parent_hash: Hash, - parent_number: u64, - parent_slot: SlotNumber, -} - -impl Environment for DummyFactory { - type CreateProposer = future::Ready>; - type Proposer = DummyProposer; - type Error = Error; - - fn init(&mut self, parent_header: &::Header) - -> Self::CreateProposer - { - - let parent_slot = crate::find_pre_digest::(parent_header) - .expect("parent header has a pre-digest") - .slot_number(); - - future::ready(Ok(DummyProposer { - factory: self.clone(), - parent_hash: parent_header.hash(), - parent_number: *parent_header.number(), - parent_slot, - })) - } -} - -impl 
DummyProposer { - fn propose_with(&mut self, pre_digests: DigestFor) - -> future::Ready< - Result< - Proposal< - TestBlock, - sc_client_api::TransactionFor - >, - Error - > - > - { - let block_builder = self.factory.client.new_block_at( - &BlockId::Hash(self.parent_hash), - pre_digests, - false, - ).unwrap(); - - let mut block = match block_builder.build().map_err(|e| e.into()) { - Ok(b) => b.block, - Err(e) => return future::ready(Err(e)), - }; - - let this_slot = crate::find_pre_digest::(block.header()) - .expect("baked block has valid pre-digest") - .slot_number(); - - // figure out if we should add a consensus digest, since the test runtime - // doesn't. - let epoch_changes = self.factory.epoch_changes.lock(); - let epoch = epoch_changes.epoch_data_for_child_of( - descendent_query(&*self.factory.client), - &self.parent_hash, - self.parent_number, - this_slot, - |slot| self.factory.config.genesis_epoch(slot), - ) - .expect("client has data to find epoch") - .expect("can compute epoch for baked block"); - - let first_in_epoch = self.parent_slot < epoch.start_slot; - if first_in_epoch { - // push a `Consensus` digest signalling next change. - // we just reuse the same randomness and authorities as the prior - // epoch. this will break when we add light client support, since - // that will re-check the randomness logic off-chain. - let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { - authorities: epoch.authorities.clone(), - randomness: epoch.randomness.clone(), - }).encode(); - let digest = DigestItem::Consensus(BABE_ENGINE_ID, digest_data); - block.header.digest_mut().push(digest) - } - - // mutate the block header according to the mutator. 
- (self.factory.mutator)(&mut block.header, Stage::PreSeal); - - future::ready(Ok(Proposal { block, proof: None, storage_changes: Default::default() })) - } -} - -impl Proposer for DummyProposer { - type Error = Error; - type Transaction = sc_client_api::TransactionFor; - type Proposal = future::Ready, Error>>; - - fn propose( - &mut self, - _: InherentData, - pre_digests: DigestFor, - _: Duration, - _: RecordProof, - ) -> Self::Proposal { - self.propose_with(pre_digests) - } -} - -thread_local! { - static MUTATOR: RefCell = RefCell::new(Arc::new(|_, _|())); -} - -#[derive(Clone)] -struct PanickingBlockImport(B); - -impl> BlockImport for PanickingBlockImport { - type Error = B::Error; - type Transaction = B::Transaction; - - fn import_block( - &mut self, - block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - Ok(self.0.import_block(block, new_cache).expect("importing block failed")) - } - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - Ok(self.0.check_block(block).expect("checking block failed")) - } -} - -pub struct BabeTestNet { - peers: Vec, DummySpecialization>>, -} - -type TestHeader = ::Header; -type TestExtrinsic = ::Extrinsic; - -pub struct TestVerifier { - inner: BabeVerifier< - substrate_test_runtime_client::Backend, - substrate_test_runtime_client::Executor, - TestBlock, - substrate_test_runtime_client::runtime::RuntimeApi, - PeersFullClient, - >, - mutator: Mutator, -} - -impl Verifier for TestVerifier { - /// Verify the given data and return the BlockImportParams and an optional - /// new set of validators to import. If not, err with an Error-Message - /// presented to the User in the logs. - fn verify( - &mut self, - origin: BlockOrigin, - mut header: TestHeader, - justification: Option, - body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String> { - // apply post-sealing mutations (i.e. stripping seal, if desired). 
- (self.mutator)(&mut header, Stage::PostSeal); - Ok(self.inner.verify(origin, header, justification, body).expect("verification failed!")) - } -} - -pub struct PeerData { - link: BabeLink, - inherent_data_providers: InherentDataProviders, - block_import: Mutex< - Option>> - >, -} - -impl TestNetFactory for BabeTestNet { - type Specialization = DummySpecialization; - type Verifier = TestVerifier; - type PeerData = Option; - - /// Create new test network with peers and given config. - fn from_config(_config: &ProtocolConfig) -> Self { - debug!(target: "babe", "Creating test network from config"); - BabeTestNet { - peers: Vec::new(), - } - } - - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Option>, - Option>, - Option, - ) - { - let client = client.as_full().expect("only full clients are tested"); - let inherent_data_providers = InherentDataProviders::new(); - - let config = Config::get_or_compute(&*client).expect("config available"); - let (block_import, link) = crate::block_import( - config, - client.clone(), - client.clone(), - client.clone(), - ).expect("can initialize block-import"); - - let block_import = PanickingBlockImport(block_import); - - let data_block_import = Mutex::new( - Some(Box::new(block_import.clone()) as BoxBlockImport<_, _>) - ); - ( - BlockImportAdapter::new_full(block_import), - None, - None, - None, - Some(PeerData { link, inherent_data_providers, block_import: data_block_import }), - ) - } - - fn make_verifier( - &self, - client: PeersClient, - _cfg: &ProtocolConfig, - maybe_link: &Option, - ) - -> Self::Verifier - { - let client = client.as_full().expect("only full clients are used in test"); - trace!(target: "babe", "Creating a verifier"); - - // ensure block import and verifier are linked correctly. 
- let data = maybe_link.as_ref().expect("babe link always provided to verifier instantiation"); - - TestVerifier { - inner: BabeVerifier { - client: client.clone(), - api: client, - inherent_data_providers: data.inherent_data_providers.clone(), - config: data.link.config.clone(), - epoch_changes: data.link.epoch_changes.clone(), - time_source: data.link.time_source.clone(), - }, - mutator: MUTATOR.with(|m| m.borrow().clone()), - } - } - - fn peer(&mut self, i: usize) -> &mut Peer { - trace!(target: "babe", "Retreiving a peer"); - &mut self.peers[i] - } - - fn peers(&self) -> &Vec> { - trace!(target: "babe", "Retreiving peers"); - &self.peers - } - - fn mut_peers>)>( - &mut self, - closure: F, - ) { - closure(&mut self.peers); - } -} - -#[test] -#[should_panic] -fn rejects_empty_block() { - env_logger::try_init().unwrap(); - let mut net = BabeTestNet::new(3); - let block_builder = |builder: BlockBuilder<_, _, _>| { - builder.build().unwrap().block - }; - net.mut_peers(|peer| { - peer[0].generate_blocks(1, BlockOrigin::NetworkInitialSync, block_builder); - }) -} - -fn run_one_test( - mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static, -) { - let _ = env_logger::try_init(); - let mutator = Arc::new(mutator) as Mutator; - - MUTATOR.with(|m| *m.borrow_mut() = mutator.clone()); - let net = BabeTestNet::new(3); - - let peers = &[ - (0, "//Alice"), - (1, "//Bob"), - (2, "//Charlie"), - ]; - - let net = Arc::new(Mutex::new(net)); - let mut import_notifications = Vec::new(); - let mut runtime = current_thread::Runtime::new().unwrap(); - let mut keystore_paths = Vec::new(); - - for (peer_id, seed) in peers { - let mut net = net.lock(); - let peer = net.peer(*peer_id); - let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); - let select_chain = peer.select_chain().expect("Full client has select_chain"); - - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = 
sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); - keystore.write().insert_ephemeral_from_seed::(seed).expect("Generates authority key"); - keystore_paths.push(keystore_path); - - let mut got_own = false; - let mut got_other = false; - - let data = peer.data.as_ref().expect("babe link set up during initialization"); - - let environ = DummyFactory { - client: client.clone(), - config: data.link.config.clone(), - epoch_changes: data.link.epoch_changes.clone(), - mutator: mutator.clone(), - }; - - import_notifications.push( - // run each future until we get one of our own blocks with number higher than 5 - // that was produced locally. - client.import_notification_stream() - .take_while(move |n| future::ready(n.header.number() < &5 || { - if n.origin == BlockOrigin::Own { - got_own = true; - } else { - got_other = true; - } - - // continue until we have at least one block of our own - // and one of another peer. - !(got_own && got_other) - })) - .for_each(|_| future::ready(()) ) - ); - - - runtime.spawn(start_babe(BabeParams { - block_import: data.block_import.lock().take().expect("import set up during init"), - select_chain, - client, - env: environ, - sync_oracle: DummyOracle, - inherent_data_providers: data.inherent_data_providers.clone(), - force_authoring: false, - babe_link: data.link.clone(), - keystore, - can_author_with: sp_consensus::AlwaysCanAuthor, - }).expect("Starts babe").unit_error().compat()); - } - - runtime.spawn(futures01::future::poll_fn(move || { - net.lock().poll(); - Ok::<_, ()>(futures01::Async::NotReady::<()>) - })); - - runtime.block_on(future::join_all(import_notifications) - .unit_error().compat()).unwrap(); -} - -#[test] -fn authoring_blocks() { - run_one_test(|_, _| ()) -} - -#[test] -#[should_panic] -fn rejects_missing_inherent_digest() { - run_one_test(|header: &mut TestHeader, stage| { - let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); - header.digest_mut().logs = v.into_iter() - 
.filter(|v| stage == Stage::PostSeal || v.as_babe_pre_digest().is_none()) - .collect() - }) -} - -#[test] -#[should_panic] -fn rejects_missing_seals() { - run_one_test(|header: &mut TestHeader, stage| { - let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); - header.digest_mut().logs = v.into_iter() - .filter(|v| stage == Stage::PreSeal || v.as_babe_seal().is_none()) - .collect() - }) -} - -#[test] -#[should_panic] -fn rejects_missing_consensus_digests() { - run_one_test(|header: &mut TestHeader, stage| { - let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); - header.digest_mut().logs = v.into_iter() - .filter(|v| stage == Stage::PostSeal || v.as_next_epoch_descriptor().is_none()) - .collect() - }); -} - -#[test] -fn wrong_consensus_engine_id_rejected() { - let _ = env_logger::try_init(); - let sig = AuthorityPair::generate().0.sign(b""); - let bad_seal: Item = DigestItem::Seal([0; 4], sig.to_vec()); - assert!(bad_seal.as_babe_pre_digest().is_none()); - assert!(bad_seal.as_babe_seal().is_none()) -} - -#[test] -fn malformed_pre_digest_rejected() { - let _ = env_logger::try_init(); - let bad_seal: Item = DigestItem::Seal(BABE_ENGINE_ID, [0; 64].to_vec()); - assert!(bad_seal.as_babe_pre_digest().is_none()); -} - -#[test] -fn sig_is_not_pre_digest() { - let _ = env_logger::try_init(); - let sig = AuthorityPair::generate().0.sign(b""); - let bad_seal: Item = DigestItem::Seal(BABE_ENGINE_ID, sig.to_vec()); - assert!(bad_seal.as_babe_pre_digest().is_none()); - assert!(bad_seal.as_babe_seal().is_some()) -} - -#[test] -fn can_author_block() { - let _ = env_logger::try_init(); - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); - let pair = keystore.write().insert_ephemeral_from_seed::("//Alice") - .expect("Generates authority pair"); - - let mut i = 0; - let epoch = Epoch { - start_slot: 0, - authorities: vec![(pair.public(), 
1)], - randomness: [0; 32], - epoch_index: 1, - duration: 100, - }; - - let mut config = crate::BabeConfiguration { - slot_duration: 1000, - epoch_length: 100, - c: (3, 10), - genesis_authorities: Vec::new(), - randomness: [0; 32], - secondary_slots: true, - }; - - // with secondary slots enabled it should never be empty - match claim_slot(i, &epoch, &config, &keystore) { - None => i += 1, - Some(s) => debug!(target: "babe", "Authored block {:?}", s.0), - } - - // otherwise with only vrf-based primary slots we might need to try a couple - // of times. - config.secondary_slots = false; - loop { - match claim_slot(i, &epoch, &config, &keystore) { - None => i += 1, - Some(s) => { - debug!(target: "babe", "Authored block {:?}", s.0); - break; - } - } - } -} - -// Propose and import a new BABE block on top of the given parent. -fn propose_and_import_block( - parent: &TestHeader, - slot_number: Option, - proposer_factory: &mut DummyFactory, - block_import: &mut BoxBlockImport, -) -> sp_core::H256 { - let mut proposer = futures::executor::block_on(proposer_factory.init(parent)).unwrap(); - - let slot_number = slot_number.unwrap_or_else(|| { - let parent_pre_digest = find_pre_digest::(parent).unwrap(); - parent_pre_digest.slot_number() + 1 - }); - - let pre_digest = sp_runtime::generic::Digest { - logs: vec![ - Item::babe_pre_digest( - PreDigest::Secondary { - authority_index: 0, - slot_number, - }, - ), - ], - }; - - let parent_hash = parent.hash(); - - let mut block = futures::executor::block_on(proposer.propose_with(pre_digest)).unwrap().block; - - let epoch_descriptor = proposer_factory.epoch_changes.lock().epoch_descriptor_for_child_of( - descendent_query(&*proposer_factory.client), - &parent_hash, - *parent.number(), - slot_number, - ).unwrap().unwrap(); - - let seal = { - // sign the pre-sealed hash of the block and then - // add it to a digest item. 
- let pair = AuthorityPair::from_seed(&[1; 32]); - let pre_hash = block.header.hash(); - let signature = pair.sign(pre_hash.as_ref()); - Item::babe_seal(signature) - }; - - let post_hash = { - block.header.digest_mut().push(seal.clone()); - let h = block.header.hash(); - block.header.digest_mut().pop(); - h - }; - - let import_result = block_import.import_block( - BlockImportParams { - origin: BlockOrigin::Own, - header: block.header, - justification: None, - post_digests: vec![seal], - body: Some(block.extrinsics), - storage_changes: None, - finalized: false, - auxiliary: Vec::new(), - intermediates: { - let mut intermediates = HashMap::new(); - intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, - ); - intermediates - }, - fork_choice: Some(ForkChoiceStrategy::LongestChain), - allow_missing_state: false, - import_existing: false, - }, - Default::default(), - ).unwrap(); - - match import_result { - ImportResult::Imported(_) => {}, - _ => panic!("expected block to be imported"), - } - - post_hash -} - -#[test] -fn importing_block_one_sets_genesis_epoch() { - let mut net = BabeTestNet::new(1); - - let peer = net.peer(0); - let data = peer.data.as_ref().expect("babe link set up during initialization"); - let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); - - let mut proposer_factory = DummyFactory { - client: client.clone(), - config: data.link.config.clone(), - epoch_changes: data.link.epoch_changes.clone(), - mutator: Arc::new(|_, _| ()), - }; - - let mut block_import = data.block_import.lock().take().expect("import set up during init"); - - let genesis_header = client.header(&BlockId::Number(0)).unwrap().unwrap(); - - let block_hash = propose_and_import_block( - &genesis_header, - Some(999), - &mut proposer_factory, - &mut block_import, - ); - - let genesis_epoch = data.link.config.genesis_epoch(999); - - let epoch_changes = data.link.epoch_changes.lock(); - let 
epoch_for_second_block = epoch_changes.epoch_data_for_child_of( - descendent_query(&*client), - &block_hash, - 1, - 1000, - |slot| data.link.config.genesis_epoch(slot), - ).unwrap().unwrap(); - - assert_eq!(epoch_for_second_block, genesis_epoch); -} - -#[test] -fn importing_epoch_change_block_prunes_tree() { - use sc_client_api::Finalizer; - - let mut net = BabeTestNet::new(1); - - let peer = net.peer(0); - let data = peer.data.as_ref().expect("babe link set up during initialization"); - - let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); - let mut block_import = data.block_import.lock().take().expect("import set up during init"); - let epoch_changes = data.link.epoch_changes.clone(); - - let mut proposer_factory = DummyFactory { - client: client.clone(), - config: data.link.config.clone(), - epoch_changes: data.link.epoch_changes.clone(), - mutator: Arc::new(|_, _| ()), - }; - - // This is just boilerplate code for proposing and importing n valid BABE - // blocks that are built on top of the given parent. The proposer takes care - // of producing epoch change digests according to the epoch duration (which - // is set to 6 slots in the test runtime). - let mut propose_and_import_blocks = |parent_id, n| { - let mut hashes = Vec::new(); - let mut parent_header = client.header(&parent_id).unwrap().unwrap(); - - for _ in 0..n { - let block_hash = propose_and_import_block( - &parent_header, - None, - &mut proposer_factory, - &mut block_import, - ); - hashes.push(block_hash); - parent_header = client.header(&BlockId::Hash(block_hash)).unwrap().unwrap(); - } - - hashes - }; - - // This is the block tree that we're going to use in this test. Each node - // represents an epoch change block, the epoch duration is 6 slots. 
- // - // *---- F (#7) - // / *------ G (#19) - H (#25) - // / / - // A (#1) - B (#7) - C (#13) - D (#19) - E (#25) - // \ - // *------ I (#25) - - // Create and import the canon chain and keep track of fork blocks (A, C, D) - // from the diagram above. - let canon_hashes = propose_and_import_blocks(BlockId::Number(0), 30); - - // Create the forks - let fork_1 = propose_and_import_blocks(BlockId::Hash(canon_hashes[0]), 10); - let fork_2 = propose_and_import_blocks(BlockId::Hash(canon_hashes[12]), 15); - let fork_3 = propose_and_import_blocks(BlockId::Hash(canon_hashes[18]), 10); - - // We should be tracking a total of 9 epochs in the fork tree - assert_eq!( - epoch_changes.lock().tree().iter().count(), - 9, - ); - - // And only one root - assert_eq!( - epoch_changes.lock().tree().roots().count(), - 1, - ); - - // We finalize block #13 from the canon chain, so on the next epoch - // change the tree should be pruned, to not contain F (#7). - client.finalize_block(BlockId::Hash(canon_hashes[12]), None, false).unwrap(); - propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 7); - - // at this point no hashes from the first fork must exist on the tree - assert!( - !epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_1.contains(h)), - ); - - // but the epoch changes from the other forks must still exist - assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)) - ); - - assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), - ); - - // finalizing block #25 from the canon chain should prune out the second fork - client.finalize_block(BlockId::Hash(canon_hashes[24]), None, false).unwrap(); - propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 8); - - // at this point no hashes from the second fork must exist on the tree - assert!( - !epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)), - ); - - // while epoch changes 
from the last fork should still exist - assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), - ); -} - -#[test] -#[should_panic] -fn verify_slots_are_strictly_increasing() { - let mut net = BabeTestNet::new(1); - - let peer = net.peer(0); - let data = peer.data.as_ref().expect("babe link set up during initialization"); - - let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); - let mut block_import = data.block_import.lock().take().expect("import set up during init"); - - let mut proposer_factory = DummyFactory { - client: client.clone(), - config: data.link.config.clone(), - epoch_changes: data.link.epoch_changes.clone(), - mutator: Arc::new(|_, _| ()), - }; - - let genesis_header = client.header(&BlockId::Number(0)).unwrap().unwrap(); - - // we should have no issue importing this block - let b1 = propose_and_import_block( - &genesis_header, - Some(999), - &mut proposer_factory, - &mut block_import, - ); - - let b1 = client.header(&BlockId::Hash(b1)).unwrap().unwrap(); - - // we should fail to import this block since the slot number didn't increase. - // we will panic due to the `PanickingBlockImport` defined above. - propose_and_import_block( - &b1, - Some(999), - &mut proposer_factory, - &mut block_import, - ); -} diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 70418b8aea1e3..f13c8ca2b6382 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -14,18 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! Verification for BABE headers. +//! Verification for Sassafras headers. 
use schnorrkel::vrf::{VRFOutput, VRFProof}; use sp_runtime::{traits::Header, traits::DigestItemFor}; use sp_core::{Pair, Public}; -use sp_consensus_babe::{AuthoritySignature, SlotNumber, AuthorityIndex, AuthorityPair, AuthorityId}; -use sp_consensus_babe::digests::{PreDigest, CompatibleDigestItem}; +use sp_consensus_sassafras::{AuthoritySignature, SlotNumber, AuthorityIndex, AuthorityPair, AuthorityId}; +use sp_consensus_sassafras::digests::{PreDigest, CompatibleDigestItem}; use sc_consensus_slots::CheckedHeader; use log::{debug, trace}; -use super::{find_pre_digest, babe_err, Epoch, BlockT, Error}; +use super::{find_pre_digest, Epoch, BlockT, Error}; use super::authorship::{make_transcript, calculate_primary_threshold, check_primary_threshold, secondary_slot_author}; -/// BABE verification parameters +/// Sassafras verification parameters pub(super) struct VerificationParams<'a, B: 'a + BlockT> { /// the header being verified. pub(super) header: B::Header, @@ -37,7 +37,7 @@ pub(super) struct VerificationParams<'a, B: 'a + BlockT> { pub(super) slot_now: SlotNumber, /// epoch descriptor of the epoch this block _should_ be under, if it's valid. pub(super) epoch: &'a Epoch, - /// genesis config of this BABE chain. + /// genesis config of this Sassafras chain. pub(super) config: &'a super::Config, } @@ -48,7 +48,7 @@ pub(super) struct VerificationParams<'a, B: 'a + BlockT> { /// The seal must be the last digest. Otherwise, the whole header is considered /// unsigned. This is required for security and must not be changed. /// -/// This digest item will always return `Some` when used with `as_babe_pre_digest`. +/// This digest item will always return `Some` when used with `as_sassafras_pre_digest`. /// /// The given header can either be from a primary or secondary slot assignment, /// with each having different validation logic. 
@@ -65,68 +65,10 @@ pub(super) fn check_header( config, } = params; - let authorities = &epoch.authorities; - let pre_digest = pre_digest.map(Ok).unwrap_or_else(|| find_pre_digest::(&header))?; - - trace!(target: "babe", "Checking header"); - let seal = match header.digest_mut().pop() { - Some(x) => x, - None => return Err(babe_err(Error::HeaderUnsealed(header.hash()))), - }; - - let sig = seal.as_babe_seal().ok_or_else(|| { - babe_err(Error::HeaderBadSeal(header.hash())) - })?; - - // the pre-hash of the header doesn't include the seal - // and that's what we sign - let pre_hash = header.hash(); - - if pre_digest.slot_number() > slot_now { - header.digest_mut().push(seal); - return Ok(CheckedHeader::Deferred(header, pre_digest.slot_number())); - } - - let author = match authorities.get(pre_digest.authority_index() as usize) { - Some(author) => author.0.clone(), - None => return Err(babe_err(Error::SlotAuthorNotFound)), - }; - - match &pre_digest { - PreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { - debug!(target: "babe", "Verifying Primary block"); - - let digest = (vrf_output, vrf_proof, *authority_index, *slot_number); - - check_primary_header::( - pre_hash, - digest, - sig, - &epoch, - config.c, - )?; - }, - PreDigest::Secondary { authority_index, slot_number } if config.secondary_slots => { - debug!(target: "babe", "Verifying Secondary block"); - - let digest = (*authority_index, *slot_number); - - check_secondary_header::( - pre_hash, - digest, - sig, - &epoch, - )?; - }, - _ => { - return Err(babe_err(Error::SecondarySlotAssignmentsDisabled)); - } - } - let info = VerifiedHeaderInfo { - pre_digest: CompatibleDigestItem::babe_pre_digest(pre_digest), + pre_digest: CompatibleDigestItem::sassafras_pre_digest(pre_digest), seal, - author, + author: Default::default(), }; Ok(CheckedHeader::Checked(header, info)) } @@ -136,82 +78,3 @@ pub(super) struct VerifiedHeaderInfo { pub(super) seal: DigestItemFor, pub(super) author: 
AuthorityId, } - -/// Check a primary slot proposal header. We validate that the given header is -/// properly signed by the expected authority, and that the contained VRF proof -/// is valid. Additionally, the weight of this block must increase compared to -/// its parent since it is a primary block. -fn check_primary_header( - pre_hash: B::Hash, - pre_digest: (&VRFOutput, &VRFProof, AuthorityIndex, SlotNumber), - signature: AuthoritySignature, - epoch: &Epoch, - c: (u64, u64), -) -> Result<(), Error> { - let (vrf_output, vrf_proof, authority_index, slot_number) = pre_digest; - - let author = &epoch.authorities[authority_index as usize].0; - - if AuthorityPair::verify(&signature, pre_hash, &author) { - let (inout, _) = { - let transcript = make_transcript( - &epoch.randomness, - slot_number, - epoch.epoch_index, - ); - - schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { - p.vrf_verify(transcript, vrf_output, vrf_proof) - }).map_err(|s| { - babe_err(Error::VRFVerificationFailed(s)) - })? - }; - - let threshold = calculate_primary_threshold( - c, - &epoch.authorities, - authority_index as usize, - ); - - if !check_primary_threshold(&inout, threshold) { - return Err(babe_err(Error::VRFVerificationOfBlockFailed(author.clone(), threshold))); - } - - Ok(()) - } else { - Err(babe_err(Error::BadSignature(pre_hash))) - } -} - -/// Check a secondary slot proposal header. We validate that the given header is -/// properly signed by the expected authority, which we have a deterministic way -/// of computing. Additionally, the weight of this block must stay the same -/// compared to its parent since it is a secondary block. -fn check_secondary_header( - pre_hash: B::Hash, - pre_digest: (AuthorityIndex, SlotNumber), - signature: AuthoritySignature, - epoch: &Epoch, -) -> Result<(), Error> { - let (authority_index, slot_number) = pre_digest; - - // check the signature is valid under the expected authority and - // chain state. 
- let expected_author = secondary_slot_author( - slot_number, - &epoch.authorities, - epoch.randomness, - ).ok_or_else(|| Error::NoSecondaryAuthorExpected)?; - - let author = &epoch.authorities[authority_index as usize].0; - - if expected_author != author { - return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); - } - - if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { - Ok(()) - } else { - Err(Error::BadSignature(pre_hash)) - } -} From f0ba404f678d3c5682059c55eda6743c657b2629 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 16 Feb 2020 15:39:17 +0100 Subject: [PATCH 33/75] [WIP] Move back epoch definition --- client/consensus/sassafras/src/lib.rs | 43 +++++++++++++++++++-------- 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 2ce037b35b408..d1b5619e42810 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -81,32 +81,49 @@ mod authorship; #[cfg(test)] mod tests; -/// Sassafras epoch information -#[derive(Decode, Encode, Default, PartialEq, Eq, Clone, Debug)] +/// Validator set of a particular epoch, can be either publishing or validating. +#[derive(Debug, Clone, Encode, Decode)] +pub struct ValidatorSet { + /// Proofs of all VRFs collected. + pub proofs: Vec, + /// The authorities and their weights. + pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + /// Randomness for this epoch. + pub randomness: Randomness, +} + +/// Epoch data for Sassafras +#[derive(Debug, Clone, Encode, Decode)] pub struct Epoch { - /// The epoch index - pub epoch_index: u64, - /// The starting slot of the epoch, + /// Start slot of the epoch. pub start_slot: SlotNumber, - /// The duration of this epoch + /// Duration of this epoch. 
pub duration: SlotNumber, - /// The authorities and their weights - pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, - /// Randomness for this epoch - pub randomness: [u8; VRF_OUTPUT_LENGTH], + /// Epoch index. + pub epoch_index: u64, + + /// Publishing validator set. The set will start validating block in the next epoch. + pub publishing: ValidatorSet, + /// Validating validator set. The set validates block in the current epoch. + pub validating: ValidatorSet, } impl EpochT for Epoch { - type NextEpochDescriptor = NextEpochDescriptor; type SlotNumber = SlotNumber; + type NextEpochDescriptor = NextEpochDescriptor; fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { Epoch { epoch_index: self.epoch_index + 1, start_slot: self.start_slot + self.duration, duration: self.duration, - authorities: descriptor.authorities, - randomness: descriptor.randomness, + + validating: self.publishing.clone(), + publishing: ValidatorSet { + proofs: Vec::new(), + authorities: descriptor.authorities, + randomness: descriptor.randomness, + }, } } From 3ff9717936d7e845c29ba693a79fb5d5d3eec19f Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 16 Feb 2020 16:02:00 +0100 Subject: [PATCH 34/75] [WIP] Fix consensus log --- Cargo.lock | 29 +++++++++++++++++-- client/consensus/sassafras/src/authorship.rs | 2 +- .../consensus/sassafras/src/verification.rs | 1 - primitives/consensus/sassafras/Cargo.toml | 2 +- primitives/consensus/sassafras/src/digests.rs | 23 +++++---------- primitives/consensus/sassafras/src/lib.rs | 11 +++++++ primitives/consensus/sassafras/src/vrf.rs | 4 +-- 7 files changed, 49 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e8efe61094c34..7cbbc0b4fe90d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5839,27 +5839,52 @@ dependencies = [ [[package]] name = "sc-consensus-sassafras" -version = "2.0.0" +version = "0.8.0" dependencies = [ "derive_more", + "env_logger 0.7.1", + "fork-tree", + "futures 0.1.29", + "futures 0.3.4", + 
"futures-timer 3.0.1", "log 0.4.8", "merlin", + "num-bigint", + "num-rational", + "num-traits", "parity-scale-codec", "parking_lot 0.10.0", + "pdqselect", + "rand 0.7.3", + "sc-block-builder", "sc-client", "sc-client-api", "sc-consensus-epochs", "sc-consensus-slots", + "sc-consensus-uncles", + "sc-executor", + "sc-keystore", + "sc-network", + "sc-network-test", + "sc-service", + "sc-telemetry", "schnorrkel", "sp-api", + "sp-application-crypto", "sp-block-builder", "sp-blockchain", "sp-consensus", "sp-consensus-sassafras", "sp-core", "sp-inherents", + "sp-io", + "sp-keyring", "sp-runtime", "sp-timestamp", + "sp-version", + "substrate-test-runtime-client", + "tempfile", + "tokio 0.1.22", ] [[package]] @@ -6982,7 +7007,7 @@ dependencies = [ [[package]] name = "sp-consensus-sassafras" -version = "2.0.0" +version = "0.8.0" dependencies = [ "parity-scale-codec", "schnorrkel", diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 09493f029777d..f83f37c6e2f5b 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -18,7 +18,7 @@ use merlin::Transcript; use sp_consensus_sassafras::{ - AuthorityId, SassafrasAuthorityWeight, SASSAFRAS_ENGINE_ID, SASSAFRAS_VRF_PREFIX, + AuthorityId, SassafrasAuthorityWeight, SASSAFRAS_ENGINE_ID, SlotNumber, AuthorityPair, SassafrasConfiguration }; use sp_consensus_sassafras::digests::PreDigest; diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index f13c8ca2b6382..a505078903ff8 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -23,7 +23,6 @@ use sp_consensus_sassafras::digests::{PreDigest, CompatibleDigestItem}; use sc_consensus_slots::CheckedHeader; use log::{debug, trace}; use super::{find_pre_digest, Epoch, BlockT, Error}; -use super::authorship::{make_transcript, calculate_primary_threshold, 
check_primary_threshold, secondary_slot_author}; /// Sassafras verification parameters pub(super) struct VerificationParams<'a, B: 'a + BlockT> { diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index 8586b99478a5e..ec179bf6eab65 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-sassafras" -version = "2.0.0" +version = "0.8.0" authors = ["Parity Technologies "] description = "Primitives for Sassafras consensus" edition = "2018" diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 2f4ca291250c9..f772ce092b58a 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -64,16 +64,16 @@ impl CompatibleDigestItem for DigestItem where fn as_sassafras_next_epoch_descriptor(&self) -> Option { self.try_to(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID)) - .and_then(|x: ConsensusDigest| match x { - ConsensusDigest::NextEpoch(n) => Some(n), + .and_then(|x: super::ConsensusLog| match x { + super::ConsensusLog::NextEpochData(n) => Some(n), _ => None, }) } fn as_sassafras_post_block_descriptor(&self) -> Option { self.try_to(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID)) - .and_then(|x: ConsensusDigest| match x { - ConsensusDigest::PostBlock(p) => Some(p), + .and_then(|x: super::ConsensusLog| match x { + super::ConsensusLog::PostBlockData(p) => Some(p), _ => None, }) } @@ -87,7 +87,7 @@ impl CompatibleDigestItem for DigestItem where /// as `vrf_output`. /// /// This digest is included in every block, generated by Sassafras consensus engine. -#[derive(Clone, RuntimeDebug, Encode, Decode)] +#[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq)] pub struct PreDigest { /// Index of ticket VRF proof that has been previously committed. 
pub ticket_vrf_index: VRFIndex, @@ -103,19 +103,10 @@ pub struct PreDigest { pub post_vrf_output: VRFOutput, } -/// Consensus logs. -#[derive(Clone, RuntimeDebug, Encode, Decode)] -pub enum ConsensusDigest { - /// Next epoch descriptor digest. - NextEpoch(NextEpochDescriptor), - /// Post block descriptor digest. - PostBlock(PostBlockDescriptor), -} - /// Post-digest about next epoch information. /// /// This digest is generated by runtime, at the beginning of every epoch. -#[derive(Clone, RuntimeDebug, Encode, Decode)] +#[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq)] pub struct NextEpochDescriptor { /// The authorities that generate VRF proofs. Note that those keys will only be generating /// blocks two epochs later. @@ -128,7 +119,7 @@ pub struct NextEpochDescriptor { /// Post-digest about post-block information such as ticket commitments. /// /// This digest is generated by runtime, optional, and can be included at every block. -#[derive(Clone, RuntimeDebug, Encode, Decode)] +#[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq)] pub struct PostBlockDescriptor { /// Commitments of tickets. pub commitments: Vec, diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index c55194f0f243a..7f2ca1020812d 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -76,6 +76,17 @@ pub type SassafrasAuthorityWeight = u64; /// The weight of a Sassafras block. pub type SassafrasBlockWeight = u32; +/// An consensus log item for Sassafras. +#[derive(Decode, Encode, Clone, PartialEq, Eq, RuntimeDebug)] +pub enum ConsensusLog { + /// The epoch has changed. + NextEpochData(digests::NextEpochDescriptor), + /// Commitments to be included in the current block. + PostBlockData(digests::PostBlockDescriptor), + /// Disable the authority with given index. + OnDisabled(AuthorityIndex), +} + /// Configuration data used by the Sassafras consensus engine. 
#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub struct SassafrasConfiguration { diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs index 93c160ae46e4d..9987ede1e9bde 100644 --- a/primitives/consensus/sassafras/src/vrf.rs +++ b/primitives/consensus/sassafras/src/vrf.rs @@ -10,7 +10,7 @@ pub use schnorrkel::vrf::{VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH}; pub struct RawVRFOutput(pub [u8; VRF_OUTPUT_LENGTH]); #[cfg(feature = "std")] -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct VRFOutput(pub schnorrkel::vrf::VRFOutput); #[cfg(not(feature = "std"))] @@ -80,7 +80,7 @@ impl core::cmp::PartialEq for RawVRFProof { impl core::cmp::Eq for RawVRFProof { } #[cfg(feature = "std")] -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct VRFProof(pub schnorrkel::vrf::VRFProof); #[cfg(not(feature = "std"))] From 44f4fa398535e140e831b06d3a505b3ccc49bdac Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 16 Feb 2020 18:43:54 +0100 Subject: [PATCH 35/75] Finish skeleton --- client/consensus/sassafras/src/authorship.rs | 6 -- client/consensus/sassafras/src/lib.rs | 36 +++++++---- .../consensus/sassafras/src/verification.rs | 24 ++----- primitives/consensus/sassafras/src/digests.rs | 63 +++++++++++++++---- 4 files changed, 78 insertions(+), 51 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index f83f37c6e2f5b..4090c1a71d21b 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -16,16 +16,10 @@ //! Sassafras authority selection and slot claiming. 
-use merlin::Transcript; use sp_consensus_sassafras::{ - AuthorityId, SassafrasAuthorityWeight, SASSAFRAS_ENGINE_ID, SlotNumber, AuthorityPair, SassafrasConfiguration }; use sp_consensus_sassafras::digests::PreDigest; -use sp_core::{U256, blake2_256}; -use codec::Encode; -use schnorrkel::vrf::VRFInOut; -use sp_core::Pair; use sc_keystore::KeyStorePtr; use super::Epoch; diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index d1b5619e42810..69554e5b4f7a5 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -19,15 +19,15 @@ pub use sp_consensus_sassafras::{ SassafrasApi, ConsensusLog, SASSAFRAS_ENGINE_ID, SlotNumber, SassafrasConfiguration, AuthorityId, AuthorityPair, AuthoritySignature, - SassafrasAuthorityWeight, VRF_OUTPUT_LENGTH, + SassafrasAuthorityWeight, VRF_OUTPUT_LENGTH, VRFProof, Randomness, digests::{PreDigest, CompatibleDigestItem, NextEpochDescriptor}, }; pub use sp_consensus::SyncOracle; + use std::{ collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}, - any::Any, borrow::Cow + any::Any, borrow::Cow, convert::TryInto, }; -use sp_consensus_sassafras; use sp_consensus::{ImportResult, CanAuthorWith}; use sp_consensus::import_queue::{ BoxJustificationImport, BoxFinalityProofImport, @@ -45,11 +45,10 @@ use sc_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG}; use sp_consensus::{ self, BlockImport, Environment, Proposer, BlockCheckParams, ForkChoiceStrategy, BlockImportParams, BlockOrigin, Error as ConsensusError, - SelectChain, SlotData, + SelectChain, SlotData, import_queue::{Verifier, BasicQueue, CacheKeyId}, }; -use sp_consensus_sassafras::inherents::SassafrasInherentData; +use sp_consensus_sassafras::{self, inherents::SassafrasInherentData}; use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; -use sp_consensus::import_queue::{Verifier, BasicQueue, CacheKeyId}; use sc_client_api::{ backend::{AuxStore, Backend}, 
call_executor::CallExecutor, @@ -222,15 +221,28 @@ impl Config { } } - /// Create the genesis epoch (epoch #0). This is defined to start at the slot of - /// the first block, so that has to be provided. + /// Create the genesis epoch (epoch #0) pub fn genesis_epoch(&self, slot_number: SlotNumber) -> Epoch { + let proofs = self.genesis_proofs.clone() + .into_iter() + .map(|p| p.try_into().expect("Genesis proofs are invalid")) + .collect::>(); + Epoch { epoch_index: 0, start_slot: slot_number, duration: self.epoch_length, - authorities: self.genesis_authorities.clone(), - randomness: self.randomness.clone(), + + validating: ValidatorSet { + proofs: proofs.clone(), + authorities: self.genesis_authorities.clone(), + randomness: self.randomness.clone(), + }, + publishing: ValidatorSet { + proofs, + authorities: self.genesis_authorities.clone(), + randomness: self.randomness.clone(), + }, } } } @@ -397,7 +409,7 @@ impl sc_consensus_slots::SimpleSlotWorker for Sassafra fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { self.epoch_changes.lock() .viable_epoch(&epoch_descriptor, |slot| self.config.genesis_epoch(slot)) - .map(|epoch| epoch.as_ref().authorities.len()) + .map(|epoch| epoch.as_ref().validating.authorities.len()) } fn claim_slot( @@ -554,7 +566,7 @@ fn find_pre_digest(header: &B::Header) -> Result> return Ok(PreDigest::Secondary { slot_number: 0, authority_index: 0, - }); + }) } let mut pre_digest: Option<_> = None; diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index a505078903ff8..2b98d126bc8a8 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -15,14 +15,11 @@ // along with Substrate. If not, see . //! Verification for Sassafras headers. 
-use schnorrkel::vrf::{VRFOutput, VRFProof}; -use sp_runtime::{traits::Header, traits::DigestItemFor}; -use sp_core::{Pair, Public}; -use sp_consensus_sassafras::{AuthoritySignature, SlotNumber, AuthorityIndex, AuthorityPair, AuthorityId}; +use sp_runtime::traits::DigestItemFor; +use sp_consensus_sassafras::{SlotNumber, AuthorityId}; use sp_consensus_sassafras::digests::{PreDigest, CompatibleDigestItem}; use sc_consensus_slots::CheckedHeader; -use log::{debug, trace}; -use super::{find_pre_digest, Epoch, BlockT, Error}; +use super::{Epoch, BlockT, Error}; /// Sassafras verification parameters pub(super) struct VerificationParams<'a, B: 'a + BlockT> { @@ -56,20 +53,7 @@ pub(super) fn check_header( ) -> Result>, Error> where DigestItemFor: CompatibleDigestItem, { - let VerificationParams { - mut header, - pre_digest, - slot_now, - epoch, - config, - } = params; - - let info = VerifiedHeaderInfo { - pre_digest: CompatibleDigestItem::sassafras_pre_digest(pre_digest), - seal, - author: Default::default(), - }; - Ok(CheckedHeader::Checked(header, info)) + unimplemented!() } pub(super) struct VerifiedHeaderInfo { diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index f772ce092b58a..af1ffdbf2035b 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -88,19 +88,56 @@ impl CompatibleDigestItem for DigestItem where /// /// This digest is included in every block, generated by Sassafras consensus engine. #[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq)] -pub struct PreDigest { - /// Index of ticket VRF proof that has been previously committed. - pub ticket_vrf_index: VRFIndex, - /// Reveal of tocket VRF output. - pub ticket_vrf_output: VRFOutput, - /// Validator index. - pub authority_index: AuthorityIndex, - /// Corresponding slot number. - pub slot: SlotNumber, - /// Secondary "Post Block VRF" proof. 
- pub post_vrf_proof: VRFProof, - /// Secondary "Post Block VRF" output. - pub post_vrf_output: VRFOutput, +pub enum PreDigest { + /// A primary VRF-based slot-assignment. + Primary { + /// Index of ticket VRF proof that has been previously committed. + ticket_vrf_index: VRFIndex, + /// Reveal of tocket VRF output. + ticket_vrf_output: VRFOutput, + /// Validator index. + authority_index: AuthorityIndex, + /// Corresponding slot number. + slot_number: SlotNumber, + /// Secondary "Post Block VRF" proof. + post_vrf_proof: VRFProof, + /// Secondary "Post Block VRF" output. + post_vrf_output: VRFOutput, + }, + /// A secondary deterministic slot assignment. + Secondary { + /// Authority index. + authority_index: AuthorityIndex, + /// Slot number. + slot_number: SlotNumber, + }, +} + +impl PreDigest { + /// Returns the slot number of the pre digest. + pub fn authority_index(&self) -> AuthorityIndex { + match self { + PreDigest::Primary { authority_index, .. } => *authority_index, + PreDigest::Secondary { authority_index, .. } => *authority_index, + } + } + + /// Returns the slot number of the pre digest. + pub fn slot_number(&self) -> SlotNumber { + match self { + PreDigest::Primary { slot_number, .. } => *slot_number, + PreDigest::Secondary { slot_number, .. } => *slot_number, + } + } + + /// Returns the weight _added_ by this digest, not the cumulative weight + /// of the chain. + pub fn added_weight(&self) -> super::SassafrasBlockWeight { + match self { + PreDigest::Primary { .. } => 1, + PreDigest::Secondary { .. } => 0, + } + } } /// Post-digest about next epoch information. From f8c55e06c1a0d10ff049b2cc53a06305d7a37594 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 16 Feb 2020 18:53:10 +0100 Subject: [PATCH 36/75] Allow additional commitments to be posted directly at pre-digest Avoid dependency of pallet-sassafras for now. 
--- client/consensus/sassafras/src/lib.rs | 1 + primitives/consensus/sassafras/src/digests.rs | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 69554e5b4f7a5..e9cf0176421cb 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -566,6 +566,7 @@ fn find_pre_digest(header: &B::Header) -> Result> return Ok(PreDigest::Secondary { slot_number: 0, authority_index: 0, + commitments: Vec::new(), }) } diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index af1ffdbf2035b..f10091ae2a2eb 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -103,6 +103,8 @@ pub enum PreDigest { post_vrf_proof: VRFProof, /// Secondary "Post Block VRF" output. post_vrf_output: VRFOutput, + /// Additional commitments posted directly at pre-digest. + commitments: Vec, }, /// A secondary deterministic slot assignment. Secondary { @@ -110,6 +112,8 @@ pub enum PreDigest { authority_index: AuthorityIndex, /// Slot number. slot_number: SlotNumber, + /// Additional commitments posted directly at pre-digest. + commitments: Vec, }, } From dc687e85b783038b0abc463da891b5f4a199337d Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 16 Feb 2020 18:53:44 +0100 Subject: [PATCH 37/75] Remove unused runtime api --- primitives/consensus/sassafras/src/lib.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 7f2ca1020812d..bde59c0cfef3c 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -120,8 +120,5 @@ sp_api::decl_runtime_apis! { pub trait SassafrasApi { /// Return the configuration for Sassafras. fn configuration() -> SassafrasConfiguration; - - /// Return the proofs appended at the current block. 
- fn proofs() -> Vec; } } From 54fc67b902977d98deebf355be9194ec176ce65a Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 16 Feb 2020 19:07:52 +0100 Subject: [PATCH 38/75] Init sassafras-template so that testing can be easier --- Cargo.lock | 60 +++ Cargo.toml | 2 + bin/sassafras-template/LICENSE | 24 ++ bin/sassafras-template/README.md | 76 ++++ bin/sassafras-template/node/Cargo.toml | 38 ++ bin/sassafras-template/node/build.rs | 9 + bin/sassafras-template/node/src/chain_spec.rs | 153 ++++++++ bin/sassafras-template/node/src/cli.rs | 11 + bin/sassafras-template/node/src/command.rs | 46 +++ bin/sassafras-template/node/src/main.rs | 25 ++ bin/sassafras-template/node/src/service.rs | 242 ++++++++++++ bin/sassafras-template/runtime/Cargo.toml | 67 ++++ bin/sassafras-template/runtime/build.rs | 10 + bin/sassafras-template/runtime/src/lib.rs | 362 ++++++++++++++++++ bin/sassafras-template/scripts/init.sh | 12 + 15 files changed, 1137 insertions(+) create mode 100644 bin/sassafras-template/LICENSE create mode 100644 bin/sassafras-template/README.md create mode 100644 bin/sassafras-template/node/Cargo.toml create mode 100644 bin/sassafras-template/node/build.rs create mode 100644 bin/sassafras-template/node/src/chain_spec.rs create mode 100644 bin/sassafras-template/node/src/cli.rs create mode 100644 bin/sassafras-template/node/src/command.rs create mode 100644 bin/sassafras-template/node/src/main.rs create mode 100644 bin/sassafras-template/node/src/service.rs create mode 100644 bin/sassafras-template/runtime/Cargo.toml create mode 100644 bin/sassafras-template/runtime/build.rs create mode 100644 bin/sassafras-template/runtime/src/lib.rs create mode 100755 bin/sassafras-template/scripts/init.sh diff --git a/Cargo.lock b/Cargo.lock index 7cbbc0b4fe90d..8a4fdb80a26cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5459,6 +5459,66 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "sassafras-template" +version = "2.0.0" +dependencies = [ + "futures 0.3.4", + "log 
0.4.8", + "sassafras-template-runtime", + "sc-basic-authorship", + "sc-cli", + "sc-client", + "sc-consensus-aura", + "sc-executor", + "sc-finality-grandpa", + "sc-network", + "sc-service", + "sc-transaction-pool", + "sp-consensus", + "sp-consensus-aura", + "sp-core", + "sp-finality-grandpa", + "sp-inherents", + "sp-runtime", + "sp-transaction-pool", + "structopt", + "substrate-build-script-utils", + "vergen", +] + +[[package]] +name = "sassafras-template-runtime" +version = "2.0.0" +dependencies = [ + "frame-executive", + "frame-support", + "frame-system", + "pallet-aura", + "pallet-balances", + "pallet-grandpa", + "pallet-indices", + "pallet-randomness-collective-flip", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "parity-scale-codec", + "serde", + "sp-api", + "sp-block-builder", + "sp-consensus-aura", + "sp-core", + "sp-inherents", + "sp-io", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder-runner", +] + [[package]] name = "sc-authority-discovery" version = "0.8.0" diff --git a/Cargo.toml b/Cargo.toml index c5483a8b26ee9..c903954aafb29 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,8 @@ members = [ "bin/node-template/node", "bin/node-template/runtime", "bin/node-template/pallets/template", + "bin/sassafras-template/node", + "bin/sassafras-template/runtime", "bin/node/cli", "bin/node/executor", "bin/node/primitives", diff --git a/bin/sassafras-template/LICENSE b/bin/sassafras-template/LICENSE new file mode 100644 index 0000000000000..cf1ab25da0349 --- /dev/null +++ b/bin/sassafras-template/LICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. 
+ +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to diff --git a/bin/sassafras-template/README.md b/bin/sassafras-template/README.md new file mode 100644 index 0000000000000..c411dbeef5bcc --- /dev/null +++ b/bin/sassafras-template/README.md @@ -0,0 +1,76 @@ +# Substrate Node Template + +A new SRML-based Substrate node, ready for hacking. + +## Build + +Install Rust: + +```bash +curl https://sh.rustup.rs -sSf | sh +``` + +Initialize your Wasm Build environment: + +```bash +./scripts/init.sh +``` + +Build Wasm and native code: + +```bash +cargo build --release +``` + +## Run + +### Single node development chain + +Purge any existing developer chain state: + +```bash +./target/release/node-template purge-chain --dev +``` + +Start a development chain with: + +```bash +./target/release/node-template --dev +``` + +Detailed logs may be shown by running the node with the following environment variables set: `RUST_LOG=debug RUST_BACKTRACE=1 cargo run -- --dev`. 
+ +### Multi-node local testnet + +If you want to see the multi-node consensus algorithm in action locally, then you can create a local testnet with two validator nodes for Alice and Bob, who are the initial authorities of the genesis chain that have been endowed with testnet units. + +Optionally, give each node a name and expose them so they are listed on the Polkadot [telemetry site](https://telemetry.polkadot.io/#/Local%20Testnet). + +You'll need two terminal windows open. + +We'll start Alice's substrate node first on default TCP port 30333 with her chain database stored locally at `/tmp/alice`. The bootnode ID of her node is `QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR`, which is generated from the `--node-key` value that we specify below: + +```bash +cargo run -- \ + --base-path /tmp/alice \ + --chain=local \ + --alice \ + --node-key 0000000000000000000000000000000000000000000000000000000000000001 \ + --telemetry-url ws://telemetry.polkadot.io:1024 \ + --validator +``` + +In the second terminal, we'll start Bob's substrate node on a different TCP port of 30334, and with his chain database stored locally at `/tmp/bob`. We'll specify a value for the `--bootnodes` option that will connect his node to Alice's bootnode ID on TCP port 30333: + +```bash +cargo run -- \ + --base-path /tmp/bob \ + --bootnodes /ip4/127.0.0.1/tcp/30333/p2p/QmRpheLN4JWdAnY7HGJfWFNbfkQCb6tFf4vvA6hgjMZKrR \ + --chain=local \ + --bob \ + --port 30334 \ + --telemetry-url ws://telemetry.polkadot.io:1024 \ + --validator +``` + +Additional CLI usage options are available and may be shown by running `cargo run -- --help`. 
diff --git a/bin/sassafras-template/node/Cargo.toml b/bin/sassafras-template/node/Cargo.toml new file mode 100644 index 0000000000000..7732fe335e9d1 --- /dev/null +++ b/bin/sassafras-template/node/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "sassafras-template" +version = "2.0.0" +authors = ["Anonymous"] +edition = "2018" +license = "Unlicense" +build = "build.rs" + +[[bin]] +name = "sassafras-template" + +[dependencies] +futures = "0.3.1" +log = "0.4.8" +structopt = "0.3.8" + +sc-cli = { version = "0.8.0", path = "../../../client/cli" } +sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sc-executor = { version = "0.8", path = "../../../client/executor" } +sc-service = { version = "0.8", path = "../../../client/service" } +sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } +sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +sc-network = { version = "0.8", path = "../../../client/network" } +sc-consensus-aura = { version = "0.8", path = "../../../client/consensus/aura" } +sp-consensus-aura = { version = "0.8", path = "../../../primitives/consensus/aura" } +sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } +grandpa = { version = "0.8", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } +sc-client = { version = "0.8", path = "../../../client/" } +sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sc-basic-authorship = { path = "../../../client/basic-authorship" } + +sassafras-template-runtime = { version = "2.0.0", path = "../runtime" } + +[build-dependencies] +vergen = "3.0.4" +build-script-utils = { version = "2.0.0", package = "substrate-build-script-utils", path = 
"../../../utils/build-script-utils" } diff --git a/bin/sassafras-template/node/build.rs b/bin/sassafras-template/node/build.rs new file mode 100644 index 0000000000000..222cbb409285b --- /dev/null +++ b/bin/sassafras-template/node/build.rs @@ -0,0 +1,9 @@ +use vergen::{ConstantsFlags, generate_cargo_keys}; + +const ERROR_MSG: &str = "Failed to generate metadata files"; + +fn main() { + generate_cargo_keys(ConstantsFlags::SHA_SHORT).expect(ERROR_MSG); + + build_script_utils::rerun_if_git_head_changed(); +} diff --git a/bin/sassafras-template/node/src/chain_spec.rs b/bin/sassafras-template/node/src/chain_spec.rs new file mode 100644 index 0000000000000..4ee3b0805e12c --- /dev/null +++ b/bin/sassafras-template/node/src/chain_spec.rs @@ -0,0 +1,153 @@ +use sp_core::{Pair, Public, sr25519}; +use sassafras_template_runtime::{ + AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig, + SudoConfig, IndicesConfig, SystemConfig, WASM_BINARY, Signature +}; +use sp_consensus_aura::sr25519::{AuthorityId as AuraId}; +use grandpa_primitives::{AuthorityId as GrandpaId}; +use sc_service; +use sp_runtime::traits::{Verify, IdentifyAccount}; + +// Note this is the URL for the telemetry server +//const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; + +/// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. +pub type ChainSpec = sc_service::ChainSpec; + +/// The chain specification option. This is expected to come in from the CLI and +/// is little more than one of a number of alternatives which can easily be converted +/// from a string (`--chain=...`) into a `ChainSpec`. +#[derive(Clone, Debug)] +pub enum Alternative { + /// Whatever the current runtime is, with just Alice as an auth. + Development, + /// Whatever the current runtime is, with simple Alice/Bob auths. 
+ LocalTestnet, +} + +/// Helper function to generate a crypto pair from seed +pub fn get_from_seed(seed: &str) -> ::Public { + TPublic::Pair::from_string(&format!("//{}", seed), None) + .expect("static values are valid; qed") + .public() +} + +type AccountPublic = ::Signer; + +/// Helper function to generate an account ID from seed +pub fn get_account_id_from_seed(seed: &str) -> AccountId where + AccountPublic: From<::Public> +{ + AccountPublic::from(get_from_seed::(seed)).into_account() +} + +/// Helper function to generate an authority key for Aura +pub fn get_authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) { + ( + get_from_seed::(s), + get_from_seed::(s), + ) +} + +impl Alternative { + /// Get an actual chain config from one of the alternatives. + pub(crate) fn load(self) -> Result { + Ok(match self { + Alternative::Development => ChainSpec::from_genesis( + "Development", + "dev", + || testnet_genesis( + vec![ + get_authority_keys_from_seed("Alice"), + ], + get_account_id_from_seed::("Alice"), + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + true, + ), + vec![], + None, + None, + None, + None + ), + Alternative::LocalTestnet => ChainSpec::from_genesis( + "Local Testnet", + "local_testnet", + || testnet_genesis( + vec![ + get_authority_keys_from_seed("Alice"), + get_authority_keys_from_seed("Bob"), + ], + get_account_id_from_seed::("Alice"), + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + 
get_account_id_from_seed::("Ferdie//stash"), + ], + true, + ), + vec![], + None, + None, + None, + None + ), + }) + } + + pub(crate) fn from(s: &str) -> Option { + match s { + "dev" => Some(Alternative::Development), + "" | "local" => Some(Alternative::LocalTestnet), + _ => None, + } + } +} + +fn testnet_genesis(initial_authorities: Vec<(AuraId, GrandpaId)>, + root_key: AccountId, + endowed_accounts: Vec, + _enable_println: bool) -> GenesisConfig { + GenesisConfig { + system: Some(SystemConfig { + code: WASM_BINARY.to_vec(), + changes_trie_config: Default::default(), + }), + indices: Some(IndicesConfig { + ids: endowed_accounts.clone(), + }), + balances: Some(BalancesConfig { + balances: endowed_accounts.iter().cloned().map(|k|(k, 1 << 60)).collect(), + }), + sudo: Some(SudoConfig { + key: root_key, + }), + aura: Some(AuraConfig { + authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(), + }), + grandpa: Some(GrandpaConfig { + authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), + }), + } +} + +pub fn load_spec(id: &str) -> Result, String> { + Ok(match Alternative::from(id) { + Some(spec) => Some(spec.load()?), + None => None, + }) +} diff --git a/bin/sassafras-template/node/src/cli.rs b/bin/sassafras-template/node/src/cli.rs new file mode 100644 index 0000000000000..0091ef7d75912 --- /dev/null +++ b/bin/sassafras-template/node/src/cli.rs @@ -0,0 +1,11 @@ +use sc_cli::{RunCmd, Subcommand}; +use structopt::StructOpt; + +#[derive(Debug, StructOpt)] +pub struct Cli { + #[structopt(subcommand)] + pub subcommand: Option, + + #[structopt(flatten)] + pub run: RunCmd, +} diff --git a/bin/sassafras-template/node/src/command.rs b/bin/sassafras-template/node/src/command.rs new file mode 100644 index 0000000000000..e7e386703deee --- /dev/null +++ b/bin/sassafras-template/node/src/command.rs @@ -0,0 +1,46 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; +use sc_cli::{VersionInfo, error}; +use crate::service; +use crate::chain_spec; +use crate::cli::Cli; + +/// Parse and run command line arguments +pub fn run(version: VersionInfo) -> error::Result<()> { + let opt = sc_cli::from_args::(&version); + + let config = sc_service::Configuration::new(&version); + + match opt.subcommand { + Some(subcommand) => sc_cli::run_subcommand( + config, + subcommand, + chain_spec::load_spec, + |config: _| Ok(new_full_start!(config).0), + &version, + ), + None => sc_cli::run( + config, + opt.run, + service::new_light, + service::new_full, + chain_spec::load_spec, + &version, + ) + } +} diff --git a/bin/sassafras-template/node/src/main.rs b/bin/sassafras-template/node/src/main.rs new file mode 100644 index 0000000000000..9d0a57d77a851 --- /dev/null +++ b/bin/sassafras-template/node/src/main.rs @@ -0,0 +1,25 @@ +//! Substrate Node Template CLI library. 
+#![warn(missing_docs)] + +mod chain_spec; +#[macro_use] +mod service; +mod cli; +mod command; + +pub use sc_cli::{VersionInfo, error}; + +fn main() -> Result<(), error::Error> { + let version = VersionInfo { + name: "Substrate Node", + commit: env!("VERGEN_SHA_SHORT"), + version: env!("CARGO_PKG_VERSION"), + executable_name: "node-template", + author: "Anonymous", + description: "Template Node", + support_url: "support.anonymous.an", + copyright_start_year: 2017, + }; + + command::run(version) +} diff --git a/bin/sassafras-template/node/src/service.rs b/bin/sassafras-template/node/src/service.rs new file mode 100644 index 0000000000000..0de7d0052e689 --- /dev/null +++ b/bin/sassafras-template/node/src/service.rs @@ -0,0 +1,242 @@ +//! Service and ServiceFactory implementation. Specialized wrapper over substrate service. + +use std::sync::Arc; +use std::time::Duration; +use sc_client::LongestChain; +use sassafras_template_runtime::{self, GenesisConfig, opaque::Block, RuntimeApi}; +use sc_service::{error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder}; +use sp_inherents::InherentDataProviders; +use sc_network::{construct_simple_protocol}; +use sc_executor::native_executor_instance; +pub use sc_executor::NativeExecutor; +use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; +use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; + +// Our native executor instance. +native_executor_instance!( + pub Executor, + sassafras_template_runtime::api::dispatch, + sassafras_template_runtime::native_version, +); + +construct_simple_protocol! { + /// Demo protocol attachment for substrate. + pub struct NodeProtocol where Block = Block { } +} + +/// Starts a `ServiceBuilder` for a full service. +/// +/// Use this macro if you don't actually need the full service, but just the builder in order to +/// be able to perform chain operations. +macro_rules! 
new_full_start { + ($config:expr) => {{ + let mut import_setup = None; + let inherent_data_providers = sp_inherents::InherentDataProviders::new(); + + let builder = sc_service::ServiceBuilder::new_full::< + sassafras_template_runtime::opaque::Block, sassafras_template_runtime::RuntimeApi, crate::service::Executor + >($config)? + .with_select_chain(|_config, backend| { + Ok(sc_client::LongestChain::new(backend.clone())) + })? + .with_transaction_pool(|config, client, _fetcher| { + let pool_api = sc_transaction_pool::FullChainApi::new(client.clone()); + let pool = sc_transaction_pool::BasicPool::new(config, std::sync::Arc::new(pool_api)); + Ok(pool) + })? + .with_import_queue(|_config, client, mut select_chain, transaction_pool| { + let select_chain = select_chain.take() + .ok_or_else(|| sc_service::Error::SelectChainRequired)?; + + let (grandpa_block_import, grandpa_link) = + grandpa::block_import::<_, _, _, sassafras_template_runtime::RuntimeApi, _>( + client.clone(), &*client, select_chain + )?; + + let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( + grandpa_block_import.clone(), client.clone(), + ); + + let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _>( + sc_consensus_aura::SlotDuration::get_or_compute(&*client)?, + aura_block_import, + Some(Box::new(grandpa_block_import.clone())), + None, + client, + inherent_data_providers.clone(), + Some(transaction_pool), + )?; + + import_setup = Some((grandpa_block_import, grandpa_link)); + + Ok(import_queue) + })?; + + (builder, import_setup, inherent_data_providers) + }} +} + +/// Builds a new service for a full client. 
+pub fn new_full(config: Configuration) + -> Result +{ + let is_authority = config.roles.is_authority(); + let force_authoring = config.force_authoring; + let name = config.name.clone(); + let disable_grandpa = config.disable_grandpa; + + // sentry nodes announce themselves as authorities to the network + // and should run the same protocols authorities do, but it should + // never actively participate in any consensus process. + let participates_in_consensus = is_authority && !config.sentry_mode; + + let (builder, mut import_setup, inherent_data_providers) = new_full_start!(config); + + let (block_import, grandpa_link) = + import_setup.take() + .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); + + let service = builder.with_network_protocol(|_| Ok(NodeProtocol::new()))? + .with_finality_proof_provider(|client, backend| + Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _) + )? + .build()?; + + if participates_in_consensus { + let proposer = sc_basic_authorship::ProposerFactory { + client: service.client(), + transaction_pool: service.transaction_pool(), + }; + + let client = service.client(); + let select_chain = service.select_chain() + .ok_or(ServiceError::SelectChainRequired)?; + + let can_author_with = + sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); + + let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _>( + sc_consensus_aura::SlotDuration::get_or_compute(&*client)?, + client, + select_chain, + block_import, + proposer, + service.network(), + inherent_data_providers.clone(), + force_authoring, + service.keystore(), + can_author_with, + )?; + + // the AURA authoring task is considered essential, i.e. if it + // fails we take down the service with it. + service.spawn_essential_task("aura", aura); + } + + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. 
+ let keystore = if participates_in_consensus { + Some(service.keystore()) + } else { + None + }; + + let grandpa_config = grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: Duration::from_millis(333), + justification_period: 512, + name: Some(name), + observer_enabled: true, + keystore, + is_authority, + }; + + match (is_authority, disable_grandpa) { + (false, false) => { + // start the lightweight GRANDPA observer + service.spawn_task("grandpa-observer", grandpa::run_grandpa_observer( + grandpa_config, + grandpa_link, + service.network(), + service.on_exit(), + service.spawn_task_handle(), + )?); + }, + (true, false) => { + // start the full GRANDPA voter + let voter_config = grandpa::GrandpaParams { + config: grandpa_config, + link: grandpa_link, + network: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + on_exit: service.on_exit(), + telemetry_on_connect: Some(service.telemetry_on_connect_stream()), + voting_rule: grandpa::VotingRulesBuilder::default().build(), + executor: service.spawn_task_handle(), + }; + + // the GRANDPA voter task is considered infallible, i.e. + // if it fails we take down the service with it. + service.spawn_essential_task("grandpa", grandpa::run_grandpa_voter(voter_config)?); + }, + (_, true) => { + grandpa::setup_disabled_grandpa( + service.client(), + &inherent_data_providers, + service.network(), + )?; + }, + } + + Ok(service) +} + +/// Builds a new service for a light client. +pub fn new_light(config: Configuration) + -> Result +{ + let inherent_data_providers = InherentDataProviders::new(); + + ServiceBuilder::new_light::(config)? + .with_select_chain(|_config, backend| { + Ok(LongestChain::new(backend.clone())) + })? 
+ .with_transaction_pool(|config, client, fetcher| { + let fetcher = fetcher + .ok_or_else(|| "Trying to start light transaction pool without active fetcher")?; + + let pool_api = sc_transaction_pool::LightChainApi::new(client.clone(), fetcher.clone()); + let pool = sc_transaction_pool::BasicPool::with_revalidation_type( + config, Arc::new(pool_api), sc_transaction_pool::RevalidationType::Light, + ); + Ok(pool) + })? + .with_import_queue_and_fprb(|_config, client, backend, fetcher, _select_chain, _tx_pool| { + let fetch_checker = fetcher + .map(|fetcher| fetcher.checker().clone()) + .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; + let grandpa_block_import = grandpa::light_block_import::<_, _, _, RuntimeApi>( + client.clone(), backend, &*client.clone(), Arc::new(fetch_checker), + )?; + let finality_proof_import = grandpa_block_import.clone(); + let finality_proof_request_builder = + finality_proof_import.create_finality_proof_request_builder(); + + let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, ()>( + sc_consensus_aura::SlotDuration::get_or_compute(&*client)?, + grandpa_block_import, + None, + Some(Box::new(finality_proof_import)), + client, + inherent_data_providers.clone(), + None, + )?; + + Ok((import_queue, finality_proof_request_builder)) + })? + .with_network_protocol(|_| Ok(NodeProtocol::new()))? + .with_finality_proof_provider(|client, backend| + Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _) + )? 
+ .build() +} diff --git a/bin/sassafras-template/runtime/Cargo.toml b/bin/sassafras-template/runtime/Cargo.toml new file mode 100644 index 0000000000000..50885c0e20908 --- /dev/null +++ b/bin/sassafras-template/runtime/Cargo.toml @@ -0,0 +1,67 @@ +[package] +name = "sassafras-template-runtime" +version = "2.0.0" +authors = ["Anonymous"] +edition = "2018" +license = "Unlicense" + +[dependencies] +codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } + +aura = { version = "2.0.0", default-features = false, package = "pallet-aura", path = "../../../frame/aura" } +balances = { version = "2.0.0", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } +frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" } +grandpa = { version = "2.0.0", default-features = false, package = "pallet-grandpa", path = "../../../frame/grandpa" } +indices = { version = "2.0.0", default-features = false, package = "pallet-indices", path = "../../../frame/indices" } +randomness-collective-flip = { version = "2.0.0", default-features = false, package = "pallet-randomness-collective-flip", path = "../../../frame/randomness-collective-flip" } +sudo = { version = "2.0.0", default-features = false, package = "pallet-sudo", path = "../../../frame/sudo" } +system = { version = "2.0.0", default-features = false, package = "frame-system", path = "../../../frame/system" } +timestamp = { version = "2.0.0", default-features = false, package = "pallet-timestamp", path = "../../../frame/timestamp" } +transaction-payment = { version = "2.0.0", default-features = false, package = "pallet-transaction-payment", path = "../../../frame/transaction-payment" } +frame-executive = { version = "2.0.0", default-features = false, path = "../../../frame/executive" } +serde = { version = "1.0.101", optional = true, features = ["derive"] } +sp-api = { version = "2.0.0", default-features = 
false, path = "../../../primitives/api" } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false} +sp-consensus-aura = { version = "0.8", default-features = false, path = "../../../primitives/consensus/aura" } +sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } +sp-inherents = { path = "../../../primitives/inherents", default-features = false} +sp-io = { version = "2.0.0", default-features = false, path = "../../../primitives/io" } +sp-offchain = { version = "2.0.0", default-features = false, path = "../../../primitives/offchain" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-session = { version = "2.0.0", default-features = false, path = "../../../primitives/session" } +sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } +sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "2.0.0", default-features = false, path = "../../../primitives/version" } + +[build-dependencies] +wasm-builder-runner = { version = "1.0.4", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } + +[features] +default = ["std"] +std = [ + "aura/std", + "balances/std", + "codec/std", + "frame-executive/std", + "frame-support/std", + "grandpa/std", + "indices/std", + "randomness-collective-flip/std", + "serde", + "sp-api/std", + "sp-block-builder/std", + "sp-consensus-aura/std", + "sp-core/std", + "sp-inherents/std", + "sp-io/std", + "sp-offchain/std", + "sp-runtime/std", + "sp-session/std", + "sp-std/std", + "sp-transaction-pool/std", + "sp-version/std", + "sudo/std", + "system/std", + "timestamp/std", + "transaction-payment/std", +] diff --git a/bin/sassafras-template/runtime/build.rs b/bin/sassafras-template/runtime/build.rs new file mode 100644 index 0000000000000..39f7f56feb0b1 --- /dev/null +++ 
b/bin/sassafras-template/runtime/build.rs @@ -0,0 +1,10 @@ +use wasm_builder_runner::WasmBuilder; + +fn main() { + WasmBuilder::new() + .with_current_project() + .with_wasm_builder_from_crates("1.0.9") + .export_heap_base() + .import_memory() + .build() +} diff --git a/bin/sassafras-template/runtime/src/lib.rs b/bin/sassafras-template/runtime/src/lib.rs new file mode 100644 index 0000000000000..6ff8f956719f9 --- /dev/null +++ b/bin/sassafras-template/runtime/src/lib.rs @@ -0,0 +1,362 @@ +//! The Substrate Node Template runtime. This can be compiled with `#[no_std]`, ready for Wasm. + +#![cfg_attr(not(feature = "std"), no_std)] +// `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. +#![recursion_limit="256"] + +// Make the WASM binary available. +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +use sp_std::prelude::*; +use sp_core::OpaqueMetadata; +use sp_runtime::{ + ApplyExtrinsicResult, transaction_validity::TransactionValidity, generic, create_runtime_str, + impl_opaque_keys, MultiSignature +}; +use sp_runtime::traits::{ + BlakeTwo256, Block as BlockT, StaticLookup, Verify, ConvertInto, IdentifyAccount +}; +use sp_api::impl_runtime_apis; +use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use grandpa::AuthorityList as GrandpaAuthorityList; +use grandpa::fg_primitives; +use sp_version::RuntimeVersion; +#[cfg(feature = "std")] +use sp_version::NativeVersion; + +// A few exports that help ease life for downstream crates. +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; +pub use timestamp::Call as TimestampCall; +pub use balances::Call as BalancesCall; +pub use sp_runtime::{Permill, Perbill}; +pub use frame_support::{ + StorageValue, construct_runtime, parameter_types, + traits::Randomness, + weights::Weight, +}; + +/// An index to a block. +pub type BlockNumber = u32; + +/// Alias to 512-bit hash when used in the context of a transaction signature on the chain. 
+pub type Signature = MultiSignature; + +/// Some way of identifying an account on the chain. We intentionally make it equivalent +/// to the public key of our transaction signing scheme. +pub type AccountId = <::Signer as IdentifyAccount>::AccountId; + +/// The type for looking up accounts. We don't expect more than 4 billion of them, but you +/// never know... +pub type AccountIndex = u32; + +/// Balance of an account. +pub type Balance = u128; + +/// Index of a transaction in the chain. +pub type Index = u32; + +/// A hash of some data used by the chain. +pub type Hash = sp_core::H256; + +/// Digest item type. +pub type DigestItem = generic::DigestItem; + +/// Opaque types. These are used by the CLI to instantiate machinery that don't need to know +/// the specifics of the runtime. They can then be made to be agnostic over specific formats +/// of data like extrinsics, allowing for them to continue syncing the network through upgrades +/// to even the core datastructures. +pub mod opaque { + use super::*; + + pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; + + /// Opaque block header type. + pub type Header = generic::Header; + /// Opaque block type. + pub type Block = generic::Block; + /// Opaque block identifier type. + pub type BlockId = generic::BlockId; + + impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + pub grandpa: Grandpa, + } + } +} + +/// This runtime version. +pub const VERSION: RuntimeVersion = RuntimeVersion { + spec_name: create_runtime_str!("node-template"), + impl_name: create_runtime_str!("node-template"), + authoring_version: 1, + spec_version: 1, + impl_version: 1, + apis: RUNTIME_API_VERSIONS, +}; + +pub const MILLISECS_PER_BLOCK: u64 = 6000; + +pub const SLOT_DURATION: u64 = MILLISECS_PER_BLOCK; + +// These time units are defined in number of blocks. 
+pub const MINUTES: BlockNumber = 60_000 / (MILLISECS_PER_BLOCK as BlockNumber); +pub const HOURS: BlockNumber = MINUTES * 60; +pub const DAYS: BlockNumber = HOURS * 24; + +/// The version infromation used to identify this runtime when compiled natively. +#[cfg(feature = "std")] +pub fn native_version() -> NativeVersion { + NativeVersion { + runtime_version: VERSION, + can_author_with: Default::default(), + } +} + +parameter_types! { + pub const BlockHashCount: BlockNumber = 250; + pub const MaximumBlockWeight: Weight = 1_000_000; + pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; + pub const Version: RuntimeVersion = VERSION; +} + +impl system::Trait for Runtime { + /// The identifier used to distinguish between accounts. + type AccountId = AccountId; + /// The aggregated dispatch type that is available for extrinsics. + type Call = Call; + /// The lookup mechanism to get account ID from whatever is passed in dispatchers. + type Lookup = Indices; + /// The index type for storing how many extrinsics an account has signed. + type Index = Index; + /// The index type for blocks. + type BlockNumber = BlockNumber; + /// The type for hashing blocks and tries. + type Hash = Hash; + /// The hashing algorithm used. + type Hashing = BlakeTwo256; + /// The header type. + type Header = generic::Header; + /// The ubiquitous event type. + type Event = Event; + /// The ubiquitous origin type. + type Origin = Origin; + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + type BlockHashCount = BlockHashCount; + /// Maximum weight of each block. + type MaximumBlockWeight = MaximumBlockWeight; + /// Maximum size of all encoded transactions (in bytes) that are allowed in one block. + type MaximumBlockLength = MaximumBlockLength; + /// Portion of the block weight that is available to all normal transactions. 
+ type AvailableBlockRatio = AvailableBlockRatio; + /// Version of the runtime. + type Version = Version; + /// Converts a module to the index of the module in `construct_runtime!`. + /// + /// This type is being generated by `construct_runtime!`. + type ModuleToIndex = ModuleToIndex; +} + +impl aura::Trait for Runtime { + type AuthorityId = AuraId; +} + +impl grandpa::Trait for Runtime { + type Event = Event; +} + +impl indices::Trait for Runtime { + /// The type for recording indexing into the account enumeration. If this ever overflows, there + /// will be problems! + type AccountIndex = AccountIndex; + /// Use the standard means of resolving an index hint from an id. + type ResolveHint = indices::SimpleResolveHint; + /// Determine whether an account is dead. + type IsDeadAccount = Balances; + /// The ubiquitous event type. + type Event = Event; +} + +parameter_types! { + pub const MinimumPeriod: u64 = SLOT_DURATION / 2; +} + +impl timestamp::Trait for Runtime { + /// A timestamp: milliseconds since the unix epoch. + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = MinimumPeriod; +} + +parameter_types! { + pub const ExistentialDeposit: u128 = 500; + pub const CreationFee: u128 = 0; +} + +impl balances::Trait for Runtime { + /// The type for recording an account's balance. + type Balance = Balance; + /// What to do if an account is fully reaped from the system. + type OnReapAccount = System; + /// What to do if a new account is created. + type OnNewAccount = Indices; + /// The ubiquitous event type. + type Event = Event; + type DustRemoval = (); + type TransferPayment = (); + type ExistentialDeposit = ExistentialDeposit; + type CreationFee = CreationFee; +} + +parameter_types! 
{ + pub const TransactionBaseFee: Balance = 0; + pub const TransactionByteFee: Balance = 1; +} + +impl transaction_payment::Trait for Runtime { + type Currency = balances::Module; + type OnTransactionPayment = (); + type TransactionBaseFee = TransactionBaseFee; + type TransactionByteFee = TransactionByteFee; + type WeightToFee = ConvertInto; + type FeeMultiplierUpdate = (); +} + +impl sudo::Trait for Runtime { + type Event = Event; + type Proposal = Call; +} + +construct_runtime!( + pub enum Runtime where + Block = Block, + NodeBlock = opaque::Block, + UncheckedExtrinsic = UncheckedExtrinsic + { + System: system::{Module, Call, Storage, Config, Event}, + Timestamp: timestamp::{Module, Call, Storage, Inherent}, + Aura: aura::{Module, Config, Inherent(Timestamp)}, + Grandpa: grandpa::{Module, Call, Storage, Config, Event}, + Indices: indices, + Balances: balances, + TransactionPayment: transaction_payment::{Module, Storage}, + Sudo: sudo, + RandomnessCollectiveFlip: randomness_collective_flip::{Module, Call, Storage}, + } +); + +/// The address format for describing accounts. +pub type Address = ::Source; +/// Block header type as expected by this runtime. +pub type Header = generic::Header; +/// Block type as expected by this runtime. +pub type Block = generic::Block; +/// A Block signed with a Justification +pub type SignedBlock = generic::SignedBlock; +/// BlockId type as expected by this runtime. +pub type BlockId = generic::BlockId; +/// The SignedExtension to the basic transaction logic. +pub type SignedExtra = ( + system::CheckVersion, + system::CheckGenesis, + system::CheckEra, + system::CheckNonce, + system::CheckWeight, + transaction_payment::ChargeTransactionPayment +); +/// Unchecked extrinsic type as expected by this runtime. +pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; +/// Extrinsic type that has already been checked. +pub type CheckedExtrinsic = generic::CheckedExtrinsic; +/// Executive: handles dispatch to the various modules. 
+pub type Executive = frame_executive::Executive, Runtime, AllModules>; + +impl_runtime_apis! { + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + Runtime::metadata().into() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + + fn random_seed() -> ::Hash { + RandomnessCollectiveFlip::random_seed() + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction(tx: ::Extrinsic) -> TransactionValidity { + Executive::validate_transaction(tx) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> u64 { + Aura::slot_duration() + } + + fn authorities() -> Vec { + Aura::authorities() + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + opaque::SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, sp_core::crypto::KeyTypeId)>> { + opaque::SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl fg_primitives::GrandpaApi for Runtime { + fn grandpa_authorities() -> GrandpaAuthorityList { + Grandpa::grandpa_authorities() + } + } 
+} diff --git a/bin/sassafras-template/scripts/init.sh b/bin/sassafras-template/scripts/init.sh new file mode 100755 index 0000000000000..1405a41ef333e --- /dev/null +++ b/bin/sassafras-template/scripts/init.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e + +echo "*** Initializing WASM build environment" + +if [ -z $CI_PROJECT_NAME ] ; then + rustup update nightly + rustup update stable +fi + +rustup target add wasm32-unknown-unknown --toolchain nightly From c25faa00fea23d29e91bac63553b499cce9b9bcc Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 16 Feb 2020 20:48:25 +0100 Subject: [PATCH 39/75] [WIP] Copy pallet-babe to pallet-sassafras --- Cargo.lock | 10 +- bin/sassafras-template/node/Cargo.toml | 6 +- bin/sassafras-template/node/src/chain_spec.rs | 12 +- bin/sassafras-template/node/src/command.rs | 1 - bin/sassafras-template/node/src/service.rs | 17 +- bin/sassafras-template/runtime/Cargo.toml | 9 +- bin/sassafras-template/runtime/src/lib.rs | 42 +- frame/sassafras/Cargo.toml | 40 ++ frame/sassafras/src/lib.rs | 553 ++++++++++++++++++ primitives/consensus/sassafras/src/digests.rs | 14 +- primitives/consensus/sassafras/src/lib.rs | 3 +- primitives/consensus/sassafras/src/vrf.rs | 21 +- 12 files changed, 653 insertions(+), 75 deletions(-) create mode 100644 frame/sassafras/Cargo.toml create mode 100644 frame/sassafras/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index 8a4fdb80a26cc..df05af51c20fb 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5469,16 +5469,14 @@ dependencies = [ "sc-basic-authorship", "sc-cli", "sc-client", - "sc-consensus-aura", + "sc-consensus-sassafras", "sc-executor", - "sc-finality-grandpa", "sc-network", "sc-service", "sc-transaction-pool", "sp-consensus", - "sp-consensus-aura", + "sp-consensus-sassafras", "sp-core", - "sp-finality-grandpa", "sp-inherents", "sp-runtime", "sp-transaction-pool", @@ -5494,9 +5492,7 @@ dependencies = [ "frame-executive", "frame-support", "frame-system", - "pallet-aura", "pallet-balances", - 
"pallet-grandpa", "pallet-indices", "pallet-randomness-collective-flip", "pallet-sudo", @@ -5506,7 +5502,7 @@ dependencies = [ "serde", "sp-api", "sp-block-builder", - "sp-consensus-aura", + "sp-consensus-sassafras", "sp-core", "sp-inherents", "sp-io", diff --git a/bin/sassafras-template/node/Cargo.toml b/bin/sassafras-template/node/Cargo.toml index 7732fe335e9d1..865923fbb027c 100644 --- a/bin/sassafras-template/node/Cargo.toml +++ b/bin/sassafras-template/node/Cargo.toml @@ -22,11 +22,9 @@ sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } sc-network = { version = "0.8", path = "../../../client/network" } -sc-consensus-aura = { version = "0.8", path = "../../../client/consensus/aura" } -sp-consensus-aura = { version = "0.8", path = "../../../primitives/consensus/aura" } +sc-consensus-sassafras = { version = "0.8", path = "../../../client/consensus/sassafras" } +sp-consensus-sassafras = { version = "0.8", path = "../../../primitives/consensus/sassafras" } sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } -grandpa = { version = "0.8", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } -grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } sc-client = { version = "0.8", path = "../../../client/" } sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } sc-basic-authorship = { path = "../../../client/basic-authorship" } diff --git a/bin/sassafras-template/node/src/chain_spec.rs b/bin/sassafras-template/node/src/chain_spec.rs index 4ee3b0805e12c..00df7d4fb99fe 100644 --- a/bin/sassafras-template/node/src/chain_spec.rs +++ b/bin/sassafras-template/node/src/chain_spec.rs @@ -1,10 +1,9 @@ use sp_core::{Pair, Public, sr25519}; use 
sassafras_template_runtime::{ - AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig, + AccountId, BalancesConfig, GenesisConfig, SudoConfig, IndicesConfig, SystemConfig, WASM_BINARY, Signature }; -use sp_consensus_aura::sr25519::{AuthorityId as AuraId}; -use grandpa_primitives::{AuthorityId as GrandpaId}; +use sp_consensus_sassafras::AuthorityId as SassafrasId; use sc_service; use sp_runtime::traits::{Verify, IdentifyAccount}; @@ -42,11 +41,8 @@ pub fn get_account_id_from_seed(seed: &str) -> AccountId where } /// Helper function to generate an authority key for Aura -pub fn get_authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) { - ( - get_from_seed::(s), - get_from_seed::(s), - ) +pub fn get_authority_keys_from_seed(s: &str) -> SassafrasId { + get_from_seed::(s) } impl Alternative { diff --git a/bin/sassafras-template/node/src/command.rs b/bin/sassafras-template/node/src/command.rs index e7e386703deee..598b3345bca40 100644 --- a/bin/sassafras-template/node/src/command.rs +++ b/bin/sassafras-template/node/src/command.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; use sc_cli::{VersionInfo, error}; use crate::service; use crate::chain_spec; diff --git a/bin/sassafras-template/node/src/service.rs b/bin/sassafras-template/node/src/service.rs index 0de7d0052e689..f0484d36a3267 100644 --- a/bin/sassafras-template/node/src/service.rs +++ b/bin/sassafras-template/node/src/service.rs @@ -9,8 +9,6 @@ use sp_inherents::InherentDataProviders; use sc_network::{construct_simple_protocol}; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; -use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; -use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; // Our native executor instance. native_executor_instance!( @@ -48,10 +46,17 @@ macro_rules! 
new_full_start { let select_chain = select_chain.take() .ok_or_else(|| sc_service::Error::SelectChainRequired)?; - let (grandpa_block_import, grandpa_link) = - grandpa::block_import::<_, _, _, sassafras_template_runtime::RuntimeApi, _>( - client.clone(), &*client, select_chain - )?; + let (block_import, sassafras_link) = sc_consensus_sassafras::block_import( + sc_consensus_sassafras::Config::get_or_compute(&*client)?, + client.clone(), + client.clone(), + client.clone(), + )?; + + let import_queue = sc_consensus_sassafras::import_queue( + sassafras_link.clone(), + block_import.clone(, + let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( grandpa_block_import.clone(), client.clone(), diff --git a/bin/sassafras-template/runtime/Cargo.toml b/bin/sassafras-template/runtime/Cargo.toml index 50885c0e20908..7f620105f7d64 100644 --- a/bin/sassafras-template/runtime/Cargo.toml +++ b/bin/sassafras-template/runtime/Cargo.toml @@ -7,11 +7,8 @@ license = "Unlicense" [dependencies] codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } - -aura = { version = "2.0.0", default-features = false, package = "pallet-aura", path = "../../../frame/aura" } balances = { version = "2.0.0", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" } -grandpa = { version = "2.0.0", default-features = false, package = "pallet-grandpa", path = "../../../frame/grandpa" } indices = { version = "2.0.0", default-features = false, package = "pallet-indices", path = "../../../frame/indices" } randomness-collective-flip = { version = "2.0.0", default-features = false, package = "pallet-randomness-collective-flip", path = "../../../frame/randomness-collective-flip" } sudo = { version = "2.0.0", default-features = false, package = "pallet-sudo", path = "../../../frame/sudo" } @@ -22,7 +19,7 
@@ frame-executive = { version = "2.0.0", default-features = false, path = "../../. serde = { version = "1.0.101", optional = true, features = ["derive"] } sp-api = { version = "2.0.0", default-features = false, path = "../../../primitives/api" } sp-block-builder = { path = "../../../primitives/block-builder", default-features = false} -sp-consensus-aura = { version = "0.8", default-features = false, path = "../../../primitives/consensus/aura" } +sp-consensus-sassafras = { version = "0.8", default-features = false, path = "../../../primitives/consensus/sassafras" } sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } sp-inherents = { path = "../../../primitives/inherents", default-features = false} sp-io = { version = "2.0.0", default-features = false, path = "../../../primitives/io" } @@ -39,18 +36,16 @@ wasm-builder-runner = { version = "1.0.4", package = "substrate-wasm-builder-run [features] default = ["std"] std = [ - "aura/std", "balances/std", "codec/std", "frame-executive/std", "frame-support/std", - "grandpa/std", "indices/std", "randomness-collective-flip/std", "serde", "sp-api/std", "sp-block-builder/std", - "sp-consensus-aura/std", + "sp-consensus-sassafras/std", "sp-core/std", "sp-inherents/std", "sp-io/std", diff --git a/bin/sassafras-template/runtime/src/lib.rs b/bin/sassafras-template/runtime/src/lib.rs index 6ff8f956719f9..f20c5b5810588 100644 --- a/bin/sassafras-template/runtime/src/lib.rs +++ b/bin/sassafras-template/runtime/src/lib.rs @@ -18,9 +18,6 @@ use sp_runtime::traits::{ BlakeTwo256, Block as BlockT, StaticLookup, Verify, ConvertInto, IdentifyAccount }; use sp_api::impl_runtime_apis; -use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use grandpa::AuthorityList as GrandpaAuthorityList; -use grandpa::fg_primitives; use sp_version::RuntimeVersion; #[cfg(feature = "std")] use sp_version::NativeVersion; @@ -80,10 +77,7 @@ pub mod opaque { pub type BlockId = generic::BlockId; impl_opaque_keys! 
{ - pub struct SessionKeys { - pub aura: Aura, - pub grandpa: Grandpa, - } + pub struct SessionKeys { } } } @@ -160,14 +154,6 @@ impl system::Trait for Runtime { type ModuleToIndex = ModuleToIndex; } -impl aura::Trait for Runtime { - type AuthorityId = AuraId; -} - -impl grandpa::Trait for Runtime { - type Event = Event; -} - impl indices::Trait for Runtime { /// The type for recording indexing into the account enumeration. If this ever overflows, there /// will be problems! @@ -187,7 +173,7 @@ parameter_types! { impl timestamp::Trait for Runtime { /// A timestamp: milliseconds since the unix epoch. type Moment = u64; - type OnTimestampSet = Aura; + type OnTimestampSet = (); type MinimumPeriod = MinimumPeriod; } @@ -238,8 +224,6 @@ construct_runtime!( { System: system::{Module, Call, Storage, Config, Event}, Timestamp: timestamp::{Module, Call, Storage, Inherent}, - Aura: aura::{Module, Config, Inherent(Timestamp)}, - Grandpa: grandpa::{Module, Call, Storage, Config, Event}, Indices: indices, Balances: balances, TransactionPayment: transaction_payment::{Module, Storage}, @@ -332,13 +316,15 @@ impl_runtime_apis! { } } - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { - Aura::slot_duration() - } - - fn authorities() -> Vec { - Aura::authorities() + impl sp_consensus_sassafras::SassafrasApi for Runtime { + fn configuration() -> sp_consensus_sassafras::SassafrasConfiguration { + sp_consensus_sassafras::SassafrasConfiguration { + slot_duration: 2, + epoch_length: 4, + genesis_authorities: Vec::new(), + genesis_proofs: Vec::new(), + randomness: sp_consensus_sassafras::Randomness([0u8; 32]), + } } } @@ -353,10 +339,4 @@ impl_runtime_apis! 
{ opaque::SessionKeys::decode_into_raw_public_keys(&encoded) } } - - impl fg_primitives::GrandpaApi for Runtime { - fn grandpa_authorities() -> GrandpaAuthorityList { - Grandpa::grandpa_authorities() - } - } } diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml new file mode 100644 index 0000000000000..c7fc933d8d8d3 --- /dev/null +++ b/frame/sassafras/Cargo.toml @@ -0,0 +1,40 @@ +[package] +name = "pallet-sassafras" +version = "0.8.0" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0" + +[dependencies] +hex-literal = "0.2.1" +codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.101", optional = true } +sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-system = { version = "2.0.0", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } +sp-timestamp = { version = "2.0.0", default-features = false, path = "../../primitives/timestamp" } +pallet-session = { version = "2.0.0", default-features = false, path = "../session" } +sp-consensus-sassafras = { version = "0.8", default-features = false, path = "../../primitives/consensus/sassafras" } +sp-io = { path = "../../primitives/io", default-features = false } + +[features] +default = ["std"] +std = [ + "serde", + "codec/std", + "sp-std/std", + "frame-support/std", + "sp-runtime/std", + "sp-staking/std", + "frame-system/std", + "pallet-timestamp/std", + "sp-timestamp/std", + "sp-inherents/std", + 
"sp-consensus-babe/std", + "pallet-session/std", + "sp-io/std", +] diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs new file mode 100644 index 0000000000000..eec0e93deff0e --- /dev/null +++ b/frame/sassafras/src/lib.rs @@ -0,0 +1,553 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Consensus extension module for Sassafras consensus. Collects on-chain randomness +//! from VRF outputs and manages epoch transitions. 
+ +#![cfg_attr(not(feature = "std"), no_std)] +#![forbid(unused_must_use, unsafe_code, unused_variables, unused_must_use)] +#![deny(unused_imports)] +pub use pallet_timestamp; + +use sp_std::{result, prelude::*}; +use frame_support::{decl_storage, decl_module, traits::FindAuthor, traits::Get}; +use sp_timestamp::OnTimestampSet; +use sp_runtime::{generic::DigestItem, ConsensusEngineId, Perbill}; +use sp_runtime::traits::{IsMember, SaturatedConversion, Saturating, RandomnessBeacon}; +use sp_staking::{ + SessionIndex, + offence::{Offence, Kind}, +}; + +use codec::{Encode, Decode}; +use sp_inherents::{InherentIdentifier, InherentData, ProvideInherent, MakeFatalError}; +use sp_consensus_sassafras::{ + SASSAFRAS_ENGINE_ID, ConsensusLog, SassafrasAuthorityWeight, SlotNumber, + inherents::{INHERENT_IDENTIFIER, SassafrasInherentData}, + digests::{NextEpochDescriptor, RawPreDigest}, +}; +pub use sp_consensus_sassafras::{AuthorityId, VRF_OUTPUT_LENGTH, PUBLIC_KEY_LENGTH}; + +#[cfg(all(feature = "std", test))] +mod tests; + +#[cfg(all(feature = "std", test))] +mod mock; + +pub trait Trait: pallet_timestamp::Trait { + /// The amount of time, in slots, that each epoch should last. + type EpochDuration: Get; + + /// The expected average block time at which Sassafras should be creating + /// blocks. Since Sassafras is probabilistic it is not trivial to figure out + /// what the expected average block time should be based on the slot + /// duration and the security parameter `c` (where `1 - c` represents + /// the probability of a slot being empty). + type ExpectedBlockTime: Get; + + /// Sassafras requires some logic to be triggered on every block to query for whether an epoch + /// has ended and to perform the transition to the next epoch. + /// + /// Typically, the `ExternalTrigger` type should be used. An internal trigger should only be used + /// when no other module is responsible for changing authority set. 
+ type EpochChangeTrigger: EpochChangeTrigger; +} + +/// Trigger an epoch change, if any should take place. +pub trait EpochChangeTrigger { + /// Trigger an epoch change, if any should take place. This should be called + /// during every block, after initialization is done. + fn trigger(now: T::BlockNumber); +} + +/// A type signifying to Sassafras that an external trigger +/// for epoch changes (e.g. pallet-session) is used. +pub struct ExternalTrigger; + +impl EpochChangeTrigger for ExternalTrigger { + fn trigger(_: T::BlockNumber) { } // nothing - trigger is external. +} + +/// A type signifying to Sassafras that it should perform epoch changes +/// with an internal trigger, recycling the same authorities forever. +pub struct SameAuthoritiesForever; + +impl EpochChangeTrigger for SameAuthoritiesForever { + fn trigger(now: T::BlockNumber) { + if >::should_epoch_change(now) { + let authorities = >::authorities(); + let next_authorities = authorities.clone(); + + >::enact_epoch_change(authorities, next_authorities); + } + } +} + +/// The length of the Sassafras randomness +pub const RANDOMNESS_LENGTH: usize = 32; + +const UNDER_CONSTRUCTION_SEGMENT_LENGTH: usize = 256; + +type MaybeVrf = Option<[u8; 32 /* VRF_OUTPUT_LENGTH */]>; + +decl_storage! { + trait Store for Module as Sassafras { + /// Current epoch index. + pub EpochIndex get(fn epoch_index): u64; + + /// Current epoch authorities. + pub Authorities get(fn authorities): Vec<(AuthorityId, SassafrasAuthorityWeight)>; + + /// The slot at which the first epoch actually started. This is 0 + /// until the first block of the chain. + pub GenesisSlot get(fn genesis_slot): u64; + + /// Current slot number. + pub CurrentSlot get(fn current_slot): u64; + + /// The epoch randomness for the *current* epoch. + /// + /// # Security + /// + /// This MUST NOT be used for gambling, as it can be influenced by a + /// malicious validator in the short term. 
It MAY be used in many + /// cryptographic protocols, however, so long as one remembers that this + /// (like everything else on-chain) it is public. For example, it can be + /// used where a number is needed that cannot have been chosen by an + /// adversary, for purposes such as public-coin zero-knowledge proofs. + // NOTE: the following fields don't use the constants to define the + // array size because the metadata API currently doesn't resolve the + // variable to its underlying value. + pub Randomness get(fn randomness): [u8; 32 /* RANDOMNESS_LENGTH */]; + + /// Next epoch randomness. + NextRandomness: [u8; 32 /* RANDOMNESS_LENGTH */]; + + /// Randomness under construction. + /// + /// We make a tradeoff between storage accesses and list length. + /// We store the under-construction randomness in segments of up to + /// `UNDER_CONSTRUCTION_SEGMENT_LENGTH`. + /// + /// Once a segment reaches this length, we begin the next one. + /// We reset all segments and return to `0` at the beginning of every + /// epoch. + SegmentIndex build(|_| 0): u32; + UnderConstruction: map hasher(blake2_256) u32 => Vec<[u8; 32 /* VRF_OUTPUT_LENGTH */]>; + + /// Temporary value (cleared at block finalization) which is `Some` + /// if per-block initialization has already been called for current block. + Initialized get(fn initialized): Option; + } + add_extra_genesis { + config(authorities): Vec<(AuthorityId, SassafrasAuthorityWeight)>; + build(|config| Module::::initialize_authorities(&config.authorities)) + } +} + +decl_module! { + /// The Sassafras SRML module + pub struct Module for enum Call where origin: T::Origin { + /// The number of **slots** that an epoch takes. We couple sessions to + /// epochs, i.e. we start a new session once the new epoch begins. + const EpochDuration: u64 = T::EpochDuration::get(); + + /// The expected average block time at which Sassafras should be creating + /// blocks. 
Since Sassafras is probabilistic it is not trivial to figure out + /// what the expected average block time should be based on the slot + /// duration and the security parameter `c` (where `1 - c` represents + /// the probability of a slot being empty). + const ExpectedBlockTime: T::Moment = T::ExpectedBlockTime::get(); + + /// Initialization + fn on_initialize(now: T::BlockNumber) { + Self::do_initialize(now); + } + + /// Block finalization + fn on_finalize() { + // at the end of the block, we can safely include the new VRF output + // from this block into the under-construction randomness. If we've determined + // that this block was the first in a new epoch, the changeover logic has + // already occurred at this point, so the under-construction randomness + // will only contain outputs from the right epoch. + if let Some(Some(vrf_output)) = Initialized::take() { + Self::deposit_vrf_output(&vrf_output); + } + } + } +} + +impl RandomnessBeacon for Module { + fn random() -> [u8; VRF_OUTPUT_LENGTH] { + Self::randomness() + } +} + +/// A Sassafras public key +pub type SassafrasKey = [u8; PUBLIC_KEY_LENGTH]; + +impl FindAuthor for Module { + fn find_author<'a, I>(digests: I) -> Option where + I: 'a + IntoIterator + { + for (id, mut data) in digests.into_iter() { + if id == SASSAFRAS_ENGINE_ID { + let pre_digest = RawPreDigest::decode(&mut data).ok()?; + return Some(match pre_digest { + RawPreDigest::Primary { authority_index, .. } => + authority_index, + RawPreDigest::Secondary { authority_index, .. 
} => + authority_index, + }); + } + } + + return None; + } +} + +impl IsMember for Module { + fn is_member(authority_id: &AuthorityId) -> bool { + >::authorities() + .iter() + .any(|id| &id.0 == authority_id) + } +} + +impl pallet_session::ShouldEndSession for Module { + fn should_end_session(now: T::BlockNumber) -> bool { + // it might be (and it is in current implementation) that session module is calling + // should_end_session() from it's own on_initialize() handler + // => because pallet_session on_initialize() is called earlier than ours, let's ensure + // that we have synced with digest before checking if session should be ended. + Self::do_initialize(now); + + Self::should_epoch_change(now) + } +} + +// TODO [slashing]: @marcio use this, remove the dead_code annotation. +/// A Sassafras equivocation offence report. +/// +/// When a validator released two or more blocks at the same slot. +#[allow(dead_code)] +struct SassafrasEquivocationOffence { + /// A Sassafras slot number in which this incident happened. + slot: u64, + /// The session index in which the incident happened. + session_index: SessionIndex, + /// The size of the validator set at the time of the offence. + validator_set_count: u32, + /// The authority that produced the equivocation. 
+ offender: FullIdentification, +} + +impl Offence for SassafrasEquivocationOffence { + const ID: Kind = *b"sassafras:equivocatio"; + type TimeSlot = u64; + + fn offenders(&self) -> Vec { + vec![self.offender.clone()] + } + + fn session_index(&self) -> SessionIndex { + self.session_index + } + + fn validator_set_count(&self) -> u32 { + self.validator_set_count + } + + fn time_slot(&self) -> Self::TimeSlot { + self.slot + } + + fn slash_fraction( + offenders_count: u32, + validator_set_count: u32, + ) -> Perbill { + // the formula is min((3k / n)^2, 1) + let x = Perbill::from_rational_approximation(3 * offenders_count, validator_set_count); + // _ ^ 2 + x.square() + } +} + +impl Module { + /// Determine the Sassafras slot duration based on the Timestamp module configuration. + pub fn slot_duration() -> T::Moment { + // we double the minimum block-period so each author can always propose within + // the majority of their slot. + ::MinimumPeriod::get().saturating_mul(2.into()) + } + + /// Determine whether an epoch change should take place at this block. + /// Assumes that initialization has already taken place. + pub fn should_epoch_change(now: T::BlockNumber) -> bool { + // The epoch has technically ended during the passage of time + // between this block and the last, but we have to "end" the epoch now, + // since there is no earlier possible block we could have done it. + // + // The exception is for block 1: the genesis has slot 0, so we treat + // epoch 0 as having started at the slot of block 1. We want to use + // the same randomness and validator set as signalled in the genesis, + // so we don't rotate the epoch. + now != sp_runtime::traits::One::one() && { + let diff = CurrentSlot::get().saturating_sub(Self::current_epoch_start()); + diff >= T::EpochDuration::get() + } + } + + /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_epoch_change` has returned `true`, + /// and the caller is the only caller of this function. 
+ /// + /// Typically, this is not handled directly by the user, but by higher-level validator-set manager logic like + /// `pallet-session`. + pub fn enact_epoch_change( + authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + next_authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + ) { + // PRECONDITION: caller has done initialization and is guaranteed + // by the session module to be called before this. + #[cfg(debug_assertions)] + { + assert!(Self::initialized().is_some()) + } + + // Update epoch index + let epoch_index = EpochIndex::get() + .checked_add(1) + .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); + + EpochIndex::put(epoch_index); + Authorities::put(authorities); + + // Update epoch randomness. + let next_epoch_index = epoch_index + .checked_add(1) + .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); + + // Returns randomness for the current epoch and computes the *next* + // epoch randomness. + let randomness = Self::randomness_change_epoch(next_epoch_index); + Randomness::put(randomness); + + // After we update the current epoch, we signal the *next* epoch change + // so that nodes can track changes. + let next_randomness = NextRandomness::get(); + + let next = NextEpochDescriptor { + authorities: next_authorities, + randomness: next_randomness, + }; + + Self::deposit_consensus(ConsensusLog::NextEpochData(next)) + } + + // finds the start slot of the current epoch. only guaranteed to + // give correct results after `do_initialize` of the first block + // in the chain (as its result is based off of `GenesisSlot`). 
+ fn current_epoch_start() -> SlotNumber { + (EpochIndex::get() * T::EpochDuration::get()) + GenesisSlot::get() + } + + fn deposit_consensus(new: U) { + let log: DigestItem = DigestItem::Consensus(SASSAFRAS_ENGINE_ID, new.encode()); + >::deposit_log(log.into()) + } + + fn deposit_vrf_output(vrf_output: &[u8; VRF_OUTPUT_LENGTH]) { + let segment_idx = ::get(); + let mut segment = ::get(&segment_idx); + if segment.len() < UNDER_CONSTRUCTION_SEGMENT_LENGTH { + // push onto current segment: not full. + segment.push(*vrf_output); + ::insert(&segment_idx, &segment); + } else { + // move onto the next segment and update the index. + let segment_idx = segment_idx + 1; + ::insert(&segment_idx, &vec![*vrf_output]); + ::put(&segment_idx); + } + } + + fn do_initialize(now: T::BlockNumber) { + // since do_initialize can be called twice (if session module is present) + // => let's ensure that we only modify the storage once per block + let initialized = Self::initialized().is_some(); + if initialized { + return; + } + + let maybe_pre_digest = >::digest() + .logs + .iter() + .filter_map(|s| s.as_pre_runtime()) + .filter_map(|(id, mut data)| if id == SASSAFRAS_ENGINE_ID { + RawPreDigest::decode(&mut data).ok() + } else { + None + }) + .next(); + + let maybe_vrf = maybe_pre_digest.and_then(|digest| { + // on the first non-zero block (i.e. block #1) + // this is where the first epoch (epoch #0) actually starts. + // we need to adjust internal storage accordingly. + if GenesisSlot::get() == 0 { + GenesisSlot::put(digest.slot_number()); + debug_assert_ne!(GenesisSlot::get(), 0); + + // deposit a log because this is the first block in epoch #0 + // we use the same values as genesis because we haven't collected any + // randomness yet. 
+ let next = NextEpochDescriptor { + authorities: Self::authorities(), + randomness: Self::randomness(), + }; + + Self::deposit_consensus(ConsensusLog::NextEpochData(next)) + } + + CurrentSlot::put(digest.slot_number()); + + if let RawPreDigest::Primary { vrf_output, .. } = digest { + // place the VRF output into the `Initialized` storage item + // and it'll be put onto the under-construction randomness + // later, once we've decided which epoch this block is in. + Some(vrf_output) + } else { + None + } + }); + + Initialized::put(maybe_vrf); + + // enact epoch change, if necessary. + T::EpochChangeTrigger::trigger::(now) + } + + /// Call this function exactly once when an epoch changes, to update the + /// randomness. Returns the new randomness. + fn randomness_change_epoch(next_epoch_index: u64) -> [u8; RANDOMNESS_LENGTH] { + let this_randomness = NextRandomness::get(); + let segment_idx: u32 = ::mutate(|s| sp_std::mem::replace(s, 0)); + + // overestimate to the segment being full. + let rho_size = segment_idx.saturating_add(1) as usize * UNDER_CONSTRUCTION_SEGMENT_LENGTH; + + let next_randomness = compute_randomness( + this_randomness, + next_epoch_index, + (0..segment_idx).flat_map(|i| ::take(&i)), + Some(rho_size), + ); + NextRandomness::put(&next_randomness); + this_randomness + } + + fn initialize_authorities(authorities: &[(AuthorityId, SassafrasAuthorityWeight)]) { + if !authorities.is_empty() { + assert!(Authorities::get().is_empty(), "Authorities are already initialized!"); + Authorities::put(authorities); + } + } +} + +impl OnTimestampSet for Module { + fn on_timestamp_set(_moment: T::Moment) { } +} + +impl sp_runtime::BoundToRuntimeAppPublic for Module { + type Public = AuthorityId; +} + +impl pallet_session::OneSessionHandler for Module { + type Key = AuthorityId; + + fn on_genesis_session<'a, I: 'a>(validators: I) + where I: Iterator + { + let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); + Self::initialize_authorities(&authorities); + } 
+ + fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, queued_validators: I) + where I: Iterator + { + let authorities = validators.map(|(_account, k)| { + (k, 1) + }).collect::>(); + + let next_authorities = queued_validators.map(|(_account, k)| { + (k, 1) + }).collect::>(); + + Self::enact_epoch_change(authorities, next_authorities) + } + + fn on_disabled(i: usize) { + Self::deposit_consensus(ConsensusLog::OnDisabled(i as u32)) + } +} + +// compute randomness for a new epoch. rho is the concatenation of all +// VRF outputs in the prior epoch. +// +// an optional size hint as to how many VRF outputs there were may be provided. +fn compute_randomness( + last_epoch_randomness: [u8; RANDOMNESS_LENGTH], + epoch_index: u64, + rho: impl Iterator, + rho_size_hint: Option, +) -> [u8; RANDOMNESS_LENGTH] { + let mut s = Vec::with_capacity(40 + rho_size_hint.unwrap_or(0) * VRF_OUTPUT_LENGTH); + s.extend_from_slice(&last_epoch_randomness); + s.extend_from_slice(&epoch_index.to_le_bytes()); + + for vrf_output in rho { + s.extend_from_slice(&vrf_output[..]); + } + + sp_io::hashing::blake2_256(&s) +} + +impl ProvideInherent for Module { + type Call = pallet_timestamp::Call; + type Error = MakeFatalError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(_: &InherentData) -> Option { + None + } + + fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { + let timestamp = match call { + pallet_timestamp::Call::set(ref timestamp) => timestamp.clone(), + _ => return Ok(()), + }; + + let timestamp_based_slot = (timestamp / Self::slot_duration()).saturated_into::(); + let seal_slot = data.sassafras_inherent_data()?; + + if timestamp_based_slot == seal_slot { + Ok(()) + } else { + Err(sp_inherents::Error::from("timestamp set in block doesn't match slot in seal").into()) + } + } +} diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 
f10091ae2a2eb..f17e5a60b4645 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -14,15 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use codec::{Encode, Decode, Codec}; +use codec::{Encode, Decode}; +use sp_std::vec::Vec; use sp_core::RuntimeDebug; -#[cfg(feature = "std")] -use sp_runtime::{DigestItem, generic::OpaqueDigestItemId}; use crate::{ - SASSAFRAS_ENGINE_ID, Randomness, VRFProof, VRFOutput, VRFIndex, + Randomness, VRFProof, VRFOutput, VRFIndex, AuthorityIndex, SlotNumber, AuthorityId, SassafrasAuthorityWeight, - AuthoritySignature, }; +#[cfg(feature = "std")] +use codec::Codec; +#[cfg(feature = "std")] +use sp_runtime::{DigestItem, generic::OpaqueDigestItemId}; +#[cfg(feature = "std")] +use crate::{SASSAFRAS_ENGINE_ID, AuthoritySignature}; /// A digest item which is usable with Sassafras consensus. #[cfg(feature = "std")] diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index bde59c0cfef3c..7b9dbfab0533e 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -29,6 +29,7 @@ pub use crate::vrf::{ RawVRFProof, VRFProof, Randomness, }; +use sp_std::vec::Vec; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; use codec::{Encode, Decode}; @@ -103,7 +104,7 @@ pub struct SassafrasConfiguration { pub genesis_proofs: Vec, /// The randomness for the genesis epoch. 
- pub randomness: [u8; VRF_OUTPUT_LENGTH], + pub randomness: Randomness, } #[cfg(feature = "std")] diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs index 9987ede1e9bde..cba1ab4fa69d6 100644 --- a/primitives/consensus/sassafras/src/vrf.rs +++ b/primitives/consensus/sassafras/src/vrf.rs @@ -1,11 +1,21 @@ -use core::convert::TryFrom; -use codec::{Encode, Decode, EncodeLike}; -use schnorrkel::{SignatureError, errors::MultiSignatureStage}; -use sp_std::ops::{Deref, DerefMut}; +use codec::{Encode, Decode}; use sp_runtime::RuntimeDebug; +#[cfg(feature = "std")] +use std::{ops::{Deref, DerefMut}, convert::TryFrom}; +#[cfg(feature = "std")] +use codec::EncodeLike; +#[cfg(feature = "std")] +use schnorrkel::{SignatureError, errors::MultiSignatureStage}; +#[cfg(feature = "std")] pub use schnorrkel::vrf::{VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH}; +#[cfg(not(feature = "std"))] +pub const VRF_PROOF_LENGTH: usize = 64; + +#[cfg(not(feature = "std"))] +pub const VRF_OUTPUT_LENGTH: usize = 32; + #[derive(Clone, Eq, PartialEq, RuntimeDebug, Encode, Decode)] pub struct RawVRFOutput(pub [u8; VRF_OUTPUT_LENGTH]); @@ -162,4 +172,5 @@ fn convert_error(e: SignatureError) -> codec::Error { } } -pub type Randomness = [u8; VRF_OUTPUT_LENGTH]; +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct Randomness(pub [u8; VRF_OUTPUT_LENGTH]); From e3837788b8588c29a8d1d21048cff87b4599245f Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 16 Feb 2020 21:34:12 +0100 Subject: [PATCH 40/75] Make the sassafras template compile --- Cargo.lock | 24 +++++++ Cargo.toml | 1 + bin/sassafras-template/node/Cargo.toml | 2 + bin/sassafras-template/node/src/chain_spec.rs | 15 +++-- bin/sassafras-template/node/src/service.rs | 64 ++++++++++--------- bin/sassafras-template/runtime/Cargo.toml | 5 ++ bin/sassafras-template/runtime/src/lib.rs | 41 ++++++++++-- frame/sassafras/Cargo.toml | 2 +- frame/sassafras/src/lib.rs | 22 ++++--- 
primitives/consensus/sassafras/src/lib.rs | 6 ++ primitives/consensus/sassafras/src/vrf.rs | 3 +- 11 files changed, 129 insertions(+), 56 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index df05af51c20fb..5361c338ae45e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4263,6 +4263,26 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-sassafras" +version = "0.8.0" +dependencies = [ + "frame-support", + "frame-system", + "hex-literal", + "pallet-session", + "pallet-timestamp", + "parity-scale-codec", + "serde", + "sp-consensus-sassafras", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std", + "sp-timestamp", +] + [[package]] name = "pallet-scored-pool" version = "2.0.0" @@ -5471,12 +5491,14 @@ dependencies = [ "sc-client", "sc-consensus-sassafras", "sc-executor", + "sc-finality-grandpa", "sc-network", "sc-service", "sc-transaction-pool", "sp-consensus", "sp-consensus-sassafras", "sp-core", + "sp-finality-grandpa", "sp-inherents", "sp-runtime", "sp-transaction-pool", @@ -5493,8 +5515,10 @@ dependencies = [ "frame-support", "frame-system", "pallet-balances", + "pallet-grandpa", "pallet-indices", "pallet-randomness-collective-flip", + "pallet-sassafras", "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", diff --git a/Cargo.toml b/Cargo.toml index c903954aafb29..025de8a56e9b6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -63,6 +63,7 @@ members = [ "frame/authority-discovery", "frame/authorship", "frame/babe", + "frame/sassafras", "frame/balances", "frame/collective", "frame/contracts", diff --git a/bin/sassafras-template/node/Cargo.toml b/bin/sassafras-template/node/Cargo.toml index 865923fbb027c..d6028e754a2c4 100644 --- a/bin/sassafras-template/node/Cargo.toml +++ b/bin/sassafras-template/node/Cargo.toml @@ -28,6 +28,8 @@ sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" sc-client = { version = "0.8", path = "../../../client/" } sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" 
} sc-basic-authorship = { path = "../../../client/basic-authorship" } +grandpa = { version = "0.8", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } sassafras-template-runtime = { version = "2.0.0", path = "../runtime" } diff --git a/bin/sassafras-template/node/src/chain_spec.rs b/bin/sassafras-template/node/src/chain_spec.rs index 00df7d4fb99fe..08bd6b8258895 100644 --- a/bin/sassafras-template/node/src/chain_spec.rs +++ b/bin/sassafras-template/node/src/chain_spec.rs @@ -1,8 +1,9 @@ use sp_core::{Pair, Public, sr25519}; use sassafras_template_runtime::{ - AccountId, BalancesConfig, GenesisConfig, + AccountId, BalancesConfig, GenesisConfig, GrandpaConfig, SassafrasConfig, SudoConfig, IndicesConfig, SystemConfig, WASM_BINARY, Signature }; +use grandpa_primitives::AuthorityId as GrandpaId; use sp_consensus_sassafras::AuthorityId as SassafrasId; use sc_service; use sp_runtime::traits::{Verify, IdentifyAccount}; @@ -40,9 +41,9 @@ pub fn get_account_id_from_seed(seed: &str) -> AccountId where AccountPublic::from(get_from_seed::(seed)).into_account() } -/// Helper function to generate an authority key for Aura -pub fn get_authority_keys_from_seed(s: &str) -> SassafrasId { - get_from_seed::(s) +/// Helper function to generate an authority key for Sassafras and Grandpa. 
+pub fn get_authority_keys_from_seed(s: &str) -> (SassafrasId, GrandpaId) { + (get_from_seed::(s), get_from_seed::(s)) } impl Alternative { @@ -114,7 +115,7 @@ impl Alternative { } } -fn testnet_genesis(initial_authorities: Vec<(AuraId, GrandpaId)>, +fn testnet_genesis(initial_authorities: Vec<(SassafrasId, GrandpaId)>, root_key: AccountId, endowed_accounts: Vec, _enable_println: bool) -> GenesisConfig { @@ -132,8 +133,8 @@ fn testnet_genesis(initial_authorities: Vec<(AuraId, GrandpaId)>, sudo: Some(SudoConfig { key: root_key, }), - aura: Some(AuraConfig { - authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(), + sassafras: Some(SassafrasConfig { + authorities: initial_authorities.iter().map(|x| (x.0.clone(), 1)).collect(), }), grandpa: Some(GrandpaConfig { authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), diff --git a/bin/sassafras-template/node/src/service.rs b/bin/sassafras-template/node/src/service.rs index f0484d36a3267..740ca06f6fccf 100644 --- a/bin/sassafras-template/node/src/service.rs +++ b/bin/sassafras-template/node/src/service.rs @@ -2,6 +2,7 @@ use std::sync::Arc; use std::time::Duration; +use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; use sc_client::LongestChain; use sassafras_template_runtime::{self, GenesisConfig, opaque::Block, RuntimeApi}; use sc_service::{error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder}; @@ -42,38 +43,34 @@ macro_rules! new_full_start { let pool = sc_transaction_pool::BasicPool::new(config, std::sync::Arc::new(pool_api)); Ok(pool) })? 
- .with_import_queue(|_config, client, mut select_chain, transaction_pool| { + .with_import_queue(|_config, client, mut select_chain, _transaction_pool| { let select_chain = select_chain.take() .ok_or_else(|| sc_service::Error::SelectChainRequired)?; + let (grandpa_block_import, grandpa_link) = grandpa::block_import( + client.clone(), + &*client, + select_chain, + )?; + let justification_import = grandpa_block_import.clone(); let (block_import, sassafras_link) = sc_consensus_sassafras::block_import( sc_consensus_sassafras::Config::get_or_compute(&*client)?, - client.clone(), + grandpa_block_import, client.clone(), client.clone(), )?; let import_queue = sc_consensus_sassafras::import_queue( sassafras_link.clone(), - block_import.clone(, - - - let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( - grandpa_block_import.clone(), client.clone(), - ); - - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _>( - sc_consensus_aura::SlotDuration::get_or_compute(&*client)?, - aura_block_import, - Some(Box::new(grandpa_block_import.clone())), + block_import.clone(), + Some(Box::new(justification_import)), None, + client.clone(), client, inherent_data_providers.clone(), - Some(transaction_pool), )?; - import_setup = Some((grandpa_block_import, grandpa_link)); - + import_setup = Some((block_import, grandpa_link, sassafras_link)); Ok(import_queue) })?; @@ -97,7 +94,7 @@ pub fn new_full(config: Configuration) let (builder, mut import_setup, inherent_data_providers) = new_full_start!(config); - let (block_import, grandpa_link) = + let (block_import, grandpa_link, sassafras_link) = import_setup.take() .expect("Link Half and Block Import are present for Full Services or setup failed before. 
qed"); @@ -120,22 +117,21 @@ pub fn new_full(config: Configuration) let can_author_with = sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _>( - sc_consensus_aura::SlotDuration::get_or_compute(&*client)?, + let sassafras_config = sc_consensus_sassafras::SassafrasParams { + keystore: service.keystore(), client, select_chain, + env: proposer, block_import, - proposer, - service.network(), - inherent_data_providers.clone(), + sync_oracle: service.network(), + inherent_data_providers: inherent_data_providers.clone(), force_authoring, - service.keystore(), + sassafras_link, can_author_with, - )?; + }; - // the AURA authoring task is considered essential, i.e. if it - // fails we take down the service with it. - service.spawn_essential_task("aura", aura); + let sassafras = sc_consensus_sassafras::start_sassafras(sassafras_config)?; + service.spawn_essential_task("sassafras", sassafras); } // if the node isn't actively participating in consensus then it doesn't @@ -223,18 +219,26 @@ pub fn new_light(config: Configuration) let grandpa_block_import = grandpa::light_block_import::<_, _, _, RuntimeApi>( client.clone(), backend, &*client.clone(), Arc::new(fetch_checker), )?; + let finality_proof_import = grandpa_block_import.clone(); let finality_proof_request_builder = finality_proof_import.create_finality_proof_request_builder(); - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, ()>( - sc_consensus_aura::SlotDuration::get_or_compute(&*client)?, + let (sassafras_block_import, sassafras_link) = sc_consensus_sassafras::block_import( + sc_consensus_sassafras::Config::get_or_compute(&*client)?, grandpa_block_import, + client.clone(), + client.clone(), + )?; + + let import_queue = sc_consensus_sassafras::import_queue( + sassafras_link, + sassafras_block_import, None, Some(Box::new(finality_proof_import)), + client.clone(), client, 
inherent_data_providers.clone(), - None, )?; Ok((import_queue, finality_proof_request_builder)) diff --git a/bin/sassafras-template/runtime/Cargo.toml b/bin/sassafras-template/runtime/Cargo.toml index 7f620105f7d64..70fdff03cee01 100644 --- a/bin/sassafras-template/runtime/Cargo.toml +++ b/bin/sassafras-template/runtime/Cargo.toml @@ -7,8 +7,11 @@ license = "Unlicense" [dependencies] codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } + +sassafras = { version = "0.8.0", default-features = false, package = "pallet-sassafras", path = "../../../frame/sassafras" } balances = { version = "2.0.0", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" } +grandpa = { version = "2.0.0", default-features = false, package = "pallet-grandpa", path = "../../../frame/grandpa" } indices = { version = "2.0.0", default-features = false, package = "pallet-indices", path = "../../../frame/indices" } randomness-collective-flip = { version = "2.0.0", default-features = false, package = "pallet-randomness-collective-flip", path = "../../../frame/randomness-collective-flip" } sudo = { version = "2.0.0", default-features = false, package = "pallet-sudo", path = "../../../frame/sudo" } @@ -36,10 +39,12 @@ wasm-builder-runner = { version = "1.0.4", package = "substrate-wasm-builder-run [features] default = ["std"] std = [ + "sassafras/std", "balances/std", "codec/std", "frame-executive/std", "frame-support/std", + "grandpa/std", "indices/std", "randomness-collective-flip/std", "serde", diff --git a/bin/sassafras-template/runtime/src/lib.rs b/bin/sassafras-template/runtime/src/lib.rs index f20c5b5810588..37d8b611b84b2 100644 --- a/bin/sassafras-template/runtime/src/lib.rs +++ b/bin/sassafras-template/runtime/src/lib.rs @@ -18,6 +18,8 @@ use sp_runtime::traits::{ BlakeTwo256, Block as BlockT, StaticLookup, 
Verify, ConvertInto, IdentifyAccount }; use sp_api::impl_runtime_apis; +use grandpa::AuthorityList as GrandpaAuthorityList; +use grandpa::fg_primitives; use sp_version::RuntimeVersion; #[cfg(feature = "std")] use sp_version::NativeVersion; @@ -77,7 +79,10 @@ pub mod opaque { pub type BlockId = generic::BlockId; impl_opaque_keys! { - pub struct SessionKeys { } + pub struct SessionKeys { + pub grandpa: Grandpa, + pub sassafras: Sassafras, + } } } @@ -154,6 +159,21 @@ impl system::Trait for Runtime { type ModuleToIndex = ModuleToIndex; } +parameter_types! { + pub const EpochDuration: u64 = 4; + pub const ExpectedBlockTime: u64 = 2000; +} + +impl sassafras::Trait for Runtime { + type EpochDuration = EpochDuration; + type ExpectedBlockTime = ExpectedBlockTime; + type EpochChangeTrigger = sassafras::SameAuthoritiesForever; +} + +impl grandpa::Trait for Runtime { + type Event = Event; +} + impl indices::Trait for Runtime { /// The type for recording indexing into the account enumeration. If this ever overflows, there /// will be problems! @@ -173,7 +193,7 @@ parameter_types! { impl timestamp::Trait for Runtime { /// A timestamp: milliseconds since the unix epoch. type Moment = u64; - type OnTimestampSet = (); + type OnTimestampSet = Sassafras; type MinimumPeriod = MinimumPeriod; } @@ -224,6 +244,8 @@ construct_runtime!( { System: system::{Module, Call, Storage, Config, Event}, Timestamp: timestamp::{Module, Call, Storage, Inherent}, + Sassafras: sassafras::{Module, Call, Storage, Config, Inherent(Timestamp)}, + Grandpa: grandpa::{Module, Call, Storage, Config, Event}, Indices: indices, Balances: balances, TransactionPayment: transaction_payment::{Module, Storage}, @@ -319,11 +341,12 @@ impl_runtime_apis! 
{ impl sp_consensus_sassafras::SassafrasApi for Runtime { fn configuration() -> sp_consensus_sassafras::SassafrasConfiguration { sp_consensus_sassafras::SassafrasConfiguration { - slot_duration: 2, - epoch_length: 4, - genesis_authorities: Vec::new(), + slot_duration: Sassafras::slot_duration(), + epoch_length: EpochDuration::get(), + genesis_authorities: Sassafras::authorities(), genesis_proofs: Vec::new(), - randomness: sp_consensus_sassafras::Randomness([0u8; 32]), + randomness: Sassafras::randomness(), + secondary_slot: true, } } } @@ -339,4 +362,10 @@ impl_runtime_apis! { opaque::SessionKeys::decode_into_raw_public_keys(&encoded) } } + + impl fg_primitives::GrandpaApi for Runtime { + fn grandpa_authorities() -> GrandpaAuthorityList { + Grandpa::grandpa_authorities() + } + } } diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index c7fc933d8d8d3..46c15530c0685 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -34,7 +34,7 @@ std = [ "pallet-timestamp/std", "sp-timestamp/std", "sp-inherents/std", - "sp-consensus-babe/std", + "sp-consensus-sassafras/std", "pallet-session/std", "sp-io/std", ] diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index eec0e93deff0e..8b5bb34651454 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -37,9 +37,11 @@ use sp_inherents::{InherentIdentifier, InherentData, ProvideInherent, MakeFatalE use sp_consensus_sassafras::{ SASSAFRAS_ENGINE_ID, ConsensusLog, SassafrasAuthorityWeight, SlotNumber, inherents::{INHERENT_IDENTIFIER, SassafrasInherentData}, - digests::{NextEpochDescriptor, RawPreDigest}, + digests::{NextEpochDescriptor, PreDigest}, +}; +pub use sp_consensus_sassafras::{ + AuthorityId, RawVRFOutput, VRFOutput, VRF_OUTPUT_LENGTH, PUBLIC_KEY_LENGTH }; -pub use sp_consensus_sassafras::{AuthorityId, VRF_OUTPUT_LENGTH, PUBLIC_KEY_LENGTH}; #[cfg(all(feature = "std", test))] mod tests; @@ -206,11 +208,11 @@ impl FindAuthor for Module { { for (id, 
mut data) in digests.into_iter() { if id == SASSAFRAS_ENGINE_ID { - let pre_digest = RawPreDigest::decode(&mut data).ok()?; + let pre_digest = PreDigest::decode(&mut data).ok()?; return Some(match pre_digest { - RawPreDigest::Primary { authority_index, .. } => + PreDigest::Primary { authority_index, .. } => authority_index, - RawPreDigest::Secondary { authority_index, .. } => + PreDigest::Secondary { authority_index, .. } => authority_index, }); } @@ -257,7 +259,7 @@ struct SassafrasEquivocationOffence { } impl Offence for SassafrasEquivocationOffence { - const ID: Kind = *b"sassafras:equivocatio"; + const ID: Kind = *b"sassafras:equivo"; type TimeSlot = u64; fn offenders(&self) -> Vec { @@ -398,7 +400,7 @@ impl Module { .iter() .filter_map(|s| s.as_pre_runtime()) .filter_map(|(id, mut data)| if id == SASSAFRAS_ENGINE_ID { - RawPreDigest::decode(&mut data).ok() + PreDigest::decode(&mut data).ok() } else { None }) @@ -425,17 +427,17 @@ impl Module { CurrentSlot::put(digest.slot_number()); - if let RawPreDigest::Primary { vrf_output, .. } = digest { + if let PreDigest::Primary { post_vrf_output, .. } = digest { // place the VRF output into the `Initialized` storage item // and it'll be put onto the under-construction randomness // later, once we've decided which epoch this block is in. - Some(vrf_output) + Some(RawVRFOutput::from(post_vrf_output)) } else { None } }); - Initialized::put(maybe_vrf); + Initialized::put(maybe_vrf.map(|v| RawVRFOutput::from(v).0)); // enact epoch change, if necessary. T::EpochChangeTrigger::trigger::(now) diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 7b9dbfab0533e..e9a22c987b99b 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -44,6 +44,9 @@ pub const SASSAFRAS_TICKET_VRF_PREFIX: &[u8] = b"substrate-sassafras-ticket-vrf" /// The prefix used by Sassafras for its post-block VRF keys. 
pub const SASSAFRAS_POST_VRF_PREFIX: &[u8] = b"substrate-sassafras-post-vrf"; +/// The length of the public key +pub const PUBLIC_KEY_LENGTH: usize = 32; + /// A slot number. pub type SlotNumber = u64; @@ -105,6 +108,9 @@ pub struct SassafrasConfiguration { /// The randomness for the genesis epoch. pub randomness: Randomness, + + /// Whether secondary pre-digest is accepted. + pub secondary_slot: bool, } #[cfg(feature = "std")] diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs index cba1ab4fa69d6..c489c9d527bd9 100644 --- a/primitives/consensus/sassafras/src/vrf.rs +++ b/primitives/consensus/sassafras/src/vrf.rs @@ -172,5 +172,4 @@ fn convert_error(e: SignatureError) -> codec::Error { } } -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct Randomness(pub [u8; VRF_OUTPUT_LENGTH]); +pub type Randomness = [u8; VRF_OUTPUT_LENGTH]; From 283698b154061cfe343e95a52a5e0859b13943b8 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 17 Feb 2020 00:37:37 +0100 Subject: [PATCH 41/75] Use BABE secondary slot claim logic --- bin/sassafras-template/runtime/src/lib.rs | 2 +- client/consensus/sassafras/src/authorship.rs | 82 +++++++++++++++++++- primitives/consensus/sassafras/src/lib.rs | 2 +- 3 files changed, 82 insertions(+), 4 deletions(-) diff --git a/bin/sassafras-template/runtime/src/lib.rs b/bin/sassafras-template/runtime/src/lib.rs index 37d8b611b84b2..45f4436ceffd6 100644 --- a/bin/sassafras-template/runtime/src/lib.rs +++ b/bin/sassafras-template/runtime/src/lib.rs @@ -346,7 +346,7 @@ impl_runtime_apis! 
{ genesis_authorities: Sassafras::authorities(), genesis_proofs: Vec::new(), randomness: Sassafras::randomness(), - secondary_slot: true, + secondary_slots: true, } } } diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 4090c1a71d21b..866b19aef8c72 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -16,10 +16,12 @@ //! Sassafras authority selection and slot claiming. +use codec::Encode; +use sp_core::{blake2_256, U256, crypto::Pair}; use sp_consensus_sassafras::{ - SlotNumber, AuthorityPair, SassafrasConfiguration + SlotNumber, AuthorityPair, SassafrasConfiguration, AuthorityId, + SassafrasAuthorityWeight, digests::PreDigest, }; -use sp_consensus_sassafras::digests::PreDigest; use sc_keystore::KeyStorePtr; use super::Epoch; @@ -33,5 +35,81 @@ pub(super) fn claim_slot( config: &SassafrasConfiguration, keystore: &KeyStorePtr, ) -> Option<(PreDigest, AuthorityPair)> { + None.or_else(|| { + if config.secondary_slots { + claim_secondary_slot( + slot_number, + &epoch.validating.authorities, + keystore, + epoch.validating.randomness, + ) + } else { + None + } + }) +} + +/// Claim a secondary slot if it is our turn to propose, returning the +/// pre-digest to use when authoring the block, or `None` if it is not our turn +/// to propose. 
+fn claim_secondary_slot( + slot_number: SlotNumber, + authorities: &[(AuthorityId, SassafrasAuthorityWeight)], + keystore: &KeyStorePtr, + randomness: [u8; 32], +) -> Option<(PreDigest, AuthorityPair)> { + if authorities.is_empty() { + return None; + } + + let expected_author = secondary_slot_author( + slot_number, + authorities, + randomness, + )?; + + let keystore = keystore.read(); + + for (pair, authority_index) in authorities.iter() + .enumerate() + .flat_map(|(i, a)| { + keystore.key_pair::(&a.0).ok().map(|kp| (kp, i)) + }) + { + if pair.public() == *expected_author { + let pre_digest = PreDigest::Secondary { + slot_number, + authority_index: authority_index as u32, + commitments: Vec::new(), + }; + + return Some((pre_digest, pair)); + } + } + None } + +/// Get the expected secondary author for the given slot and with given +/// authorities. This should always assign the slot to some authority unless the +/// authorities list is empty. +fn secondary_slot_author( + slot_number: u64, + authorities: &[(AuthorityId, SassafrasAuthorityWeight)], + randomness: [u8; 32], +) -> Option<&AuthorityId> { + if authorities.is_empty() { + return None; + } + + let rand = U256::from((randomness, slot_number).using_encoded(blake2_256)); + + let authorities_len = U256::from(authorities.len()); + let idx = rand % authorities_len; + + let expected_author = authorities.get(idx.as_u32() as usize) + .expect("authorities not empty; index constrained to list length; \ + this is a valid index; qed"); + + Some(&expected_author.0) +} diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index e9a22c987b99b..c651e3b53bc7a 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -110,7 +110,7 @@ pub struct SassafrasConfiguration { pub randomness: Randomness, /// Whether secondary pre-digest is accepted. 
- pub secondary_slot: bool, + pub secondary_slots: bool, } #[cfg(feature = "std")] From 9205459af191c04e090f5ffea03764248893c347 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 17 Feb 2020 01:05:16 +0100 Subject: [PATCH 42/75] Code to make ticket and post vrf transcript --- client/consensus/sassafras/src/authorship.rs | 41 +++++++++++++++++++- 1 file changed, 39 insertions(+), 2 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 866b19aef8c72..16bbcca867363 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -17,10 +17,11 @@ //! Sassafras authority selection and slot claiming. use codec::Encode; +use merlin::Transcript; use sp_core::{blake2_256, U256, crypto::Pair}; use sp_consensus_sassafras::{ SlotNumber, AuthorityPair, SassafrasConfiguration, AuthorityId, - SassafrasAuthorityWeight, digests::PreDigest, + SassafrasAuthorityWeight, SASSAFRAS_ENGINE_ID, digests::PreDigest, }; use sc_keystore::KeyStorePtr; use super::Epoch; @@ -35,7 +36,7 @@ pub(super) fn claim_slot( config: &SassafrasConfiguration, keystore: &KeyStorePtr, ) -> Option<(PreDigest, AuthorityPair)> { - None.or_else(|| { + claim_primary_slot(slot_number, epoch, keystore).or_else(|| { if config.secondary_slots { claim_secondary_slot( slot_number, @@ -49,6 +50,14 @@ pub(super) fn claim_slot( }) } +fn claim_primary_slot( + slot_number: SlotNumber, + epoch: &Epoch, + keystore: &KeyStorePtr, +) -> Option<(PreDigest, AuthorityPair)> { + unimplemented!() +} + /// Claim a secondary slot if it is our turn to propose, returning the /// pre-digest to use when authoring the block, or `None` if it is not our turn /// to propose. 
@@ -113,3 +122,31 @@ fn secondary_slot_author( Some(&expected_author.0) } + +#[allow(deprecated)] +pub fn make_ticket_transcript( + randomness: &[u8], + slot_number: u64, + epoch: u64, +) -> Transcript { + let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); + transcript.commit_bytes(b"type", b"ticket"); + transcript.commit_bytes(b"slot number", &slot_number.to_le_bytes()); + transcript.commit_bytes(b"current epoch", &epoch.to_le_bytes()); + transcript.commit_bytes(b"chain randomness", randomness); + transcript +} + +#[allow(deprecated)] +pub fn make_post_transcript( + randomness: &[u8], + slot_number: u64, + epoch: u64, +) -> Transcript { + let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); + transcript.commit_bytes(b"type", b"post"); + transcript.commit_bytes(b"slot number", &slot_number.to_le_bytes()); + transcript.commit_bytes(b"current epoch", &epoch.to_le_bytes()); + transcript.commit_bytes(b"chain randomness", randomness); + transcript +} From 30c58331dcd30573d2a6c1786dcd0da7cf1f9012 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 17 Feb 2020 20:40:20 +0100 Subject: [PATCH 43/75] Working authoring logic --- client/consensus/babe/src/lib.rs | 1 + client/consensus/sassafras/src/authorship.rs | 131 +++++++++++- client/consensus/sassafras/src/lib.rs | 190 ++++++++++++++---- primitives/consensus/sassafras/src/digests.rs | 2 + primitives/core/src/lib.rs | 2 +- primitives/core/src/uint.rs | 2 +- 6 files changed, 280 insertions(+), 48 deletions(-) diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index af6adbf2f39cd..4951c6c880b11 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -69,6 +69,7 @@ use std::{ collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}, any::Any, borrow::Cow }; +use primitive_types::U512; use sp_consensus_babe; use sp_consensus::{ImportResult, CanAuthorWith}; use sp_consensus::import_queue::{ diff --git 
a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 16bbcca867363..785743564172f 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -16,15 +16,54 @@ //! Sassafras authority selection and slot claiming. +use std::{hash::Hash, collections::HashMap}; use codec::Encode; use merlin::Transcript; +use schnorrkel::vrf; use sp_core::{blake2_256, U256, crypto::Pair}; +use sp_api::NumberFor; +use sp_runtime::traits::Block as BlockT; use sp_consensus_sassafras::{ SlotNumber, AuthorityPair, SassafrasConfiguration, AuthorityId, SassafrasAuthorityWeight, SASSAFRAS_ENGINE_ID, digests::PreDigest, + VRFProof, SASSAFRAS_TICKET_VRF_PREFIX, VRFOutput, }; +use sc_consensus_epochs::ViableEpochDescriptor; use sc_keystore::KeyStorePtr; -use super::Epoch; +use super::{Epoch, PublishingSet}; + +/// Calculates the primary selection threshold for a given authority, taking +/// into account `c` (`1 - c` represents the probability of a slot being empty). +pub fn calculate_primary_threshold( + c: (u64, u64), + authorities: &[(AuthorityId, SassafrasAuthorityWeight)], + authority_index: usize, +) -> u128 { + use num_bigint::BigUint; + use num_rational::BigRational; + use num_traits::{cast::ToPrimitive, identities::One}; + + let c = c.0 as f64 / c.1 as f64; + + let theta = + authorities[authority_index].1 as f64 / + authorities.iter().map(|(_, weight)| weight).sum::() as f64; + + let calc = || { + let p = BigRational::from_float(1f64 - (1f64 - c).powf(theta))?; + let numer = p.numer().to_biguint()?; + let denom = p.denom().to_biguint()?; + ((BigUint::one() << 128) * numer / denom).to_u128() + }; + + calc().unwrap_or(u128::max_value()) +} + +/// Returns true if the given VRF output is lower than the given threshold, +/// false otherwise. 
+pub fn check_primary_threshold(inout: &vrf::VRFInOut, threshold: u128) -> bool { + u128::from_le_bytes(inout.make_bytes::<[u8; 16]>(SASSAFRAS_TICKET_VRF_PREFIX)) < threshold +} /// Tries to claim the given slot number. This method starts by trying to claim /// a primary VRF based slot. If we are not able to claim it, then if we have @@ -50,12 +89,96 @@ pub(super) fn claim_slot( }) } +/// Claim a primary slot. fn claim_primary_slot( slot_number: SlotNumber, epoch: &Epoch, keystore: &KeyStorePtr, ) -> Option<(PreDigest, AuthorityPair)> { - unimplemented!() + const MAX_PRE_DIGEST_COMMITMENTS: usize = 4; + + let ticket_vrf_index = epoch.validating.proofs.iter().position(|(s, _)| *s == slot_number)? as u32; + let ticket_vrf_proof = epoch.validating.proofs[ticket_vrf_index as usize].clone().1; + let pending_index = epoch.validating.pending.iter() + .position(|(_, _, _, p)| *p == ticket_vrf_proof)?; + let (ticket_vrf_attempt, authority_index, ticket_vrf_output, _) = + epoch.validating.pending[pending_index].clone(); + + let keystore = keystore.read(); + let pair = keystore.key_pair::( + &epoch.validating.authorities[authority_index as usize].0 + ).ok()?; + let post_transcript = make_post_transcript( + &epoch.validating.randomness, + slot_number, + epoch.validating.epoch_index, + ); + let (post_vrf_inout, post_vrf_proof, _) = get_keypair(&pair).vrf_sign(post_transcript); + let post_vrf_output = VRFOutput(post_vrf_inout.to_output()); + let post_vrf_proof = VRFProof(post_vrf_proof); + + let mut commitments = Vec::new(); + for (_, _, _, proof) in &epoch.publishing.pending { + if commitments.len() < MAX_PRE_DIGEST_COMMITMENTS && + !epoch.publishing.proofs.iter().position(|p| p == proof).is_some() + { + commitments.push(proof.clone()); + } + } + + let claim = PreDigest::Primary { + ticket_vrf_index, ticket_vrf_attempt, ticket_vrf_output, + authority_index, slot_number, post_vrf_proof, post_vrf_output, + commitments, + }; + + Some((claim, pair)) +} + +fn get_keypair(q: 
&AuthorityPair) -> &schnorrkel::Keypair { + use sp_core::crypto::IsWrappedBy; + sp_core::sr25519::Pair::from_ref(q).as_ref() +} + +impl PublishingSet { + /// Get or generate pending proofs for current epoch, given keystore. + pub fn append_to_pending( + &mut self, + keystore: &KeyStorePtr + ) { + let mut pending = Vec::new(); + let keystore = keystore.read(); + + for (pair, authority_index) in self.authorities.iter() + .enumerate() + .flat_map(|(i, a)| { + keystore.key_pair::(&a.0).ok().map(|kp| (kp, i)) + }) + { + for attempt in 0..self.max_attempts() { + let transcript = make_ticket_transcript( + &self.randomness, + attempt, + self.epoch_index + ); + + let threshold = calculate_primary_threshold( + self.threshold(), + &self.authorities, + authority_index + ); + + if let Some((inout, proof, _)) = get_keypair(&pair) + .vrf_sign_after_check(transcript, |inout| { + check_primary_threshold(inout, threshold) + }) + { + pending.push((attempt, VRFOutput(inout.to_output()), VRFProof(proof))); + } + } + + } + } } /// Claim a secondary slot if it is our turn to propose, returning the @@ -126,12 +249,12 @@ fn secondary_slot_author( #[allow(deprecated)] pub fn make_ticket_transcript( randomness: &[u8], - slot_number: u64, + attempt: u64, epoch: u64, ) -> Transcript { let mut transcript = Transcript::new(&SASSAFRAS_ENGINE_ID); transcript.commit_bytes(b"type", b"ticket"); - transcript.commit_bytes(b"slot number", &slot_number.to_le_bytes()); + transcript.commit_bytes(b"attempt", &attempt.to_le_bytes()); transcript.commit_bytes(b"current epoch", &epoch.to_le_bytes()); transcript.commit_bytes(b"chain randomness", randomness); transcript diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index e9cf0176421cb..04052a2f1ce77 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -18,9 +18,9 @@ pub use sp_consensus_sassafras::{ SassafrasApi, ConsensusLog, SASSAFRAS_ENGINE_ID, SlotNumber, 
SassafrasConfiguration, - AuthorityId, AuthorityPair, AuthoritySignature, + AuthorityId, AuthorityPair, AuthoritySignature, VRFOutput, SassafrasAuthorityWeight, VRF_OUTPUT_LENGTH, VRFProof, Randomness, - digests::{PreDigest, CompatibleDigestItem, NextEpochDescriptor}, + digests::{PreDigest, CompatibleDigestItem, NextEpochDescriptor, PostBlockDescriptor}, }; pub use sp_consensus::SyncOracle; @@ -39,7 +39,7 @@ use sp_runtime::{ use sp_api::{ProvideRuntimeApi, NumberFor}; use sc_keystore::KeyStorePtr; use parking_lot::Mutex; -use sp_core::Pair; +use sp_core::{U512, Pair}; use sp_inherents::{InherentDataProviders, InherentData}; use sc_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG}; use sp_consensus::{ @@ -80,31 +80,71 @@ mod authorship; #[cfg(test)] mod tests; -/// Validator set of a particular epoch, can be either publishing or validating. +/// Set that are publishing. #[derive(Debug, Clone, Encode, Decode)] -pub struct ValidatorSet { - /// Proofs of all VRFs collected. - pub proofs: Vec, +pub struct PublishingSet { + /// Epoch index of validating. + pub epoch_index: u64, /// The authorities and their weights. pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, /// Randomness for this epoch. pub randomness: Randomness, + /// Proofs of all VRFs collected. + pub proofs: Vec, + /// Local pending proofs collected. + pub pending: Vec<(u64, u32, VRFOutput, VRFProof)>, } -/// Epoch data for Sassafras +impl PublishingSet { + /// Maximum attempts for proof generation. + pub fn max_attempts(&self) -> u64 { + 64 + } + + /// Difficulty where the attempts are valid. + pub fn threshold(&self) -> (u64, u64) { + (1, 4) + } +} + +/// Set that are validating. #[derive(Debug, Clone, Encode, Decode)] -pub struct Epoch { +pub struct ValidatingSet { /// Start slot of the epoch. pub start_slot: SlotNumber, /// Duration of this epoch. pub duration: SlotNumber, - /// Epoch index. + /// Epoch index of validating. 
pub epoch_index: u64, + /// The authorities and their weights. + pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + /// Randomness for this epoch. + pub randomness: Randomness, + /// Proofs as ordered by slot numbers. + pub proofs: Vec<(SlotNumber, VRFProof)>, + /// Pending local proofs. + pub pending: Vec<(u64, u32, VRFOutput, VRFProof)>, +} + +impl ValidatingSet { + /// Maximum attempts for proof generation. + pub fn max_attempts(&self) -> u64 { + 64 + } + /// Difficulty where the attempts are valid. + pub fn threshold(&self) -> (u64, u64) { + (1, 4) + } +} + +/// Epoch data for Sassafras +#[derive(Debug, Clone, Encode, Decode)] +pub struct Epoch { /// Publishing validator set. The set will start validating block in the next epoch. - pub publishing: ValidatorSet, + pub publishing: PublishingSet, /// Validating validator set. The set validates block in the current epoch. - pub validating: ValidatorSet, + pub validating: ValidatingSet, } impl EpochT for Epoch { @@ -113,25 +153,41 @@ impl EpochT for Epoch { fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { Epoch { - epoch_index: self.epoch_index + 1, - start_slot: self.start_slot + self.duration, - duration: self.duration, - - validating: self.publishing.clone(), - publishing: ValidatorSet { - proofs: Vec::new(), + validating: ValidatingSet { + start_slot: self.validating.start_slot + self.validating.duration, + duration: self.validating.duration, + epoch_index: self.publishing.epoch_index, + authorities: self.publishing.authorities.clone(), + randomness: self.publishing.randomness, + proofs: self.publishing.proofs.clone() + .into_iter() + .enumerate() + .map(|(i, p)| (i as u64, p)) + .collect(), + pending: self.publishing.pending.clone(), + }, + publishing: PublishingSet { + epoch_index: self.publishing.epoch_index + 1, authorities: descriptor.authorities, randomness: descriptor.randomness, + proofs: Vec::new(), + pending: Vec::new(), }, } } fn start_slot(&self) -> SlotNumber { - 
self.start_slot + self.validating.start_slot } fn end_slot(&self) -> SlotNumber { - self.start_slot + self.duration + self.validating.start_slot + self.validating.duration + } +} + +impl Epoch { + pub fn epoch_index(&self) -> u64 { + self.validating.epoch_index } } @@ -223,25 +279,32 @@ impl Config { /// Create the genesis epoch (epoch #0) pub fn genesis_epoch(&self, slot_number: SlotNumber) -> Epoch { - let proofs = self.genesis_proofs.clone() + let publishing_proofs = self.genesis_proofs.clone() .into_iter() .map(|p| p.try_into().expect("Genesis proofs are invalid")) .collect::>(); + let validating_proofs = self.genesis_proofs.clone() + .into_iter() + .enumerate() + .map(|(i, p)| (i as u64, p.try_into().expect("Genesis proofs are invalid"))) + .collect::>(); Epoch { - epoch_index: 0, - start_slot: slot_number, - duration: self.epoch_length, - - validating: ValidatorSet { - proofs: proofs.clone(), + validating: ValidatingSet { + start_slot: slot_number, + duration: self.epoch_length, + epoch_index: 0, + proofs: validating_proofs, authorities: self.genesis_authorities.clone(), randomness: self.randomness.clone(), + pending: Vec::new(), }, - publishing: ValidatorSet { - proofs, + publishing: PublishingSet { + epoch_index: 1, + proofs: publishing_proofs, authorities: self.genesis_authorities.clone(), randomness: self.randomness.clone(), + pending: Vec::new(), }, } } @@ -601,6 +664,24 @@ fn find_next_epoch_digest(header: &B::Header) Ok(epoch_digest) } +/// Extract the Sassafras post block digest from the given header, if it exists.
+fn find_post_block_digest(header: &B::Header) + -> Result, Error> + where DigestItemFor: CompatibleDigestItem, +{ + let mut post_digest: Option<_> = None; + for log in header.digest().logs() { + trace!(target: "sassafras", "Checking log {:?}, looking for epoch change digest.", log); + let log = log.try_to::(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID)); + match (log, post_digest.is_some()) { + (Some(ConsensusLog::PostBlockData(_)), true) => return Err(Error::MultipleEpochChangeDigests), + (Some(ConsensusLog::PostBlockData(epoch)), false) => post_digest = Some(epoch), + _ => trace!(target: "sassafras", "Ignoring digest not meant for us"), + } + } + + Ok(post_digest) +} #[derive(Default, Clone)] struct TimeSource(Arc, Vec<(Instant, u64)>)>>); @@ -1025,6 +1106,8 @@ impl BlockImport for SassafrasBlockImport(&block.header) .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + let post_block_digest = find_post_block_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; match (first_in_epoch, next_epoch_digest.is_some()) { (true, true) => {}, @@ -1045,30 +1128,53 @@ impl BlockImport for SassafrasBlockImport::FetchEpoch(parent_hash).into()) + })?; + + match pre_digest { + PreDigest::Primary { commitments, .. } => { + let epoch = viable_epoch.as_mut(); + + for proof in commitments { + if epoch.publishing.proofs.iter().position(|p| *p == proof).is_none() { + epoch.publishing.proofs.push(proof); + } + } + }, + PreDigest::Secondary { .. 
} => (), + } - let viable_epoch = epoch_changes.viable_epoch( - &epoch_descriptor, - |slot| self.config.genesis_epoch(slot), - ).ok_or_else(|| { - ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) - })?; + if let Some(post_block_descriptor) = post_block_digest { + let epoch = viable_epoch.as_mut(); + for proof in post_block_descriptor.commitments { + if epoch.publishing.proofs.iter().position(|p| *p == proof).is_none() { + epoch.publishing.proofs.push(proof); + } + } + } + + let info = self.client.chain_info(); + + if let Some(next_epoch_descriptor) = next_epoch_digest { info!(target: "sassafras", "New epoch {} launching at block {} (block slot {} >= start slot {}).", - viable_epoch.as_ref().epoch_index, + viable_epoch.as_ref().epoch_index(), hash, slot_number, - viable_epoch.as_ref().start_slot); + viable_epoch.as_ref().start_slot()); let next_epoch = viable_epoch.increment(next_epoch_descriptor); info!(target: "sassafras", "Next epoch starts at slot {}", - next_epoch.as_ref().start_slot); + next_epoch.as_ref().start_slot()); // prune the tree of epochs not part of the finalized chain or // that are not live anymore, and then track the given epoch change diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index f17e5a60b4645..5211806b0c957 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -97,6 +97,8 @@ pub enum PreDigest { Primary { /// Index of ticket VRF proof that has been previously committed. ticket_vrf_index: VRFIndex, + /// Attempt number of the ticket VRF proof. + ticket_vrf_attempt: u64, /// Reveal of ticket VRF output. ticket_vrf_output: VRFOutput, /// Validator index.
diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 5bb9a3927f965..568c00662a429 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -74,7 +74,7 @@ pub mod testing; mod tests; pub use self::hash::{H160, H256, H512, convert_hash}; -pub use self::uint::U256; +pub use self::uint::{U256, U512}; pub use changes_trie::{ChangesTrieConfiguration, ChangesTrieConfigurationRange}; #[cfg(feature = "full_crypto")] pub use crypto::{DeriveJunction, Pair, Public}; diff --git a/primitives/core/src/uint.rs b/primitives/core/src/uint.rs index 54ed7ca317f00..e666137c08161 100644 --- a/primitives/core/src/uint.rs +++ b/primitives/core/src/uint.rs @@ -16,7 +16,7 @@ //! An unsigned fixed-size integer. -pub use primitive_types::U256; +pub use primitive_types::{U256, U512}; #[cfg(test)] mod tests { From 25b282db9183676b7ad4b1bb91ed758fef7a2eca Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 17 Feb 2020 20:51:21 +0100 Subject: [PATCH 44/75] Populate pending proofs when claiming slot --- client/consensus/sassafras/src/lib.rs | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 04052a2f1ce77..b33c39358426b 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -482,12 +482,19 @@ impl sc_consensus_slots::SimpleSlotWorker for Sassafra epoch_descriptor: &ViableEpochDescriptor, Epoch>, ) -> Option { debug!(target: "sassafras", "Attempting to claim slot {}", slot_number); + let mut epoch_changes = self.epoch_changes.lock(); + let mut viable_epoch = epoch_changes.viable_epoch_mut( + &epoch_descriptor, + |slot| self.config.genesis_epoch(slot) + )?; + + if viable_epoch.as_ref().publishing.pending.is_empty() { + viable_epoch.as_mut().publishing.append_to_pending(&self.keystore); + } + let s = authorship::claim_slot( slot_number, - self.epoch_changes.lock().viable_epoch( - &epoch_descriptor, - 
|slot| self.config.genesis_epoch(slot) - )?.as_ref(), + viable_epoch.as_ref(), &*self.config, &self.keystore, ); From 4ac78557390e578a01aa17ff1fb00d455bfc3114 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 17 Feb 2020 21:02:58 +0100 Subject: [PATCH 45/75] Push commitments of secondary claims --- client/consensus/sassafras/src/authorship.rs | 20 +++++++++++++++++++- client/consensus/sassafras/src/lib.rs | 10 +++++++++- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 785743564172f..12b68c1a4a5f7 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -30,6 +30,7 @@ use sp_consensus_sassafras::{ }; use sc_consensus_epochs::ViableEpochDescriptor; use sc_keystore::KeyStorePtr; +use log::trace; use super::{Epoch, PublishingSet}; /// Calculates the primary selection threshold for a given authority, taking @@ -79,6 +80,7 @@ pub(super) fn claim_slot( if config.secondary_slots { claim_secondary_slot( slot_number, + epoch, &epoch.validating.authorities, keystore, epoch.validating.randomness, @@ -89,13 +91,16 @@ pub(super) fn claim_slot( }) } +const MAX_PRE_DIGEST_COMMITMENTS: usize = 4; + /// Claim a primary slot. fn claim_primary_slot( slot_number: SlotNumber, epoch: &Epoch, keystore: &KeyStorePtr, ) -> Option<(PreDigest, AuthorityPair)> { - const MAX_PRE_DIGEST_COMMITMENTS: usize = 4; + trace!(target: "sassafras", "Claiming a primary slot with slot number: {:?}", slot_number); + trace!(target: "sassafras", "Epoch data as of now: {:?}", epoch); let ticket_vrf_index = epoch.validating.proofs.iter().position(|(s, _)| *s == slot_number)? as u32; let ticket_vrf_proof = epoch.validating.proofs[ticket_vrf_index as usize].clone().1; @@ -186,6 +191,7 @@ impl PublishingSet { /// to propose. 
fn claim_secondary_slot( slot_number: SlotNumber, + epoch: &Epoch, authorities: &[(AuthorityId, SassafrasAuthorityWeight)], keystore: &KeyStorePtr, randomness: [u8; 32], @@ -194,6 +200,9 @@ fn claim_secondary_slot( return None; } + trace!(target: "sassafras", "Claiming a secondary slot with slot number: {:?}", slot_number); + trace!(target: "sassafras", "Epoch data as of now: {:?}", epoch); + let expected_author = secondary_slot_author( slot_number, authorities, @@ -209,6 +218,15 @@ fn claim_secondary_slot( }) { if pair.public() == *expected_author { + let mut commitments = Vec::new(); + for (_, _, _, proof) in &epoch.publishing.pending { + if commitments.len() < MAX_PRE_DIGEST_COMMITMENTS && + !epoch.publishing.proofs.iter().position(|p| p == proof).is_some() + { + commitments.push(proof.clone()); + } + } + let pre_digest = PreDigest::Secondary { slot_number, authority_index: authority_index as u32, diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index b33c39358426b..2fc4a982ad18b 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -1154,7 +1154,15 @@ impl BlockImport for SassafrasBlockImport (), + PreDigest::Secondary { commitments, .. 
} => { + let epoch = viable_epoch.as_mut(); + + for proof in commitments { + if epoch.publishing.proofs.iter().position(|p| *p == proof).is_none() { + epoch.publishing.proofs.push(proof); + } + } + }, } if let Some(post_block_descriptor) = post_block_digest { From 5d631cc5ac9da71672f633a3e04a02fe81d18c10 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 17 Feb 2020 21:39:26 +0100 Subject: [PATCH 46/75] Fix primary commitment importing --- client/consensus/sassafras/src/authorship.rs | 22 +++++++++++++------- client/consensus/sassafras/src/lib.rs | 11 +++++++--- 2 files changed, 22 insertions(+), 11 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 12b68c1a4a5f7..1fe6eae9ae3bf 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -99,9 +99,6 @@ fn claim_primary_slot( epoch: &Epoch, keystore: &KeyStorePtr, ) -> Option<(PreDigest, AuthorityPair)> { - trace!(target: "sassafras", "Claiming a primary slot with slot number: {:?}", slot_number); - trace!(target: "sassafras", "Epoch data as of now: {:?}", epoch); - let ticket_vrf_index = epoch.validating.proofs.iter().position(|(s, _)| *s == slot_number)? 
as u32; let ticket_vrf_proof = epoch.validating.proofs[ticket_vrf_index as usize].clone().1; let pending_index = epoch.validating.pending.iter() @@ -125,11 +122,12 @@ fn claim_primary_slot( let mut commitments = Vec::new(); for (_, _, _, proof) in &epoch.publishing.pending { if commitments.len() < MAX_PRE_DIGEST_COMMITMENTS && - !epoch.publishing.proofs.iter().position(|p| p == proof).is_some() + epoch.publishing.proofs.iter().position(|p| p == proof).is_none() { commitments.push(proof.clone()); } } + trace!(target: "sassafras", "Appending commitment length: {}", commitments.len()); let claim = PreDigest::Primary { ticket_vrf_index, ticket_vrf_attempt, ticket_vrf_output, @@ -137,6 +135,9 @@ fn claim_primary_slot( commitments, }; + trace!(target: "sassafras", "Claimed a primary slot with slot number: {:?}", slot_number); + trace!(target: "sassafras", "Epoch data as of now: {:?}", epoch); + Some((claim, pair)) } @@ -151,7 +152,6 @@ impl PublishingSet { &mut self, keystore: &KeyStorePtr ) { - let mut pending = Vec::new(); let keystore = keystore.read(); for (pair, authority_index) in self.authorities.iter() @@ -178,7 +178,12 @@ impl PublishingSet { check_primary_threshold(inout, threshold) }) { - pending.push((attempt, VRFOutput(inout.to_output()), VRFProof(proof))); + self.pending.push(( + attempt, + authority_index as u32, + VRFOutput(inout.to_output()), + VRFProof(proof) + )); } } @@ -221,16 +226,17 @@ fn claim_secondary_slot( let mut commitments = Vec::new(); for (_, _, _, proof) in &epoch.publishing.pending { if commitments.len() < MAX_PRE_DIGEST_COMMITMENTS && - !epoch.publishing.proofs.iter().position(|p| p == proof).is_some() + epoch.publishing.proofs.iter().position(|p| p == proof).is_none() { commitments.push(proof.clone()); } } + trace!(target: "sassafras", "Appending commitment length: {}", commitments.len()); let pre_digest = PreDigest::Secondary { slot_number, authority_index: authority_index as u32, - commitments: Vec::new(), + commitments, }; return 
Some((pre_digest, pair)); diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 2fc4a982ad18b..cf2f88f09bcc2 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -152,9 +152,11 @@ impl EpochT for Epoch { type NextEpochDescriptor = NextEpochDescriptor; fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { + let start_slot = self.validating.start_slot + self.validating.duration; + Epoch { validating: ValidatingSet { - start_slot: self.validating.start_slot + self.validating.duration, + start_slot, duration: self.validating.duration, epoch_index: self.publishing.epoch_index, authorities: self.publishing.authorities.clone(), @@ -162,7 +164,7 @@ impl EpochT for Epoch { proofs: self.publishing.proofs.clone() .into_iter() .enumerate() - .map(|(i, p)| (i as u64, p)) + .map(|(i, p)| (start_slot + i as u64, p)) .collect(), pending: self.publishing.pending.clone(), }, @@ -286,7 +288,7 @@ impl Config { let validating_proofs = self.genesis_proofs.clone() .into_iter() .enumerate() - .map(|(i, p)| (i as u64, p.try_into().expect("Genesis proofs are invalid"))) + .map(|(i, p)| (slot_number + i as u64, p.try_into().expect("Genesis proofs are invalid"))) .collect::>(); Epoch { @@ -489,6 +491,7 @@ impl sc_consensus_slots::SimpleSlotWorker for Sassafra )?; if viable_epoch.as_ref().publishing.pending.is_empty() { + trace!(target: "sassafras", "Pending proof set is empty, generating a new one."); viable_epoch.as_mut().publishing.append_to_pending(&self.keystore); } @@ -1148,6 +1151,7 @@ impl BlockImport for SassafrasBlockImport { let epoch = viable_epoch.as_mut(); + trace!(target: "sassafras", "Importing commitments of length: {}", commitments.len()); for proof in commitments { if epoch.publishing.proofs.iter().position(|p| *p == proof).is_none() { epoch.publishing.proofs.push(proof); @@ -1157,6 +1161,7 @@ impl BlockImport for SassafrasBlockImport { let epoch = viable_epoch.as_mut(); + 
trace!(target: "sassafras", "Importing commitments of length: {}", commitments.len()); for proof in commitments { if epoch.publishing.proofs.iter().position(|p| *p == proof).is_none() { epoch.publishing.proofs.push(proof); From 079b297811ed887347297af5999d5ae520f4933a Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Tue, 18 Feb 2020 16:30:13 +0100 Subject: [PATCH 47/75] Add sortition process for validating phrase --- client/consensus/sassafras/src/lib.rs | 9 ++++++--- primitives/consensus/sassafras/src/vrf.rs | 15 +++++++++++++++ 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index cf2f88f09bcc2..a0fc3881df51b 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -77,8 +77,7 @@ use sp_api::ApiExt; mod aux_schema; mod verification; mod authorship; -#[cfg(test)] -mod tests; +mod utils; /// Set that are publishing. #[derive(Debug, Clone, Encode, Decode)] @@ -153,6 +152,10 @@ impl EpochT for Epoch { fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { let start_slot = self.validating.start_slot + self.validating.duration; + let sortition_proofs = utils::sortition( + &self.publishing.proofs, + self.validating.duration as usize + ); Epoch { validating: ValidatingSet { @@ -161,7 +164,7 @@ impl EpochT for Epoch { epoch_index: self.publishing.epoch_index, authorities: self.publishing.authorities.clone(), randomness: self.publishing.randomness, - proofs: self.publishing.proofs.clone() + proofs: sortition_proofs .into_iter() .enumerate() .map(|(i, p)| (start_slot + i as u64, p)) diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs index c489c9d527bd9..063ceed01881d 100644 --- a/primitives/consensus/sassafras/src/vrf.rs +++ b/primitives/consensus/sassafras/src/vrf.rs @@ -1,4 +1,5 @@ use codec::{Encode, Decode}; +use sp_core::U512; use sp_runtime::RuntimeDebug; #[cfg(feature = "std")] use 
std::{ops::{Deref, DerefMut}, convert::TryFrom}; @@ -93,6 +94,20 @@ impl core::cmp::Eq for RawVRFProof { } #[derive(Clone, Debug, PartialEq, Eq)] pub struct VRFProof(pub schnorrkel::vrf::VRFProof); +#[cfg(feature = "std")] +impl PartialOrd for VRFProof { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +#[cfg(feature = "std")] +impl Ord for VRFProof { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + U512::from(self.0.to_bytes()).cmp(&U512::from(other.0.to_bytes())) + } +} + #[cfg(not(feature = "std"))] pub type VRFProof = RawVRFProof; From 71d8ebbe658385a8a449d459f90bcc77e3780d4c Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Tue, 18 Feb 2020 16:35:39 +0100 Subject: [PATCH 48/75] Remove support for genesis proof We always start with secondary claims, when validators start to generate proofs, and then switch back to primary claims with proofs. --- primitives/consensus/sassafras/src/lib.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index c651e3b53bc7a..9cb31315314c4 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -103,9 +103,6 @@ pub struct SassafrasConfiguration { /// The authorities for the genesis epoch. pub genesis_authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, - /// The proofs for genesis epoch. - pub genesis_proofs: Vec, - /// The randomness for the genesis epoch. 
pub randomness: Randomness, From 3ea4942c1597ad8db45f76ddeb8328682e803183 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Tue, 18 Feb 2020 16:44:50 +0100 Subject: [PATCH 49/75] Add support for a generating phrase So it's a 3 phrase process -- generating, publishing, validating --- bin/sassafras-template/runtime/src/lib.rs | 1 - client/consensus/sassafras/src/authorship.rs | 4 +- client/consensus/sassafras/src/lib.rs | 79 +++++++++++++------- client/consensus/sassafras/src/utils.rs | 34 +++++++++ 4 files changed, 90 insertions(+), 28 deletions(-) create mode 100644 client/consensus/sassafras/src/utils.rs diff --git a/bin/sassafras-template/runtime/src/lib.rs b/bin/sassafras-template/runtime/src/lib.rs index 45f4436ceffd6..97990c655830e 100644 --- a/bin/sassafras-template/runtime/src/lib.rs +++ b/bin/sassafras-template/runtime/src/lib.rs @@ -344,7 +344,6 @@ impl_runtime_apis! { slot_duration: Sassafras::slot_duration(), epoch_length: EpochDuration::get(), genesis_authorities: Sassafras::authorities(), - genesis_proofs: Vec::new(), randomness: Sassafras::randomness(), secondary_slots: true, } diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 1fe6eae9ae3bf..db0b3633eb360 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -31,7 +31,7 @@ use sp_consensus_sassafras::{ use sc_consensus_epochs::ViableEpochDescriptor; use sc_keystore::KeyStorePtr; use log::trace; -use super::{Epoch, PublishingSet}; +use super::{Epoch, GeneratingSet}; /// Calculates the primary selection threshold for a given authority, taking /// into account `c` (`1 - c` represents the probability of a slot being empty). @@ -146,7 +146,7 @@ fn get_keypair(q: &AuthorityPair) -> &schnorrkel::Keypair { sp_core::sr25519::Pair::from_ref(q).as_ref() } -impl PublishingSet { +impl GeneratingSet { /// Get or generate pending proofs for current epoch, given keystore. 
pub fn append_to_pending( &mut self, diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index a0fc3881df51b..1957cc950fc5b 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -79,6 +79,31 @@ mod verification; mod authorship; mod utils; +/// Set that are generating. +#[derive(Debug, Clone, Encode, Decode)] +pub struct GeneratingSet { + /// Epoch index of validating. + pub epoch_index: u64, + /// The authorities and their weights. + pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, + /// Randomness for this epoch. + pub randomness: Randomness, + /// Local pending proofs collected. + pub pending: Vec<(u64, u32, VRFOutput, VRFProof)>, +} + +impl GeneratingSet { + /// Maximum attempts for proof generation. + pub fn max_attempts(&self) -> u64 { + 64 + } + + /// Difficulty where the attempts are valid. + pub fn threshold(&self) -> (u64, u64) { + (1, 4) + } +} + /// Set that are publishing. #[derive(Debug, Clone, Encode, Decode)] pub struct PublishingSet { @@ -140,6 +165,8 @@ impl ValidatingSet { /// Epoch data for Sassafras #[derive(Debug, Clone, Encode, Decode)] pub struct Epoch { + /// Generating validator set. The set will start publishing in the next epoch. + pub generating: GeneratingSet, /// Publishing validator set. The set will start validating block in the next epoch. pub publishing: PublishingSet, /// Validating validator set. The set validates block in the current epoch. 
@@ -158,6 +185,19 @@ impl EpochT for Epoch { ); Epoch { + generating: GeneratingSet { + epoch_index: self.generating.epoch_index + 1, + authorities: descriptor.authorities, + randomness: descriptor.randomness, + pending: Vec::new(), + }, + publishing: PublishingSet { + epoch_index: self.generating.epoch_index, + authorities: self.generating.authorities.clone(), + randomness: self.generating.randomness, + proofs: Vec::new(), + pending: self.generating.pending.clone(), + }, validating: ValidatingSet { start_slot, duration: self.validating.duration, @@ -171,13 +211,6 @@ impl EpochT for Epoch { .collect(), pending: self.publishing.pending.clone(), }, - publishing: PublishingSet { - epoch_index: self.publishing.epoch_index + 1, - authorities: descriptor.authorities, - randomness: descriptor.randomness, - proofs: Vec::new(), - pending: Vec::new(), - }, } } @@ -284,29 +317,25 @@ impl Config { /// Create the genesis epoch (epoch #0) pub fn genesis_epoch(&self, slot_number: SlotNumber) -> Epoch { - let publishing_proofs = self.genesis_proofs.clone() - .into_iter() - .map(|p| p.try_into().expect("Genesis proofs are invalid")) - .collect::>(); - let validating_proofs = self.genesis_proofs.clone() - .into_iter() - .enumerate() - .map(|(i, p)| (slot_number + i as u64, p.try_into().expect("Genesis proofs are invalid"))) - .collect::>(); - Epoch { - validating: ValidatingSet { - start_slot: slot_number, - duration: self.epoch_length, - epoch_index: 0, - proofs: validating_proofs, + generating: GeneratingSet { + epoch_index: 2, authorities: self.genesis_authorities.clone(), randomness: self.randomness.clone(), pending: Vec::new(), }, publishing: PublishingSet { epoch_index: 1, - proofs: publishing_proofs, + proofs: Vec::new(), + authorities: self.genesis_authorities.clone(), + randomness: self.randomness.clone(), + pending: Vec::new(), + }, + validating: ValidatingSet { + start_slot: slot_number, + duration: self.epoch_length, + epoch_index: 0, + proofs: Vec::new(), authorities: 
self.genesis_authorities.clone(), randomness: self.randomness.clone(), pending: Vec::new(), @@ -493,9 +522,9 @@ impl sc_consensus_slots::SimpleSlotWorker for Sassafra |slot| self.config.genesis_epoch(slot) )?; - if viable_epoch.as_ref().publishing.pending.is_empty() { + if viable_epoch.as_ref().generating.pending.is_empty() { trace!(target: "sassafras", "Pending proof set is empty, generating a new one."); - viable_epoch.as_mut().publishing.append_to_pending(&self.keystore); + viable_epoch.as_mut().generating.append_to_pending(&self.keystore); } let s = authorship::claim_slot( diff --git a/client/consensus/sassafras/src/utils.rs b/client/consensus/sassafras/src/utils.rs new file mode 100644 index 0000000000000..023a623dda79a --- /dev/null +++ b/client/consensus/sassafras/src/utils.rs @@ -0,0 +1,34 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +/// Sort Sassafras proof with the given limit, and into inside-out order. +/// The smallest items are on both side of the array, with the largest in +/// the middle. 
+pub fn sortition(input: &[T], limit: usize) -> Vec { + let mut ret = input.to_vec(); + ret.sort(); + + while ret.len() > limit { + ret.pop(); + } + + if !ret.is_empty() { + let half = ret.len() / 2; + ret[half..].sort_by(|a, b| a.cmp(b).reverse()); + } + + ret +} From 2e1bfec34bacbccec5853af1a8b165057c3a2cf0 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Tue, 18 Feb 2020 20:53:13 +0100 Subject: [PATCH 50/75] sassafras, verification: secondary claim --- client/consensus/babe/src/lib.rs | 1 - client/consensus/sassafras/src/authorship.rs | 13 +- client/consensus/sassafras/src/lib.rs | 13 +- .../consensus/sassafras/src/verification.rs | 116 +++++++++++++++++- frame/sassafras/src/lib.rs | 11 +- primitives/consensus/sassafras/Cargo.toml | 2 +- primitives/consensus/sassafras/src/digests.rs | 72 ++++++----- 7 files changed, 170 insertions(+), 58 deletions(-) diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 4951c6c880b11..af6adbf2f39cd 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -69,7 +69,6 @@ use std::{ collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}, any::Any, borrow::Cow }; -use primitive_types::U512; use sp_consensus_babe; use sp_consensus::{ImportResult, CanAuthorWith}; use sp_consensus::import_queue::{ diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index db0b3633eb360..ae4741ab482cc 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -25,8 +25,9 @@ use sp_api::NumberFor; use sp_runtime::traits::Block as BlockT; use sp_consensus_sassafras::{ SlotNumber, AuthorityPair, SassafrasConfiguration, AuthorityId, - SassafrasAuthorityWeight, SASSAFRAS_ENGINE_ID, digests::PreDigest, + SassafrasAuthorityWeight, SASSAFRAS_ENGINE_ID, VRFProof, SASSAFRAS_TICKET_VRF_PREFIX, VRFOutput, + digests::{PreDigest, PrimaryPreDigest, SecondaryPreDigest}, }; use 
sc_consensus_epochs::ViableEpochDescriptor; use sc_keystore::KeyStorePtr; @@ -129,11 +130,11 @@ fn claim_primary_slot( } trace!(target: "sassafras", "Appending commitment length: {}", commitments.len()); - let claim = PreDigest::Primary { + let claim = PreDigest::Primary(PrimaryPreDigest { ticket_vrf_index, ticket_vrf_attempt, ticket_vrf_output, authority_index, slot_number, post_vrf_proof, post_vrf_output, commitments, - }; + }); trace!(target: "sassafras", "Claimed a primary slot with slot number: {:?}", slot_number); trace!(target: "sassafras", "Epoch data as of now: {:?}", epoch); @@ -233,11 +234,11 @@ fn claim_secondary_slot( } trace!(target: "sassafras", "Appending commitment length: {}", commitments.len()); - let pre_digest = PreDigest::Secondary { + let pre_digest = PreDigest::Secondary(SecondaryPreDigest { slot_number, authority_index: authority_index as u32, commitments, - }; + }); return Some((pre_digest, pair)); } @@ -249,7 +250,7 @@ fn claim_secondary_slot( /// Get the expected secondary author for the given slot and with given /// authorities. This should always assign the slot to some authority unless the /// authorities list is empty. 
-fn secondary_slot_author( +pub(super) fn secondary_slot_author( slot_number: u64, authorities: &[(AuthorityId, SassafrasAuthorityWeight)], randomness: [u8; 32], diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 1957cc950fc5b..e2e9745c78779 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -20,7 +20,10 @@ pub use sp_consensus_sassafras::{ SassafrasApi, ConsensusLog, SASSAFRAS_ENGINE_ID, SlotNumber, SassafrasConfiguration, AuthorityId, AuthorityPair, AuthoritySignature, VRFOutput, SassafrasAuthorityWeight, VRF_OUTPUT_LENGTH, VRFProof, Randomness, - digests::{PreDigest, CompatibleDigestItem, NextEpochDescriptor, PostBlockDescriptor}, + digests::{ + PreDigest, CompatibleDigestItem, NextEpochDescriptor, PostBlockDescriptor, + PrimaryPreDigest, SecondaryPreDigest, + }, }; pub use sp_consensus::SyncOracle; @@ -668,11 +671,11 @@ fn find_pre_digest(header: &B::Header) -> Result> // genesis block doesn't contain a pre digest so let's generate a // dummy one to not break any invariants in the rest of the code if header.number().is_zero() { - return Ok(PreDigest::Secondary { + return Ok(PreDigest::Secondary(SecondaryPreDigest { slot_number: 0, authority_index: 0, commitments: Vec::new(), - }) + })) } let mut pre_digest: Option<_> = None; @@ -1180,7 +1183,7 @@ impl BlockImport for SassafrasBlockImport { + PreDigest::Primary(PrimaryPreDigest { commitments, .. }) => { let epoch = viable_epoch.as_mut(); trace!(target: "sassafras", "Importing commitments of length: {}", commitments.len()); @@ -1190,7 +1193,7 @@ impl BlockImport for SassafrasBlockImport { + PreDigest::Secondary(SecondaryPreDigest { commitments, .. 
}) => { let epoch = viable_epoch.as_mut(); trace!(target: "sassafras", "Importing commitments of length: {}", commitments.len()); diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index 2b98d126bc8a8..d9529026e956b 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -15,11 +15,17 @@ // along with Substrate. If not, see . //! Verification for Sassafras headers. -use sp_runtime::traits::DigestItemFor; -use sp_consensus_sassafras::{SlotNumber, AuthorityId}; -use sp_consensus_sassafras::digests::{PreDigest, CompatibleDigestItem}; +use sp_core::crypto::Pair; +use sp_runtime::traits::{Header as HeaderT, DigestItemFor}; +use sp_consensus_sassafras::{ + SlotNumber, AuthorityId, AuthorityPair, AuthoritySignature, +}; +use sp_consensus_sassafras::digests::{ + PreDigest, CompatibleDigestItem, PrimaryPreDigest, SecondaryPreDigest, +}; use sc_consensus_slots::CheckedHeader; -use super::{Epoch, BlockT, Error}; +use log::{trace, debug}; +use super::{Epoch, BlockT, Error, find_pre_digest}; /// Sassafras verification parameters pub(super) struct VerificationParams<'a, B: 'a + BlockT> { @@ -53,7 +59,66 @@ pub(super) fn check_header( ) -> Result>, Error> where DigestItemFor: CompatibleDigestItem, { - unimplemented!() + let VerificationParams { + mut header, + pre_digest, + slot_now, + epoch, + config, + } = params; + + let authorities = &epoch.validating.authorities; + let pre_digest = pre_digest.map(Ok).unwrap_or_else(|| find_pre_digest::(&header))?; + + trace!(target: "babe", "Checking header"); + let seal = match header.digest_mut().pop() { + Some(x) => x, + None => return Err(Error::HeaderUnsealed(header.hash())), + }; + + let sig = seal.as_sassafras_seal().ok_or_else(|| Error::HeaderBadSeal(header.hash()))?; + + // the pre-hash of the header doesn't include the seal + // and that's what we sign + let pre_hash = header.hash(); + + if pre_digest.slot_number() 
> slot_now { + header.digest_mut().push(seal); + return Ok(CheckedHeader::Deferred(header, pre_digest.slot_number())); + } + + let author = match authorities.get(pre_digest.authority_index() as usize) { + Some(author) => author.0.clone(), + None => return Err(Error::SlotAuthorNotFound), + }; + + match &pre_digest { + PreDigest::Primary { .. } => { + debug!(target: "sassafras", "Verifying Primary block"); + + unimplemented!() + }, + PreDigest::Secondary(digest) if config.secondary_slots => { + debug!(target: "sassafras", "Verifying Secondary block"); + + check_secondary_header::( + pre_hash, + digest, + sig, + &epoch, + )?; + }, + _ => { + return Err(Error::SecondarySlotAssignmentsDisabled); + } + } + + let info = VerifiedHeaderInfo { + pre_digest: CompatibleDigestItem::sassafras_pre_digest(pre_digest), + seal, + author, + }; + Ok(CheckedHeader::Checked(header, info)) } pub(super) struct VerifiedHeaderInfo { @@ -61,3 +126,44 @@ pub(super) struct VerifiedHeaderInfo { pub(super) seal: DigestItemFor, pub(super) author: AuthorityId, } + +/// Check a primary slot proposal header. +fn check_primary_header( + pre_hash: B::Hash, + pre_digest: &PrimaryPreDigest, + signature: AuthoritySignature, + epoch: &Epoch, +) -> Result<(), Error> { + unimplemented!() +} + +/// Check a secondary slot proposal header. We validate that the given header is +/// properly signed by the expected authority, which we have a deterministic way +/// of computing. Additionally, the weight of this block must stay the same +/// compared to its parent since it is a secondary block. +fn check_secondary_header( + pre_hash: B::Hash, + pre_digest: &SecondaryPreDigest, + signature: AuthoritySignature, + epoch: &Epoch, +) -> Result<(), Error> { + // check the signature is valid under the expected authority and + // chain state. 
+ let expected_author = super::authorship::secondary_slot_author( + pre_digest.slot_number, + &epoch.validating.authorities, + epoch.validating.randomness, + ).ok_or_else(|| Error::NoSecondaryAuthorExpected)?; + + let author = &epoch.validating.authorities[pre_digest.authority_index as usize].0; + + if expected_author != author { + return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); + } + + if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { + Ok(()) + } else { + Err(Error::BadSignature(pre_hash)) + } +} diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 8b5bb34651454..18f384aef5634 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -37,7 +37,7 @@ use sp_inherents::{InherentIdentifier, InherentData, ProvideInherent, MakeFatalE use sp_consensus_sassafras::{ SASSAFRAS_ENGINE_ID, ConsensusLog, SassafrasAuthorityWeight, SlotNumber, inherents::{INHERENT_IDENTIFIER, SassafrasInherentData}, - digests::{NextEpochDescriptor, PreDigest}, + digests::{NextEpochDescriptor, PreDigest, PrimaryPreDigest}, }; pub use sp_consensus_sassafras::{ AuthorityId, RawVRFOutput, VRFOutput, VRF_OUTPUT_LENGTH, PUBLIC_KEY_LENGTH @@ -209,12 +209,7 @@ impl FindAuthor for Module { for (id, mut data) in digests.into_iter() { if id == SASSAFRAS_ENGINE_ID { let pre_digest = PreDigest::decode(&mut data).ok()?; - return Some(match pre_digest { - PreDigest::Primary { authority_index, .. } => - authority_index, - PreDigest::Secondary { authority_index, .. } => - authority_index, - }); + return Some(pre_digest.authority_index()) } } @@ -427,7 +422,7 @@ impl Module { CurrentSlot::put(digest.slot_number()); - if let PreDigest::Primary { post_vrf_output, .. } = digest { + if let PreDigest::Primary(PrimaryPreDigest { post_vrf_output, .. }) = digest { // place the VRF output into the `Initialized` storage item // and it'll be put onto the under-construction randomness // later, once we've decided which epoch this block is in. 
diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index ec179bf6eab65..775bf5bc8a077 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -14,7 +14,7 @@ sp-api = { version = "2.0.0", default-features = false, path = "../../api" } sp-consensus = { version = "0.8", optional = true, path = "../common" } sp-inherents = { path = "../../inherents", default-features = false } sp-timestamp = { path = "../../timestamp", default-features = false } -sp-runtime = { path = "../../runtime", default-features = false } +sp-runtime = { path = "../../runtime", default-features = false } sp-application-crypto = { path = "../../application-crypto", default-features = false } [features] diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 5211806b0c957..460f63247ca2d 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -87,6 +87,38 @@ impl CompatibleDigestItem for DigestItem where } } +/// A primary Sassafras pre-digest. +#[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq)] +pub struct PrimaryPreDigest { + /// Index of ticket VRF proof that has been previously committed. + pub ticket_vrf_index: VRFIndex, + /// Attempt number of the ticket VRF proof. + pub ticket_vrf_attempt: u64, + /// Reveal of ticket VRF output. + pub ticket_vrf_output: VRFOutput, + /// Validator index. + pub authority_index: AuthorityIndex, + /// Corresponding slot number. + pub slot_number: SlotNumber, + /// Secondary "Post Block VRF" proof. + pub post_vrf_proof: VRFProof, + /// Secondary "Post Block VRF" output. + pub post_vrf_output: VRFOutput, + /// Additional commitments posted directly at pre-digest. + pub commitments: Vec, +} + +/// A secondary Sassafras pre-digest. 
+#[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq)] +pub struct SecondaryPreDigest { + /// Authority index. + pub authority_index: AuthorityIndex, + /// Slot number. + pub slot_number: SlotNumber, + /// Additional commitments posted directly at pre-digest. + pub commitments: Vec, +} + /// A Sassafras pre-digest. The validator pre-commit a VRF proof at `vrf_index`, and now reveal it /// as `vrf_output`. /// @@ -94,49 +126,25 @@ impl CompatibleDigestItem for DigestItem where #[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq)] pub enum PreDigest { /// A primary VRF-based slot-assignment. - Primary { - /// Index of ticket VRF proof that has been previously committed. - ticket_vrf_index: VRFIndex, - /// Attempt number of the ticket VRF proof. - ticket_vrf_attempt: u64, - /// Reveal of tocket VRF output. - ticket_vrf_output: VRFOutput, - /// Validator index. - authority_index: AuthorityIndex, - /// Corresponding slot number. - slot_number: SlotNumber, - /// Secondary "Post Block VRF" proof. - post_vrf_proof: VRFProof, - /// Secondary "Post Block VRF" output. - post_vrf_output: VRFOutput, - /// Additional commitments posted directly at pre-digest. - commitments: Vec, - }, + Primary(PrimaryPreDigest), /// A secondary deterministic slot assignment. - Secondary { - /// Authority index. - authority_index: AuthorityIndex, - /// Slot number. - slot_number: SlotNumber, - /// Additional commitments posted directly at pre-digest. - commitments: Vec, - }, + Secondary(SecondaryPreDigest), } impl PreDigest { /// Returns the slot number of the pre digest. pub fn authority_index(&self) -> AuthorityIndex { match self { - PreDigest::Primary { authority_index, .. } => *authority_index, - PreDigest::Secondary { authority_index, .. } => *authority_index, + PreDigest::Primary(p) => p.authority_index, + PreDigest::Secondary(s) => s.authority_index, } } /// Returns the slot number of the pre digest. 
pub fn slot_number(&self) -> SlotNumber { match self { - PreDigest::Primary { slot_number, .. } => *slot_number, - PreDigest::Secondary { slot_number, .. } => *slot_number, + PreDigest::Primary(p) => p.slot_number, + PreDigest::Secondary(s) => s.slot_number, } } @@ -144,8 +152,8 @@ impl PreDigest { /// of the chain. pub fn added_weight(&self) -> super::SassafrasBlockWeight { match self { - PreDigest::Primary { .. } => 1, - PreDigest::Secondary { .. } => 0, + PreDigest::Primary(_) => 1, + PreDigest::Secondary(_) => 0, } } } From ea8d5fecc0b7316001f72d0d65ee1f31ab8132f3 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Tue, 18 Feb 2020 21:10:55 +0100 Subject: [PATCH 51/75] Primary claim verification logic --- client/consensus/sassafras/src/lib.rs | 2 + .../consensus/sassafras/src/verification.rs | 64 +++++++++++++++++-- 2 files changed, 62 insertions(+), 4 deletions(-) diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index e2e9745c78779..7b24833429fc3 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -256,6 +256,8 @@ enum Error { HeaderUnsealed(B::Hash), #[display(fmt = "Slot author not found")] SlotAuthorNotFound, + #[display(fmt = "Proof for a primary claim not found")] + ProofNotFound, #[display(fmt = "Secondary slot assignments are disabled for the current epoch.")] SecondarySlotAssignmentsDisabled, #[display(fmt = "Bad signature on {:?}", _0)] diff --git a/client/consensus/sassafras/src/verification.rs b/client/consensus/sassafras/src/verification.rs index d9529026e956b..faa61efa87c73 100644 --- a/client/consensus/sassafras/src/verification.rs +++ b/client/consensus/sassafras/src/verification.rs @@ -15,7 +15,7 @@ // along with Substrate. If not, see . //! Verification for Sassafras headers. 
-use sp_core::crypto::Pair; +use sp_core::crypto::{Pair, Public}; use sp_runtime::traits::{Header as HeaderT, DigestItemFor}; use sp_consensus_sassafras::{ SlotNumber, AuthorityId, AuthorityPair, AuthoritySignature, @@ -93,10 +93,15 @@ pub(super) fn check_header( }; match &pre_digest { - PreDigest::Primary { .. } => { + PreDigest::Primary(digest) => { debug!(target: "sassafras", "Verifying Primary block"); - unimplemented!() + check_primary_header::( + pre_hash, + digest, + sig, + &epoch, + )?; }, PreDigest::Secondary(digest) if config.secondary_slots => { debug!(target: "sassafras", "Verifying Secondary block"); @@ -134,7 +139,58 @@ fn check_primary_header( signature: AuthoritySignature, epoch: &Epoch, ) -> Result<(), Error> { - unimplemented!() + let ticket_vrf_proof = epoch.validating.proofs.iter() + .find(|p| p.0 == pre_digest.slot_number) + .ok_or_else(|| Error::ProofNotFound)? + .1 + .clone(); + let author = &epoch.validating.authorities[pre_digest.authority_index as usize].0; + + if !AuthorityPair::verify(&signature, pre_hash, &author) { + return Err(Error::BadSignature(pre_hash)) + } + + let (ticket_inout, _) = { + let ticket_transcript = crate::authorship::make_ticket_transcript( + &epoch.validating.randomness, + pre_digest.ticket_vrf_attempt, + epoch.validating.epoch_index + ); + + schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { + p.vrf_verify( + ticket_transcript, + &pre_digest.ticket_vrf_output, + &ticket_vrf_proof, + ) + }).map_err(|s| Error::VRFVerificationFailed(s))? 
+ }; + + let ticket_threshold = crate::authorship::calculate_primary_threshold( + epoch.validating.threshold(), + &epoch.validating.authorities, + pre_digest.authority_index as usize, + ); + + if !crate::authorship::check_primary_threshold(&ticket_inout, ticket_threshold) { + return Err(Error::VRFVerificationOfBlockFailed(author.clone(), ticket_threshold)); + } + + let post_transcript = crate::authorship::make_post_transcript( + &epoch.validating.randomness, + pre_digest.slot_number, + epoch.validating.epoch_index, + ); + + schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { + p.vrf_verify( + post_transcript, + &pre_digest.post_vrf_output, + &pre_digest.post_vrf_proof, + ) + }).map_err(|s| Error::VRFVerificationFailed(s))?; + + Ok(()) } /// Check a secondary slot proposal header. We validate that the given header is From 5395a9ca2b811aab2d420bd4b82a68b59fa7669b Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 2 Mar 2020 22:17:45 +0100 Subject: [PATCH 52/75] Fix version declarations --- Cargo.lock | 2 +- bin/sassafras-template/node/Cargo.toml | 35 +++++++-------- bin/sassafras-template/runtime/Cargo.toml | 44 +++++++++--------- client/consensus/sassafras/Cargo.toml | 54 +++++++++++------------ frame/babe/Cargo.toml | 2 +- frame/sassafras/Cargo.toml | 22 ++++----- primitives/consensus/sassafras/Cargo.toml | 16 +++---- 7 files changed, 87 insertions(+), 88 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 413caed7b142e..d4571a915cb0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6029,7 +6029,7 @@ dependencies = [ "fork-tree", "futures 0.1.29", "futures 0.3.4", - "futures-timer 3.0.1", + "futures-timer 3.0.2", "log 0.4.8", "merlin", "num-bigint", diff --git a/bin/sassafras-template/node/Cargo.toml b/bin/sassafras-template/node/Cargo.toml index d6028e754a2c4..daa0b5e967e28 100644 --- a/bin/sassafras-template/node/Cargo.toml +++ b/bin/sassafras-template/node/Cargo.toml @@ -14,25 +14,24 @@ futures = "0.3.1" log = "0.4.8" structopt = "0.3.8" -sc-cli 
= { version = "0.8.0", path = "../../../client/cli" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sc-executor = { version = "0.8", path = "../../../client/executor" } -sc-service = { version = "0.8", path = "../../../client/service" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -sc-network = { version = "0.8", path = "../../../client/network" } -sc-consensus-sassafras = { version = "0.8", path = "../../../client/consensus/sassafras" } -sp-consensus-sassafras = { version = "0.8", path = "../../../primitives/consensus/sassafras" } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } -sc-client = { version = "0.8", path = "../../../client/" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sc-cli = { path = "../../../client/cli" } +sp-core = { path = "../../../primitives/core" } +sc-executor = { path = "../../../client/executor" } +sc-service = { path = "../../../client/service" } +sp-inherents = { path = "../../../primitives/inherents" } +sc-transaction-pool = { path = "../../../client/transaction-pool" } +sp-transaction-pool = { path = "../../../primitives/transaction-pool" } +sc-network = { path = "../../../client/network" } +sc-consensus-sassafras = { path = "../../../client/consensus/sassafras" } +sp-consensus-sassafras = { path = "../../../primitives/consensus/sassafras" } +sp-consensus = { path = "../../../primitives/consensus/common" } +sc-client = { path = "../../../client/" } +sp-runtime = { path = "../../../primitives/runtime" } sc-basic-authorship = { path = "../../../client/basic-authorship" } -grandpa = { version = "0.8", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } -grandpa-primitives = { version = "2.0.0", package = 
"sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } - -sassafras-template-runtime = { version = "2.0.0", path = "../runtime" } +grandpa = { package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +grandpa-primitives = { package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } +sassafras-template-runtime = { path = "../runtime" } [build-dependencies] vergen = "3.0.4" -build-script-utils = { version = "2.0.0", package = "substrate-build-script-utils", path = "../../../utils/build-script-utils" } +build-script-utils = { package = "substrate-build-script-utils", path = "../../../utils/build-script-utils" } diff --git a/bin/sassafras-template/runtime/Cargo.toml b/bin/sassafras-template/runtime/Cargo.toml index 70fdff03cee01..69489e2da1e3d 100644 --- a/bin/sassafras-template/runtime/Cargo.toml +++ b/bin/sassafras-template/runtime/Cargo.toml @@ -8,33 +8,33 @@ license = "Unlicense" [dependencies] codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sassafras = { version = "0.8.0", default-features = false, package = "pallet-sassafras", path = "../../../frame/sassafras" } -balances = { version = "2.0.0", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } -frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" } -grandpa = { version = "2.0.0", default-features = false, package = "pallet-grandpa", path = "../../../frame/grandpa" } -indices = { version = "2.0.0", default-features = false, package = "pallet-indices", path = "../../../frame/indices" } -randomness-collective-flip = { version = "2.0.0", default-features = false, package = "pallet-randomness-collective-flip", path = "../../../frame/randomness-collective-flip" } -sudo = { version = "2.0.0", default-features = false, package = "pallet-sudo", path = "../../../frame/sudo" } -system = { version = "2.0.0", default-features 
= false, package = "frame-system", path = "../../../frame/system" } -timestamp = { version = "2.0.0", default-features = false, package = "pallet-timestamp", path = "../../../frame/timestamp" } -transaction-payment = { version = "2.0.0", default-features = false, package = "pallet-transaction-payment", path = "../../../frame/transaction-payment" } -frame-executive = { version = "2.0.0", default-features = false, path = "../../../frame/executive" } +sassafras = { default-features = false, package = "pallet-sassafras", path = "../../../frame/sassafras" } +balances = { default-features = false, package = "pallet-balances", path = "../../../frame/balances" } +frame-support = { default-features = false, path = "../../../frame/support" } +grandpa = { default-features = false, package = "pallet-grandpa", path = "../../../frame/grandpa" } +indices = { default-features = false, package = "pallet-indices", path = "../../../frame/indices" } +randomness-collective-flip = { default-features = false, package = "pallet-randomness-collective-flip", path = "../../../frame/randomness-collective-flip" } +sudo = { default-features = false, package = "pallet-sudo", path = "../../../frame/sudo" } +system = { default-features = false, package = "frame-system", path = "../../../frame/system" } +timestamp = { default-features = false, package = "pallet-timestamp", path = "../../../frame/timestamp" } +transaction-payment = { default-features = false, package = "pallet-transaction-payment", path = "../../../frame/transaction-payment" } +frame-executive = { default-features = false, path = "../../../frame/executive" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0", default-features = false, path = "../../../primitives/api" } +sp-api = { default-features = false, path = "../../../primitives/api" } sp-block-builder = { path = "../../../primitives/block-builder", default-features = false} -sp-consensus-sassafras = { version = "0.8", 
default-features = false, path = "../../../primitives/consensus/sassafras" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } +sp-consensus-sassafras = { default-features = false, path = "../../../primitives/consensus/sassafras" } +sp-core = { default-features = false, path = "../../../primitives/core" } sp-inherents = { path = "../../../primitives/inherents", default-features = false} -sp-io = { version = "2.0.0", default-features = false, path = "../../../primitives/io" } -sp-offchain = { version = "2.0.0", default-features = false, path = "../../../primitives/offchain" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-session = { version = "2.0.0", default-features = false, path = "../../../primitives/session" } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0", default-features = false, path = "../../../primitives/version" } +sp-io = { default-features = false, path = "../../../primitives/io" } +sp-offchain = { default-features = false, path = "../../../primitives/offchain" } +sp-runtime = { default-features = false, path = "../../../primitives/runtime" } +sp-session = { default-features = false, path = "../../../primitives/session" } +sp-std = { default-features = false, path = "../../../primitives/std" } +sp-transaction-pool = { default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { default-features = false, path = "../../../primitives/version" } [build-dependencies] -wasm-builder-runner = { version = "1.0.4", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +wasm-builder-runner = { package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } [features] default = 
["std"] diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 285ef35ef08aa..6aaf384befc1d 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -8,29 +8,29 @@ license = "GPL-3.0" [dependencies] codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } -sp-consensus-sassafras = { version = "0.8", path = "../../../primitives/consensus/sassafras" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } +sp-consensus-sassafras = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/sassafras" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../../primitives/application-crypto" } num-bigint = "0.2.3" num-rational = "0.2.2" num-traits = "0.2.8" -sp-version = { version = "2.0.0", path = "../../../primitives/version" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-timestamp = { version = "2.0.0", path = "../../../primitives/timestamp" } -sc-telemetry = { version = "2.0.0", path = "../../telemetry" } -sc-keystore = { version = "2.0.0", path = "../../keystore" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sc-client = { version = "0.8", path = "../../" } -sc-consensus-epochs = { version = "0.8", path = "../epochs" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } -sc-consensus-uncles = { version = "0.8", path = "../uncles" } -sc-consensus-slots = { version = "0.8", path = 
"../slots" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -fork-tree = { version = "2.0.0", path = "../../../utils/fork-tree" } +sp-version = { version = "2.0.0-alpha.2", path = "../../../primitives/version" } +sp-io = { version = "2.0.0-alpha.2", path = "../../../primitives/io" } +sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } +sp-timestamp = { version = "2.0.0-alpha.2", path = "../../../primitives/timestamp" } +sc-telemetry = { version = "2.0.0-alpha.2", path = "../../telemetry" } +sc-keystore = { version = "2.0.0-alpha.2", path = "../../keystore" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../../api" } +sc-client = { version = "0.8.0-alpha.2", path = "../../" } +sc-consensus-epochs = { version = "0.8.0-alpha.2", path = "../epochs" } +sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } +sp-block-builder = { version = "2.0.0-alpha.2", path = "../../../primitives/block-builder" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +sc-consensus-uncles = { version = "0.8.0-alpha.2", path = "../uncles" } +sc-consensus-slots = { version = "0.8.0-alpha.2", path = "../slots" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +fork-tree = { version = "2.0.0-alpha.2", path = "../../../utils/fork-tree" } futures = "0.3.1" futures-timer = "3.0.1" parking_lot = "0.10.0" @@ -42,13 +42,13 @@ pdqselect = "0.1.0" derive_more = "0.99.2" [dev-dependencies] -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -sc-executor = { version = "0.8", path = "../../executor" } -sc-network = { version = "0.8", path = "../../network" } -sc-network-test = { version = "0.8.0", path = "../../network/test" } -sc-service = { version = "0.8", path = "../../service" } -substrate-test-runtime-client = { version = "2.0.0", 
path = "../../../test-utils/runtime/client" } -sc-block-builder = { version = "0.8", path = "../../block-builder" } +sp-keyring = { version = "2.0.0-alpha.2", path = "../../../primitives/keyring" } +sc-executor = { version = "0.8.0-alpha.2", path = "../../executor" } +sc-network = { version = "0.8.0-alpha.2", path = "../../network" } +sc-network-test = { version = "0.8.0-alpha.2", path = "../../network/test" } +sc-service = { version = "0.8.0-alpha.2", path = "../../service" } +substrate-test-runtime-client = { version = "2.0.0-alpha.2", path = "../../../test-utils/runtime/client" } +sc-block-builder = { version = "0.8.0-alpha.2", path = "../../block-builder" } tokio = "0.1.22" env_logger = "0.7.0" tempfile = "3.1.0" diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index 14176a1ea479f..0ee10678526bf 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -22,7 +22,7 @@ pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/timestamp" } pallet-session = { version = "2.0.0-alpha.2", default-features = false, path = "../session" } sp-consensus-babe = { version = "0.8.0-alpha.2", default-features = false, path = "../../primitives/consensus/babe" } -sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} +sp-io ={ path = "../../primitives/io", default-features = false, version = "2.0.0-alpha.2"} [dev-dependencies] lazy_static = "1.4.0" diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index 46c15530c0685..ce5164e8e3297 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -9,17 +9,17 @@ license = "GPL-3.0" hex-literal = "0.2.1" codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-inherents = { version = "2.0.0", default-features = false, path = 
"../../primitives/inherents" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../primitives/timestamp" } -pallet-session = { version = "2.0.0", default-features = false, path = "../session" } -sp-consensus-sassafras = { version = "0.8", default-features = false, path = "../../primitives/consensus/sassafras" } -sp-io = { path = "../../primitives/io", default-features = false } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../timestamp" } +sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/timestamp" } +pallet-session = { version = "2.0.0-alpha.2", default-features = false, path = "../session" } +sp-consensus-sassafras = { version = "0.8.0-alpha.2", default-features = false, path = "../../primitives/consensus/sassafras" } +sp-io = { 
version = "2.0.0-alpha.2", path = "../../primitives/io", default-features = false } [features] default = ["std"] diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index 775bf5bc8a077..9f7992357ed86 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -8,14 +8,14 @@ edition = "2018" [dependencies] codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"], optional = true } -sp-std = { path = "../../std", default-features = false } -sp-core = { path = "../../core", default-features = false } -sp-api = { version = "2.0.0", default-features = false, path = "../../api" } -sp-consensus = { version = "0.8", optional = true, path = "../common" } -sp-inherents = { path = "../../inherents", default-features = false } -sp-timestamp = { path = "../../timestamp", default-features = false } -sp-runtime = { path = "../../runtime", default-features = false } -sp-application-crypto = { path = "../../application-crypto", default-features = false } +sp-std = { version = "2.0.0-alpha.2", path = "../../std", default-features = false } +sp-core = { version = "2.0.0-alpha.2", path = "../../core", default-features = false } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../api" } +sp-consensus = { version = "0.8.0-alpha.2", optional = true, path = "../common" } +sp-inherents = { version = "2.0.0-alpha.2", path = "../../inherents", default-features = false } +sp-timestamp = { version = "2.0.0-alpha.2", path = "../../timestamp", default-features = false } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../runtime", default-features = false } +sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../application-crypto", default-features = false } [features] default = ["std"] From c7b3af54e28091d95bbbbe1e41fd06bd6492d18b Mon Sep 17 00:00:00 2001 From: 
Wei Tang Date: Tue, 3 Mar 2020 23:05:13 +0100 Subject: [PATCH 53/75] Fix babe RPC compile --- client/consensus/babe/rpc/src/lib.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index 1ea7e423dc7a9..d1b44e053c44f 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -182,7 +182,7 @@ fn epoch_data( SC: SelectChain, { let parent = select_chain.best_chain()?; - epoch_changes.lock().epoch_for_child_of( + epoch_changes.lock().epoch_data_for_child_of( descendent_query(&**client), &parent.hash(), parent.number().clone(), @@ -190,7 +190,6 @@ fn epoch_data( |slot| babe_config.genesis_epoch(slot), ) .map_err(|e| Error::Consensus(ConsensusError::ChainLookup(format!("{:?}", e))))? - .map(|e| e.into_inner()) .ok_or(Error::Consensus(ConsensusError::InvalidAuthoritiesSet)) } From 3e798750af5571ce4cd15ab3c9d9a6cacc0fdf28 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Tue, 3 Mar 2020 23:34:55 +0100 Subject: [PATCH 54/75] Init communication module and fix sassafras template compile --- Cargo.lock | 1 + bin/sassafras-template/node/src/chain_spec.rs | 5 +- bin/sassafras-template/node/src/command.rs | 39 +++++---- bin/sassafras-template/node/src/main.rs | 6 +- bin/sassafras-template/node/src/service.rs | 84 ++++++++----------- bin/sassafras-template/runtime/src/lib.rs | 46 +++++----- client/consensus/sassafras/Cargo.toml | 2 + .../sassafras/src/communication/mod.rs | 54 ++++++++++++ client/consensus/sassafras/src/lib.rs | 55 ++++-------- frame/sassafras/src/lib.rs | 16 ++-- 10 files changed, 162 insertions(+), 146 deletions(-) create mode 100644 client/consensus/sassafras/src/communication/mod.rs diff --git a/Cargo.lock b/Cargo.lock index d4571a915cb0a..ea14e5fa18498 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6048,6 +6048,7 @@ dependencies = [ "sc-executor", "sc-keystore", "sc-network", + "sc-network-gossip", "sc-network-test", "sc-service", 
"sc-telemetry", diff --git a/bin/sassafras-template/node/src/chain_spec.rs b/bin/sassafras-template/node/src/chain_spec.rs index 08bd6b8258895..abc722fba5724 100644 --- a/bin/sassafras-template/node/src/chain_spec.rs +++ b/bin/sassafras-template/node/src/chain_spec.rs @@ -1,7 +1,7 @@ use sp_core::{Pair, Public, sr25519}; use sassafras_template_runtime::{ AccountId, BalancesConfig, GenesisConfig, GrandpaConfig, SassafrasConfig, - SudoConfig, IndicesConfig, SystemConfig, WASM_BINARY, Signature + SudoConfig, SystemConfig, WASM_BINARY, Signature }; use grandpa_primitives::AuthorityId as GrandpaId; use sp_consensus_sassafras::AuthorityId as SassafrasId; @@ -124,9 +124,6 @@ fn testnet_genesis(initial_authorities: Vec<(SassafrasId, GrandpaId)>, code: WASM_BINARY.to_vec(), changes_trie_config: Default::default(), }), - indices: Some(IndicesConfig { - ids: endowed_accounts.clone(), - }), balances: Some(BalancesConfig { balances: endowed_accounts.iter().cloned().map(|k|(k, 1 << 60)).collect(), }), diff --git a/bin/sassafras-template/node/src/command.rs b/bin/sassafras-template/node/src/command.rs index 598b3345bca40..012c6389c3098 100644 --- a/bin/sassafras-template/node/src/command.rs +++ b/bin/sassafras-template/node/src/command.rs @@ -14,32 +14,35 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use sc_cli::{VersionInfo, error}; +use sc_cli::VersionInfo; use crate::service; use crate::chain_spec; use crate::cli::Cli; /// Parse and run command line arguments -pub fn run(version: VersionInfo) -> error::Result<()> { +pub fn run(version: VersionInfo) -> sc_cli::Result<()> { let opt = sc_cli::from_args::(&version); - let config = sc_service::Configuration::new(&version); + let mut config = sc_service::Configuration::from_version(&version); match opt.subcommand { - Some(subcommand) => sc_cli::run_subcommand( - config, - subcommand, - chain_spec::load_spec, - |config: _| Ok(new_full_start!(config).0), - &version, - ), - None => sc_cli::run( - config, - opt.run, - service::new_light, - service::new_full, - chain_spec::load_spec, - &version, - ) + Some(subcommand) => { + subcommand.init(&version)?; + subcommand.update_config(&mut config, chain_spec::load_spec, &version)?; + subcommand.run( + config, + |config: _| Ok(new_full_start!(config).0), + ) + }, + None => { + opt.run.init(&version)?; + opt.run.update_config(&mut config, chain_spec::load_spec, &version)?; + opt.run.run( + config, + service::new_light, + service::new_full, + &version, + ) + }, } } diff --git a/bin/sassafras-template/node/src/main.rs b/bin/sassafras-template/node/src/main.rs index 9d0a57d77a851..91b2c257e0cd7 100644 --- a/bin/sassafras-template/node/src/main.rs +++ b/bin/sassafras-template/node/src/main.rs @@ -7,10 +7,8 @@ mod service; mod cli; mod command; -pub use sc_cli::{VersionInfo, error}; - -fn main() -> Result<(), error::Error> { - let version = VersionInfo { +fn main() -> sc_cli::Result<()> { + let version = sc_cli::VersionInfo { name: "Substrate Node", commit: env!("VERGEN_SHA_SHORT"), version: env!("CARGO_PKG_VERSION"), diff --git a/bin/sassafras-template/node/src/service.rs b/bin/sassafras-template/node/src/service.rs index 740ca06f6fccf..fbebab665ced9 100644 --- a/bin/sassafras-template/node/src/service.rs +++ b/bin/sassafras-template/node/src/service.rs @@ -7,7 +7,6 @@ use 
sc_client::LongestChain; use sassafras_template_runtime::{self, GenesisConfig, opaque::Block, RuntimeApi}; use sc_service::{error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder}; use sp_inherents::InherentDataProviders; -use sc_network::{construct_simple_protocol}; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; @@ -18,11 +17,6 @@ native_executor_instance!( sassafras_template_runtime::native_version, ); -construct_simple_protocol! { - /// Demo protocol attachment for substrate. - pub struct NodeProtocol where Block = Block { } -} - /// Starts a `ServiceBuilder` for a full service. /// /// Use this macro if you don't actually need the full service, but just the builder in order to @@ -98,17 +92,17 @@ pub fn new_full(config: Configuration) import_setup.take() .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); - let service = builder.with_network_protocol(|_| Ok(NodeProtocol::new()))? + let service = builder .with_finality_proof_provider(|client, backend| Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _) )? 
.build()?; if participates_in_consensus { - let proposer = sc_basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - }; + let proposer = sc_basic_authorship::ProposerFactory::new( + service.client(), + service.transaction_pool() + ); let client = service.client(); let select_chain = service.select_chain() @@ -152,41 +146,36 @@ pub fn new_full(config: Configuration) is_authority, }; - match (is_authority, disable_grandpa) { - (false, false) => { - // start the lightweight GRANDPA observer - service.spawn_task("grandpa-observer", grandpa::run_grandpa_observer( - grandpa_config, - grandpa_link, - service.network(), - service.on_exit(), - service.spawn_task_handle(), - )?); - }, - (true, false) => { - // start the full GRANDPA voter - let voter_config = grandpa::GrandpaParams { - config: grandpa_config, - link: grandpa_link, - network: service.network(), - inherent_data_providers: inherent_data_providers.clone(), - on_exit: service.on_exit(), - telemetry_on_connect: Some(service.telemetry_on_connect_stream()), - voting_rule: grandpa::VotingRulesBuilder::default().build(), - executor: service.spawn_task_handle(), - }; - - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. - service.spawn_essential_task("grandpa", grandpa::run_grandpa_voter(voter_config)?); - }, - (_, true) => { - grandpa::setup_disabled_grandpa( - service.client(), - &inherent_data_providers, - service.network(), - )?; - }, + let enable_grandpa = !disable_grandpa; + if enable_grandpa { + // start the full GRANDPA voter + // NOTE: non-authorities could run the GRANDPA observer protocol, but at + // this point the full voter should provide better guarantees of block + // and vote data availability than the observer. The observer has not + // been tested extensively yet and having most nodes in a network run it + // could lead to finality stalls. 
+ let grandpa_config = grandpa::GrandpaParams { + config: grandpa_config, + link: grandpa_link, + network: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + on_exit: service.on_exit(), + telemetry_on_connect: Some(service.telemetry_on_connect_stream()), + voting_rule: grandpa::VotingRulesBuilder::default().build(), + }; + + // the GRANDPA voter task is considered infallible, i.e. + // if it fails we take down the service with it. + service.spawn_essential_task( + "grandpa-voter", + grandpa::run_grandpa_voter(grandpa_config)? + ); + } else { + grandpa::setup_disabled_grandpa( + service.client(), + &inherent_data_providers, + service.network(), + )?; } Ok(service) @@ -216,7 +205,7 @@ pub fn new_light(config: Configuration) let fetch_checker = fetcher .map(|fetcher| fetcher.checker().clone()) .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let grandpa_block_import = grandpa::light_block_import::<_, _, _, RuntimeApi>( + let grandpa_block_import = grandpa::light_block_import( client.clone(), backend, &*client.clone(), Arc::new(fetch_checker), )?; @@ -243,7 +232,6 @@ pub fn new_light(config: Configuration) Ok((import_queue, finality_proof_request_builder)) })? - .with_network_protocol(|_| Ok(NodeProtocol::new()))? .with_finality_proof_provider(|client, backend| Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _) )? 
diff --git a/bin/sassafras-template/runtime/src/lib.rs b/bin/sassafras-template/runtime/src/lib.rs index 97990c655830e..85ed233057c46 100644 --- a/bin/sassafras-template/runtime/src/lib.rs +++ b/bin/sassafras-template/runtime/src/lib.rs @@ -15,7 +15,7 @@ use sp_runtime::{ impl_opaque_keys, MultiSignature }; use sp_runtime::traits::{ - BlakeTwo256, Block as BlockT, StaticLookup, Verify, ConvertInto, IdentifyAccount + BlakeTwo256, Block as BlockT, IdentityLookup, Verify, ConvertInto, IdentifyAccount }; use sp_api::impl_runtime_apis; use grandpa::AuthorityList as GrandpaAuthorityList; @@ -128,7 +128,7 @@ impl system::Trait for Runtime { /// The aggregated dispatch type that is available for extrinsics. type Call = Call; /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = Indices; + type Lookup = IdentityLookup; /// The index type for storing how many extrinsics an account has signed. type Index = Index; /// The index type for blocks. @@ -157,6 +157,12 @@ impl system::Trait for Runtime { /// /// This type is being generated by `construct_runtime!`. type ModuleToIndex = ModuleToIndex; + /// What to do if a new account is created. + type OnNewAccount = (); + /// What to do if an account is fully reaped from the system. + type OnKilledAccount = (); + /// The data to be stored in an account. + type AccountData = balances::AccountData; } parameter_types! { @@ -174,18 +180,6 @@ impl grandpa::Trait for Runtime { type Event = Event; } -impl indices::Trait for Runtime { - /// The type for recording indexing into the account enumeration. If this ever overflows, there - /// will be problems! - type AccountIndex = AccountIndex; - /// Use the standard means of resolving an index hint from an id. - type ResolveHint = indices::SimpleResolveHint; - /// Determine whether an account is dead. - type IsDeadAccount = Balances; - /// The ubiquitous event type. - type Event = Event; -} - parameter_types! 
{ pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } @@ -205,16 +199,11 @@ parameter_types! { impl balances::Trait for Runtime { /// The type for recording an account's balance. type Balance = Balance; - /// What to do if an account is fully reaped from the system. - type OnReapAccount = System; - /// What to do if a new account is created. - type OnNewAccount = Indices; /// The ubiquitous event type. type Event = Event; type DustRemoval = (); - type TransferPayment = (); type ExistentialDeposit = ExistentialDeposit; - type CreationFee = CreationFee; + type AccountStore = System; } parameter_types! { @@ -233,7 +222,7 @@ impl transaction_payment::Trait for Runtime { impl sudo::Trait for Runtime { type Event = Event; - type Proposal = Call; + type Call = Call; } construct_runtime!( @@ -242,20 +231,19 @@ construct_runtime!( NodeBlock = opaque::Block, UncheckedExtrinsic = UncheckedExtrinsic { - System: system::{Module, Call, Storage, Config, Event}, + System: system::{Module, Call, Storage, Config, Event}, + RandomnessCollectiveFlip: randomness_collective_flip::{Module, Call, Storage}, Timestamp: timestamp::{Module, Call, Storage, Inherent}, Sassafras: sassafras::{Module, Call, Storage, Config, Inherent(Timestamp)}, Grandpa: grandpa::{Module, Call, Storage, Config, Event}, - Indices: indices, - Balances: balances, + Balances: balances::{Module, Call, Storage, Config, Event}, TransactionPayment: transaction_payment::{Module, Storage}, - Sudo: sudo, - RandomnessCollectiveFlip: randomness_collective_flip::{Module, Call, Storage}, + Sudo: sudo::{Module, Call, Config, Storage, Event}, } ); /// The address format for describing accounts. -pub type Address = ::Source; +pub type Address = AccountId; /// Block header type as expected by this runtime. pub type Header = generic::Header; /// Block type as expected by this runtime. @@ -306,6 +294,10 @@ impl_runtime_apis! 
{ Executive::apply_extrinsic(extrinsic) } + fn apply_trusted_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_trusted_extrinsic(extrinsic) + } + fn finalize_block() -> ::Header { Executive::finalize_block() } diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 6aaf384befc1d..9909c73cdc03d 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -29,6 +29,8 @@ sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockch sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } sc-consensus-uncles = { version = "0.8.0-alpha.2", path = "../uncles" } sc-consensus-slots = { version = "0.8.0-alpha.2", path = "../slots" } +sc-network = { version = "0.8.0-alpha.2", path = "../../network" } +sc-network-gossip = { version = "0.8.0-alpha.2", path = "../../network-gossip" } sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } fork-tree = { version = "2.0.0-alpha.2", path = "../../../utils/fork-tree" } futures = "0.3.1" diff --git a/client/consensus/sassafras/src/communication/mod.rs b/client/consensus/sassafras/src/communication/mod.rs new file mode 100644 index 0000000000000..ded1e8592550c --- /dev/null +++ b/client/consensus/sassafras/src/communication/mod.rs @@ -0,0 +1,54 @@ +use std::{marker::PhantomData, sync::Arc}; +use sp_runtime::traits::Block as BlockT; +use sc_network::PeerId; +use sc_network_gossip::{ + Validator as ValidatorT, ValidatorContext, GossipEngine, Network as GossipNetwork, + ValidationResult, +}; + +pub use sp_consensus_sassafras::SASSAFRAS_ENGINE_ID; +pub const SASSAFRAS_PROTOCOL_NAME: &[u8] = b"/paritytech/sassafras/1"; + +pub struct GossipValidator { + _marker: PhantomData, +} + +impl ValidatorT for GossipValidator { + fn validate( + &self, + context: &mut dyn ValidatorContext, + sender: &PeerId, + data: &[u8] + ) -> ValidationResult { + unimplemented!() + 
} +} + +pub struct NetworkBridge { + service: N, + gossip_engine: GossipEngine, + validator: Arc>, +} + +impl NetworkBridge where + N: GossipNetwork + Clone + Send + 'static, +{ + pub fn new(service: N) -> Self { + let validator = Arc::new(GossipValidator { + _marker: PhantomData, + }); + + let gossip_engine = GossipEngine::new( + service.clone(), + SASSAFRAS_ENGINE_ID, + SASSAFRAS_PROTOCOL_NAME, + validator.clone(), + ); + + Self { + service, + gossip_engine, + validator, + } + } +} diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 7b24833429fc3..8f9a7fb0fd4bf 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -81,6 +81,7 @@ mod aux_schema; mod verification; mod authorship; mod utils; +mod communication; /// Set that are generating. #[derive(Debug, Clone, Encode, Decode)] @@ -570,27 +571,15 @@ impl sc_consensus_slots::SimpleSlotWorker for Sassafra let signature = pair.sign(header_hash.as_ref()); let digest_item = as CompatibleDigestItem>::sassafras_seal(signature); - BlockImportParams { - origin: BlockOrigin::Own, - header, - justification: None, - post_digests: vec![digest_item], - body: Some(body), - storage_changes: Some(storage_changes), - finalized: false, - auxiliary: Vec::new(), // block-weight is written in block import. 
- intermediates: { - let mut intermediates = HashMap::new(); - intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box, - ); - intermediates - }, - fork_choice: None, - allow_missing_state: false, - import_existing: false, - } + let mut params = BlockImportParams::new(BlockOrigin::Own, header); + params.post_digests.push(digest_item); + params.body = Some(body); + params.storage_changes = Some(storage_changes); + params.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(SassafrasIntermediate:: { epoch_descriptor }) as Box, + ); + params }) } @@ -964,29 +953,17 @@ impl Verifier for SassafrasVerifier ?pre_header); - let mut intermediates = HashMap::new(); - intermediates.insert( + let mut block_import_params = BlockImportParams::new(origin, pre_header); + block_import_params.post_digests.push(verified_info.seal); + block_import_params.body = body; + block_import_params.justification = justification; + block_import_params.intermediates.insert( Cow::from(INTERMEDIATE_KEY), Box::new(SassafrasIntermediate:: { epoch_descriptor, }) as Box, ); - let block_import_params = BlockImportParams { - origin, - header: pre_header, - post_digests: vec![verified_info.seal], - body, - storage_changes: None, - finalized: false, - justification, - auxiliary: Vec::new(), - intermediates, - fork_choice: None, - allow_missing_state: false, - import_existing: false, - }; - Ok((block_import_params, Default::default())) } CheckedHeader::Deferred(a, b) => { @@ -1084,7 +1061,7 @@ impl BlockImport for SassafrasBlockImport, new_cache: HashMap>, ) -> Result { - let hash = block.post_header().hash(); + let hash = block.post_hash(); let number = block.header.number().clone(); // early exit if block already in chain, otherwise the check for diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 18f384aef5634..a25cc6a9ab6d6 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -23,10 
+23,10 @@ pub use pallet_timestamp; use sp_std::{result, prelude::*}; -use frame_support::{decl_storage, decl_module, traits::FindAuthor, traits::Get}; +use frame_support::{decl_storage, decl_module, traits::{FindAuthor, Get, Randomness as RandomnessT}}; use sp_timestamp::OnTimestampSet; -use sp_runtime::{generic::DigestItem, ConsensusEngineId, Perbill}; -use sp_runtime::traits::{IsMember, SaturatedConversion, Saturating, RandomnessBeacon}; +use sp_runtime::{generic::DigestItem, ConsensusEngineId, Perbill, PerThing}; +use sp_runtime::traits::{IsMember, SaturatedConversion, Saturating, Hash}; use sp_staking::{ SessionIndex, offence::{Offence, Kind}, @@ -193,9 +193,13 @@ decl_module! { } } -impl RandomnessBeacon for Module { - fn random() -> [u8; VRF_OUTPUT_LENGTH] { - Self::randomness() +impl RandomnessT<::Hash> for Module { + fn random(subject: &[u8]) -> T::Hash { + let mut subject = subject.to_vec(); + subject.reserve(VRF_OUTPUT_LENGTH); + subject.extend_from_slice(&Self::randomness()[..]); + + ::Hashing::hash(&subject[..]) } } From a026e82f6cc5ae007d64e7a0d2dfe0f9df5e6ab3 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 4 Mar 2020 03:00:05 +0100 Subject: [PATCH 55/75] Integrate the communication mod to sassafras template --- bin/sassafras-template/node/src/service.rs | 1 + .../sassafras/src/communication/mod.rs | 11 +++++- client/consensus/sassafras/src/lib.rs | 34 ++++++++++++------- 3 files changed, 33 insertions(+), 13 deletions(-) diff --git a/bin/sassafras-template/node/src/service.rs b/bin/sassafras-template/node/src/service.rs index fbebab665ced9..e31b40f10307e 100644 --- a/bin/sassafras-template/node/src/service.rs +++ b/bin/sassafras-template/node/src/service.rs @@ -122,6 +122,7 @@ pub fn new_full(config: Configuration) force_authoring, sassafras_link, can_author_with, + network: service.network(), }; let sassafras = sc_consensus_sassafras::start_sassafras(sassafras_config)?; diff --git a/client/consensus/sassafras/src/communication/mod.rs 
b/client/consensus/sassafras/src/communication/mod.rs index ded1e8592550c..b2232c79e025c 100644 --- a/client/consensus/sassafras/src/communication/mod.rs +++ b/client/consensus/sassafras/src/communication/mod.rs @@ -1,4 +1,5 @@ -use std::{marker::PhantomData, sync::Arc}; +use std::{marker::PhantomData, sync::Arc, pin::Pin, task::{Poll, Context}}; +use futures::prelude::*; use sp_runtime::traits::Block as BlockT; use sc_network::PeerId; use sc_network_gossip::{ @@ -52,3 +53,11 @@ impl NetworkBridge where } } } + +impl Future for NetworkBridge { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + self.gossip_engine.poll_unpin(cx) + } +} diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 8f9a7fb0fd4bf..2cd07ea0095cc 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -359,7 +359,7 @@ impl std::ops::Deref for Config { } /// Parameters for Sassafras. -pub struct SassafrasParams { +pub struct SassafrasParams { /// The keystore that manages the keys of the node. pub keystore: KeyStorePtr, @@ -391,10 +391,13 @@ pub struct SassafrasParams { /// Checks if the current native implementation can author with a runtime at a given block. pub can_author_with: CAW, + + /// The network instance. + pub network: N, } /// Start the sassafras worker. The returned future should be run in a tokio runtime. 
-pub fn start_sassafras(SassafrasParams { +pub fn start_sassafras(SassafrasParams { keystore, client, select_chain, @@ -405,7 +408,8 @@ pub fn start_sassafras(SassafrasParams { force_authoring, sassafras_link, can_author_with, -}: SassafrasParams) -> Result< + network, +}: SassafrasParams) -> Result< impl futures::Future, sp_consensus::Error, > where @@ -421,6 +425,7 @@ pub fn start_sassafras(SassafrasParams { Error: std::error::Error + Send + From + From + 'static, SO: SyncOracle + Send + Sync + Clone, CAW: CanAuthorWith + Send, + N: sc_network_gossip::Network + Clone + Send + Unpin + 'static, { let config = sassafras_link.config; let worker = SassafrasWorker { @@ -441,16 +446,21 @@ pub fn start_sassafras(SassafrasParams { &inherent_data_providers, )?; + let network = communication::NetworkBridge::new(network); + info!(target: "sassafras", "Starting Sassafras authorship worker"); - Ok(sc_consensus_slots::start_slot_worker( - config.0, - select_chain, - worker, - sync_oracle, - inherent_data_providers, - sassafras_link.time_source, - can_author_with, - )) + Ok(future::select( + sc_consensus_slots::start_slot_worker( + config.0, + select_chain, + worker, + sync_oracle, + inherent_data_providers, + sassafras_link.time_source, + can_author_with, + ), + network + ).map(drop)) } struct SassafrasWorker { From eb3c30b9b98a9e0facc31a2f1ca9469fc2f7561e Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 4 Mar 2020 18:20:50 +0100 Subject: [PATCH 56/75] Change PendingProof from tuple to struct for clarity --- client/consensus/sassafras/src/authorship.rs | 34 +++++++++++--------- client/consensus/sassafras/src/lib.rs | 23 ++++++++++--- 2 files changed, 37 insertions(+), 20 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index ae4741ab482cc..a1aad11789f51 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -32,7 +32,7 @@ use sp_consensus_sassafras::{ 
use sc_consensus_epochs::ViableEpochDescriptor; use sc_keystore::KeyStorePtr; use log::trace; -use super::{Epoch, GeneratingSet}; +use super::{Epoch, GeneratingSet, PendingProof}; /// Calculates the primary selection threshold for a given authority, taking /// into account `c` (`1 - c` represents the probability of a slot being empty). @@ -103,9 +103,10 @@ fn claim_primary_slot( let ticket_vrf_index = epoch.validating.proofs.iter().position(|(s, _)| *s == slot_number)? as u32; let ticket_vrf_proof = epoch.validating.proofs[ticket_vrf_index as usize].clone().1; let pending_index = epoch.validating.pending.iter() - .position(|(_, _, _, p)| *p == ticket_vrf_proof)?; - let (ticket_vrf_attempt, authority_index, ticket_vrf_output, _) = - epoch.validating.pending[pending_index].clone(); + .position(|p| p.vrf_proof == ticket_vrf_proof)?; + let ticket_vrf_attempt = epoch.validating.pending[pending_index].attempt; + let authority_index = epoch.validating.pending[pending_index].authority_index; + let ticket_vrf_output = epoch.validating.pending[pending_index].vrf_output.clone(); let keystore = keystore.read(); let pair = keystore.key_pair::( @@ -121,11 +122,11 @@ fn claim_primary_slot( let post_vrf_proof = VRFProof(post_vrf_proof); let mut commitments = Vec::new(); - for (_, _, _, proof) in &epoch.publishing.pending { + for pending_proof in &epoch.publishing.pending { if commitments.len() < MAX_PRE_DIGEST_COMMITMENTS && - epoch.publishing.proofs.iter().position(|p| p == proof).is_none() + epoch.publishing.proofs.iter().position(|p| *p == pending_proof.vrf_proof).is_none() { - commitments.push(proof.clone()); + commitments.push(pending_proof.vrf_proof.clone()); } } trace!(target: "sassafras", "Appending commitment length: {}", commitments.len()); @@ -179,12 +180,12 @@ impl GeneratingSet { check_primary_threshold(inout, threshold) }) { - self.pending.push(( + self.pending.push(PendingProof { attempt, - authority_index as u32, - VRFOutput(inout.to_output()), - VRFProof(proof) - 
)); + authority_index: authority_index as u32, + vrf_output: VRFOutput(inout.to_output()), + vrf_proof: VRFProof(proof) + }); } } @@ -225,11 +226,12 @@ fn claim_secondary_slot( { if pair.public() == *expected_author { let mut commitments = Vec::new(); - for (_, _, _, proof) in &epoch.publishing.pending { - if commitments.len() < MAX_PRE_DIGEST_COMMITMENTS && - epoch.publishing.proofs.iter().position(|p| p == proof).is_none() + for pending_proof in &epoch.publishing.pending { + if commitments.len() < MAX_PRE_DIGEST_COMMITMENTS && epoch.publishing.proofs.iter() + .position(|p| *p == pending_proof.vrf_proof) + .is_none() { - commitments.push(proof.clone()); + commitments.push(pending_proof.vrf_proof.clone()); } } trace!(target: "sassafras", "Appending commitment length: {}", commitments.len()); diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 2cd07ea0095cc..b82fc925b577d 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -83,6 +83,19 @@ mod authorship; mod utils; mod communication; +/// Information about a local pending proof. +#[derive(Debug, Clone, Encode, Decode)] +pub struct PendingProof { + /// Attempt integer number. + pub attempt: u64, + /// Validator index. + pub authority_index: u32, + /// VRF output. + pub vrf_output: VRFOutput, + /// VRF proof. + pub vrf_proof: VRFProof, +} + /// Set that are generating. #[derive(Debug, Clone, Encode, Decode)] pub struct GeneratingSet { @@ -93,7 +106,7 @@ pub struct GeneratingSet { /// Randomness for this epoch. pub randomness: Randomness, /// Local pending proofs collected. - pub pending: Vec<(u64, u32, VRFOutput, VRFProof)>, + pub pending: Vec, } impl GeneratingSet { @@ -120,7 +133,7 @@ pub struct PublishingSet { /// Proofs of all VRFs collected. pub proofs: Vec, /// Local pending proofs collected. 
- pub pending: Vec<(u64, u32, VRFOutput, VRFProof)>, + pub pending: Vec, } impl PublishingSet { @@ -151,7 +164,7 @@ pub struct ValidatingSet { /// Proofs as ordered by slot numbers. pub proofs: Vec<(SlotNumber, VRFProof)>, /// Pending local proofs. - pub pending: Vec<(u64, u32, VRFOutput, VRFProof)>, + pub pending: Vec, } impl ValidatingSet { @@ -472,6 +485,8 @@ struct SassafrasWorker { keystore: KeyStorePtr, epoch_changes: SharedEpochChanges, config: Config, + // local_out_proofs: UnboundedSender, + // remote_in_proofs: UnboundedReceiver, } impl sc_consensus_slots::SimpleSlotWorker for SassafrasWorker where @@ -481,7 +496,7 @@ impl sc_consensus_slots::SimpleSlotWorker for Sassafra HeaderBackend + HeaderMetadata, C::Api: SassafrasApi, - E: Environment, + E: Environment + Send, E::Proposer: Proposer>, I: BlockImport> + Send + Sync + 'static, SO: SyncOracle + Send + Clone, From 0e8f7598938a76e978f25f4fef6a38cdc0c8a47f Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Thu, 5 Mar 2020 01:12:20 +0100 Subject: [PATCH 57/75] Function for finding out which peer it should send the pending proof to --- client/consensus/sassafras/src/authorship.rs | 11 ++++---- client/consensus/sassafras/src/lib.rs | 27 +++++++++++++++++++- 2 files changed, 32 insertions(+), 6 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index a1aad11789f51..9d7db1d63d959 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -180,12 +180,13 @@ impl GeneratingSet { check_primary_threshold(inout, threshold) }) { - self.pending.push(PendingProof { + self.pending.push(PendingProof::new( attempt, - authority_index: authority_index as u32, - vrf_output: VRFOutput(inout.to_output()), - vrf_proof: VRFProof(proof) - }); + authority_index as u32, + self.authorities.len() as u32, + VRFOutput(inout.to_output()), + VRFProof(proof) + )); } } diff --git a/client/consensus/sassafras/src/lib.rs 
b/client/consensus/sassafras/src/lib.rs index b82fc925b577d..c8ec4758b1ee7 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -42,7 +42,7 @@ use sp_runtime::{ use sp_api::{ProvideRuntimeApi, NumberFor}; use sc_keystore::KeyStorePtr; use parking_lot::Mutex; -use sp_core::{U512, Pair}; +use sp_core::{U512, U256, Pair, blake2_256}; use sp_inherents::{InherentDataProviders, InherentData}; use sc_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG}; use sp_consensus::{ @@ -94,6 +94,31 @@ pub struct PendingProof { pub vrf_output: VRFOutput, /// VRF proof. pub vrf_proof: VRFProof, + /// Whether the proof has been submitted. + pub submit_status: Option, + /// Paired authority index for submission. + pub submit_authority_index: u32, +} + +impl PendingProof { + /// Create a new pending proof. + pub fn new( + attempt: u64, + authority_index: u32, + authority_len: u32, + vrf_output: VRFOutput, + vrf_proof: VRFProof + ) -> Self { + let h = U256::from(blake2_256( + &(&attempt, &authority_index, &vrf_output, &vrf_proof).encode() + )); + let submit_authority_index = (h % U256::from(authority_len)).as_u32(); + + Self { + attempt, authority_index, vrf_output, vrf_proof, + submit_status: None, submit_authority_index, + } + } } /// Set that are generating. From ae58b47f617de70527955244ef9de65432426149 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 8 Mar 2020 03:58:42 +0100 Subject: [PATCH 58/75] Channels for sending proofs to and from networking Currently they're hard-wired back, but later will be integrated with networking. 
--- client/consensus/sassafras/src/lib.rs | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index c8ec4758b1ee7..c3ec57786c9ff 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -61,7 +61,7 @@ use sc_client::Client; use sp_block_builder::BlockBuilder as BlockBuilderApi; -use futures::prelude::*; +use futures::{prelude::*, channel::mpsc::{self, UnboundedSender, UnboundedReceiver}}; use log::{warn, debug, info, trace}; use sc_consensus_slots::{ SlotWorker, SlotInfo, SlotCompatible, StorageChanges, CheckedHeader, check_equivocation, @@ -157,6 +157,8 @@ pub struct PublishingSet { pub randomness: Randomness, /// Proofs of all VRFs collected. pub proofs: Vec, + /// Disclosing proofs. + pub disclosing: Vec, /// Local pending proofs collected. pub pending: Vec, } @@ -239,6 +241,7 @@ impl EpochT for Epoch { randomness: self.generating.randomness, proofs: Vec::new(), pending: self.generating.pending.clone(), + disclosing: Vec::new(), }, validating: ValidatingSet { start_slot, @@ -374,6 +377,7 @@ impl Config { authorities: self.genesis_authorities.clone(), randomness: self.randomness.clone(), pending: Vec::new(), + disclosing: Vec::new(), }, validating: ValidatingSet { start_slot: slot_number, @@ -466,6 +470,7 @@ pub fn start_sassafras(SassafrasParams { N: sc_network_gossip::Network + Clone + Send + Unpin + 'static, { let config = sassafras_link.config; + let (local_out_proofs, remote_in_proofs) = mpsc::unbounded(); let worker = SassafrasWorker { client: client.clone(), block_import: Arc::new(Mutex::new(block_import)), @@ -475,6 +480,7 @@ pub fn start_sassafras(SassafrasParams { keystore, epoch_changes: sassafras_link.epoch_changes.clone(), config: config.clone(), + local_out_proofs, remote_in_proofs, }; register_sassafras_inherent_data_provider(&inherent_data_providers, config.slot_duration())?; @@ -510,8 +516,8 @@ struct 
SassafrasWorker { keystore: KeyStorePtr, epoch_changes: SharedEpochChanges, config: Config, - // local_out_proofs: UnboundedSender, - // remote_in_proofs: UnboundedReceiver, + local_out_proofs: UnboundedSender, + remote_in_proofs: UnboundedReceiver, } impl sc_consensus_slots::SimpleSlotWorker for SassafrasWorker where From 325706651fe5bafee7d7ee9d9b7323519e24c33d Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 8 Mar 2020 09:26:44 +0100 Subject: [PATCH 59/75] Split communciation to network and utils, implement send_out --- .../sassafras/src/communication/mod.rs | 82 ++++++------------- .../sassafras/src/communication/network.rs | 63 ++++++++++++++ client/consensus/sassafras/src/lib.rs | 6 ++ 3 files changed, 95 insertions(+), 56 deletions(-) create mode 100644 client/consensus/sassafras/src/communication/network.rs diff --git a/client/consensus/sassafras/src/communication/mod.rs b/client/consensus/sassafras/src/communication/mod.rs index b2232c79e025c..81d6e86c86a30 100644 --- a/client/consensus/sassafras/src/communication/mod.rs +++ b/client/consensus/sassafras/src/communication/mod.rs @@ -1,63 +1,33 @@ -use std::{marker::PhantomData, sync::Arc, pin::Pin, task::{Poll, Context}}; -use futures::prelude::*; -use sp_runtime::traits::Block as BlockT; -use sc_network::PeerId; -use sc_network_gossip::{ - Validator as ValidatorT, ValidatorContext, GossipEngine, Network as GossipNetwork, - ValidationResult, -}; - -pub use sp_consensus_sassafras::SASSAFRAS_ENGINE_ID; -pub const SASSAFRAS_PROTOCOL_NAME: &[u8] = b"/paritytech/sassafras/1"; - -pub struct GossipValidator { - _marker: PhantomData, -} - -impl ValidatorT for GossipValidator { - fn validate( - &self, - context: &mut dyn ValidatorContext, - sender: &PeerId, - data: &[u8] - ) -> ValidationResult { - unimplemented!() - } -} +mod network; -pub struct NetworkBridge { - service: N, - gossip_engine: GossipEngine, - validator: Arc>, -} - -impl NetworkBridge where - N: GossipNetwork + Clone + Send + 'static, -{ - pub fn 
new(service: N) -> Self { - let validator = Arc::new(GossipValidator { - _marker: PhantomData, - }); +use futures::channel::mpsc::UnboundedSender; +use sp_consensus_sassafras::{SlotNumber, VRFProof}; +use crate::PublishingSet; - let gossip_engine = GossipEngine::new( - service.clone(), - SASSAFRAS_ENGINE_ID, - SASSAFRAS_PROTOCOL_NAME, - validator.clone(), - ); +pub use self::network::{ + SASSAFRAS_ENGINE_ID, SASSAFRAS_PROTOCOL_NAME, GossipValidator, NetworkBridge, +}; - Self { - service, - gossip_engine, - validator, +pub fn send_out( + sender: &UnboundedSender, + slot_number: SlotNumber, + set: &mut PublishingSet +) { + const SEND_OUT_LIMIT: usize = 4; + + let mut sent = 0; + for pending in &mut set.pending { + if sent >= SEND_OUT_LIMIT { + return } - } -} - -impl Future for NetworkBridge { - type Output = (); - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - self.gossip_engine.poll_unpin(cx) + if pending.submit_status.is_none() { + match sender.unbounded_send(pending.vrf_proof.clone()) { + Ok(()) => { + pending.submit_status = Some(slot_number); + }, + Err(_) => return, + } + } } } diff --git a/client/consensus/sassafras/src/communication/network.rs b/client/consensus/sassafras/src/communication/network.rs new file mode 100644 index 0000000000000..b2232c79e025c --- /dev/null +++ b/client/consensus/sassafras/src/communication/network.rs @@ -0,0 +1,63 @@ +use std::{marker::PhantomData, sync::Arc, pin::Pin, task::{Poll, Context}}; +use futures::prelude::*; +use sp_runtime::traits::Block as BlockT; +use sc_network::PeerId; +use sc_network_gossip::{ + Validator as ValidatorT, ValidatorContext, GossipEngine, Network as GossipNetwork, + ValidationResult, +}; + +pub use sp_consensus_sassafras::SASSAFRAS_ENGINE_ID; +pub const SASSAFRAS_PROTOCOL_NAME: &[u8] = b"/paritytech/sassafras/1"; + +pub struct GossipValidator { + _marker: PhantomData, +} + +impl ValidatorT for GossipValidator { + fn validate( + &self, + context: &mut dyn ValidatorContext, + sender: 
&PeerId, + data: &[u8] + ) -> ValidationResult { + unimplemented!() + } +} + +pub struct NetworkBridge { + service: N, + gossip_engine: GossipEngine, + validator: Arc>, +} + +impl NetworkBridge where + N: GossipNetwork + Clone + Send + 'static, +{ + pub fn new(service: N) -> Self { + let validator = Arc::new(GossipValidator { + _marker: PhantomData, + }); + + let gossip_engine = GossipEngine::new( + service.clone(), + SASSAFRAS_ENGINE_ID, + SASSAFRAS_PROTOCOL_NAME, + validator.clone(), + ); + + Self { + service, + gossip_engine, + validator, + } + } +} + +impl Future for NetworkBridge { + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + self.gossip_engine.poll_unpin(cx) + } +} diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index c3ec57786c9ff..87ffd92bccb6d 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -589,6 +589,12 @@ impl sc_consensus_slots::SimpleSlotWorker for Sassafra viable_epoch.as_mut().generating.append_to_pending(&self.keystore); } + crate::communication::send_out( + &self.local_out_proofs, + slot_number, + &mut viable_epoch.as_mut().publishing + ); + let s = authorship::claim_slot( slot_number, viable_epoch.as_ref(), From 29e4003089170346df0aa95d08f9a6303cb078f6 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 8 Mar 2020 09:35:11 +0100 Subject: [PATCH 60/75] Fetch proofs for inclusion from disclosing, rather than local pending --- client/consensus/sassafras/src/authorship.rs | 12 ++++++------ client/consensus/sassafras/src/communication/mod.rs | 11 ++++++++++- client/consensus/sassafras/src/lib.rs | 6 +++++- client/consensus/slots/src/lib.rs | 2 +- 4 files changed, 22 insertions(+), 9 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 9d7db1d63d959..de3d5b67853f0 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ 
b/client/consensus/sassafras/src/authorship.rs @@ -122,11 +122,11 @@ fn claim_primary_slot( let post_vrf_proof = VRFProof(post_vrf_proof); let mut commitments = Vec::new(); - for pending_proof in &epoch.publishing.pending { + for disclosing in &epoch.publishing.disclosing { if commitments.len() < MAX_PRE_DIGEST_COMMITMENTS && - epoch.publishing.proofs.iter().position(|p| *p == pending_proof.vrf_proof).is_none() + epoch.publishing.proofs.iter().position(|p| p == disclosing).is_none() { - commitments.push(pending_proof.vrf_proof.clone()); + commitments.push(disclosing.clone()); } } trace!(target: "sassafras", "Appending commitment length: {}", commitments.len()); @@ -227,12 +227,12 @@ fn claim_secondary_slot( { if pair.public() == *expected_author { let mut commitments = Vec::new(); - for pending_proof in &epoch.publishing.pending { + for disclosing in &epoch.publishing.disclosing { if commitments.len() < MAX_PRE_DIGEST_COMMITMENTS && epoch.publishing.proofs.iter() - .position(|p| *p == pending_proof.vrf_proof) + .position(|p| p == disclosing) .is_none() { - commitments.push(pending_proof.vrf_proof.clone()); + commitments.push(disclosing.clone()); } } trace!(target: "sassafras", "Appending commitment length: {}", commitments.len()); diff --git a/client/consensus/sassafras/src/communication/mod.rs b/client/consensus/sassafras/src/communication/mod.rs index 81d6e86c86a30..9906f4cb6f775 100644 --- a/client/consensus/sassafras/src/communication/mod.rs +++ b/client/consensus/sassafras/src/communication/mod.rs @@ -1,6 +1,6 @@ mod network; -use futures::channel::mpsc::UnboundedSender; +use futures::channel::mpsc::{UnboundedSender, UnboundedReceiver}; use sp_consensus_sassafras::{SlotNumber, VRFProof}; use crate::PublishingSet; @@ -31,3 +31,12 @@ pub fn send_out( } } } + +pub fn receive_in( + receiver: &mut UnboundedReceiver, + set: &mut PublishingSet, +) { + while let Ok(Some(proof)) = receiver.try_next() { + set.disclosing.push(proof); + } +} diff --git 
a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 87ffd92bccb6d..7e5c34000a272 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -572,7 +572,7 @@ impl sc_consensus_slots::SimpleSlotWorker for Sassafra } fn claim_slot( - &self, + &mut self, _parent_header: &B::Header, slot_number: SlotNumber, epoch_descriptor: &ViableEpochDescriptor, Epoch>, @@ -594,6 +594,10 @@ impl sc_consensus_slots::SimpleSlotWorker for Sassafra slot_number, &mut viable_epoch.as_mut().publishing ); + crate::communication::receive_in( + &mut self.remote_in_proofs, + &mut viable_epoch.as_mut().publishing + ); let s = authorship::claim_slot( slot_number, diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index c3ef56651d94d..1c637643a6c1c 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -99,7 +99,7 @@ pub trait SimpleSlotWorker { /// Tries to claim the given slot, returning an object with claim data if successful. 
fn claim_slot( - &self, + &mut self, header: &B::Header, slot_number: u64, epoch_data: &Self::EpochData, From 8478c91f4ea5da041c18f9da7bf95fa04cd638f8 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 8 Mar 2020 11:41:01 +0100 Subject: [PATCH 61/75] Update schnorrkel/merlin/bip39 versions so that it has aead support --- Cargo.lock | 67 ++++++++--------------- Cargo.toml | 3 + bin/utils/subkey/Cargo.toml | 2 +- client/consensus/babe/Cargo.toml | 4 +- client/consensus/sassafras/Cargo.toml | 4 +- primitives/consensus/babe/Cargo.toml | 2 +- primitives/consensus/sassafras/Cargo.toml | 2 +- primitives/core/Cargo.toml | 4 +- 8 files changed, 36 insertions(+), 52 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ea14e5fa18498..f312b88b6303f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1018,19 +1018,6 @@ dependencies = [ "rand 0.3.23", ] -[[package]] -name = "curve25519-dalek" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7dcd30ba50cdf88b55b033456138b7c0ac4afdc436d82e1b79f370f24cc66d" -dependencies = [ - "byteorder 1.3.4", - "clear_on_drop", - "digest", - "rand_core 0.3.1", - "subtle 2.2.2", -] - [[package]] name = "curve25519-dalek" version = "2.0.0" @@ -1041,7 +1028,7 @@ dependencies = [ "digest", "rand_core 0.5.1", "subtle 2.2.2", - "zeroize 1.1.0", + "zeroize", ] [[package]] @@ -1121,7 +1108,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978710b352437433c97b2bff193f2fb1dfd58a093f863dd95e225a19baa599a2" dependencies = [ "clear_on_drop", - "curve25519-dalek 2.0.0", + "curve25519-dalek", "rand 0.7.3", "sha2", ] @@ -2668,7 +2655,7 @@ dependencies = [ "thiserror", "unsigned-varint 0.3.1", "void", - "zeroize 1.1.0", + "zeroize", ] [[package]] @@ -2832,7 +2819,7 @@ version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15a8a3d71f898beb6f854c8aae27aa1d198e0d1f2e49412261c2d90ef39675a" dependencies = [ - "curve25519-dalek 2.0.0", + 
"curve25519-dalek", "futures 0.3.4", "lazy_static", "libp2p-core", @@ -2844,7 +2831,7 @@ dependencies = [ "snow", "static_assertions", "x25519-dalek", - "zeroize 1.1.0", + "zeroize", ] [[package]] @@ -3160,14 +3147,14 @@ checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" [[package]] name = "merlin" -version = "1.3.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0942b357c1b4d0dc43ba724674ec89c3218e6ca2b3e8269e7cb53bcecd2f6e" +checksum = "c6feca46f4fa3443a01769d768727f10c10a20fdb65e52dc16a81f0c8269bb78" dependencies = [ "byteorder 1.3.4", "keccak", - "rand_core 0.4.2", - "zeroize 1.1.0", + "rand_core 0.5.1", + "zeroize", ] [[package]] @@ -6312,7 +6299,7 @@ dependencies = [ "unsigned-varint 0.3.1", "void", "wasm-timer", - "zeroize 1.1.0", + "zeroize", ] [[package]] @@ -6662,19 +6649,19 @@ dependencies = [ [[package]] name = "schnorrkel" -version = "0.8.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eacd8381b3c37840c9c9f40472af529e49975bdcbc24f83c31059fd6539023d3" +version = "0.9.0" +source = "git+https://github.com/w3f/schnorrkel#5896b38680c2c8344734c263314b54e3e01a47f0" dependencies = [ - "curve25519-dalek 1.2.3", - "failure", + "arrayref", + "arrayvec 0.5.1", + "curve25519-dalek", + "getrandom", "merlin", - "rand 0.6.5", - "rand_core 0.4.2", - "rand_os", + "rand 0.7.3", + "rand_core 0.5.1", "sha2", "subtle 2.2.2", - "zeroize 0.9.3", + "zeroize", ] [[package]] @@ -7243,7 +7230,7 @@ dependencies = [ "tiny-keccak 2.0.1", "twox-hash", "wasmi", - "zeroize 1.1.0", + "zeroize", ] [[package]] @@ -7711,9 +7698,9 @@ dependencies = [ [[package]] name = "substrate-bip39" -version = "0.3.1" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be511be555a3633e71739a79e4ddff6a6aaa6579fa6114182a51d72c3eb93c5" +checksum = "a71f83e790f01b8f83129217dd044ae76fece0d39a5771aed3c6a67cedd27d91" dependencies = [ "hmac", "pbkdf2", @@ 
-9236,9 +9223,9 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "637ff90c9540fa3073bb577e65033069e4bae7c79d49d74aa3ffdf5342a53217" dependencies = [ - "curve25519-dalek 2.0.0", + "curve25519-dalek", "rand_core 0.5.1", - "zeroize 1.1.0", + "zeroize", ] [[package]] @@ -9262,12 +9249,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "zeroize" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45af6a010d13e4cf5b54c94ba5a2b2eba5596b9e46bf5875612d332a1f2b3f86" - [[package]] name = "zeroize" version = "1.1.0" diff --git a/Cargo.toml b/Cargo.toml index c0f111c025195..736eff110cb02 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -171,3 +171,6 @@ members = [ [profile.release] # Substrate runtime requires unwinding. panic = "unwind" + +[patch.crates-io] +schnorrkel = { git = "https://github.com/w3f/schnorrkel" } diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index b0777b1f700f6..2ad93f270a9d2 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -17,7 +17,7 @@ rand = "0.7.2" clap = "2.33.0" tiny-bip39 = "0.7" rustc-hex = "2.0.1" -substrate-bip39 = "0.3.1" +substrate-bip39 = "0.4.0" hex = "0.4.0" hex-literal = "0.2.1" codec = { package = "parity-scale-codec", version = "1.0.0" } diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index c6d61eb5ec41f..abeebf57ded66 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -40,9 +40,9 @@ futures = "0.3.1" futures-timer = "3.0.1" parking_lot = "0.10.0" log = "0.4.8" -schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"] } +schnorrkel = { version = "0.9.0", features = ["preaudit_deprecated"] } rand = "0.7.2" -merlin = "1.2.1" +merlin = "2.0.0" pdqselect = "0.1.0" derive_more = "0.99.2" diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 9909c73cdc03d..945f6b290b9f0 
100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -37,9 +37,9 @@ futures = "0.3.1" futures-timer = "3.0.1" parking_lot = "0.10.0" log = "0.4.8" -schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"] } +schnorrkel = { version = "0.9.0", features = ["preaudit_deprecated"] } rand = "0.7.2" -merlin = "1.2.1" +merlin = "2.0.0" pdqselect = "0.1.0" derive_more = "0.99.2" diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 291958c100c0f..9b770604b920b 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -12,7 +12,7 @@ repository = "https://github.com/paritytech/substrate/" sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../application-crypto" } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../std" } -schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"], optional = true } +schnorrkel = { version = "0.9.0", features = ["preaudit_deprecated"], optional = true } sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../api" } sp-consensus = { version = "0.8.0-alpha.2", optional = true, path = "../common" } sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../inherents" } diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index 9f7992357ed86..f2f8733d4ea7b 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -7,7 +7,7 @@ edition = "2018" [dependencies] codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } -schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"], optional = true } +schnorrkel = { version = "0.9.0", features = ["preaudit_deprecated"], optional = true 
} sp-std = { version = "2.0.0-alpha.2", path = "../../std", default-features = false } sp-core = { version = "2.0.0-alpha.2", path = "../../core", default-features = false } sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../api" } diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 8d69110ae6de7..4cfbe906fdc41 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -23,7 +23,7 @@ hash-db = { version = "0.15.2", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } base58 = { version = "0.1.0", optional = true } rand = { version = "0.7.2", optional = true } -substrate-bip39 = { version = "0.3.1", optional = true } +substrate-bip39 = { version = "0.4.0", optional = true } tiny-bip39 = { version = "0.7", optional = true } regex = { version = "1.3.1", optional = true } num-traits = { version = "0.2.8", default-features = false } @@ -40,7 +40,7 @@ parity-util-mem = { version = "0.5.1", default-features = false, features = ["pr ed25519-dalek = { version = "1.0.0-pre.3", default-features = false, features = ["u64_backend", "alloc"], optional = true } blake2-rfc = { version = "0.2.18", default-features = false, optional = true } tiny-keccak = { version = "2.0.1", features = ["keccak"], optional = true } -schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated", "u64_backend"], default-features = false, optional = true } +schnorrkel = { version = "0.9.0", features = ["preaudit_deprecated", "u64_backend"], default-features = false, optional = true } sha2 = { version = "0.8.0", default-features = false, optional = true } hex = { version = "0.4", default-features = false, optional = true } twox-hash = { version = "1.5.0", default-features = false, optional = true } From 44db4f6f46dd7b3264bbdad04ea96d43fe43003f Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 9 Mar 2020 02:22:14 +0100 Subject: [PATCH 62/75] Implement gossip AEAD encrypt/decrypt --- 
Cargo.lock | 68 ++++++++++++++++++- client/consensus/sassafras/Cargo.toml | 4 +- client/consensus/sassafras/src/authorship.rs | 2 +- .../sassafras/src/communication/mod.rs | 58 ++++++++++++++-- client/consensus/sassafras/src/lib.rs | 9 +-- 5 files changed, 127 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f312b88b6303f..071557f5cc862 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16,6 +16,26 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2" +[[package]] +name = "aead" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cf01b9b56e767bb57b94ebf91a58b338002963785cdd7013e21c0d4679471e4" +dependencies = [ + "generic-array", +] + +[[package]] +name = "aes" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54eb1d8fe354e5fc611daf4f2ea97dd45a765f4f1e4512306ec183ae2e8f20c9" +dependencies = [ + "aes-soft", + "aesni", + "block-cipher-trait", +] + [[package]] name = "aes-ctr" version = "0.3.0" @@ -28,6 +48,20 @@ dependencies = [ "stream-cipher", ] +[[package]] +name = "aes-gcm" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5cfe3c2bd0624e9488c8711bd7f3cc5689fac9427622a98694a15b7b58f54fa8" +dependencies = [ + "aead", + "aes", + "block-cipher-trait", + "ghash", + "subtle 2.2.2", + "zeroize", +] + [[package]] name = "aes-soft" version = "0.3.3" @@ -1844,6 +1878,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "ghash" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f0930ed19a7184089ea46d2fedead2f6dc2b674c5db4276b7da336c7cd83252" +dependencies = [ + "polyval", +] + [[package]] name = "gimli" version = "0.19.0" @@ -4839,6 +4882,16 @@ dependencies = [ "web-sys", ] +[[package]] +name = "polyval" +version = "0.3.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ec3341498978de3bfd12d1b22f1af1de22818f5473a11e8a6ef997989e3a212" +dependencies = [ + "cfg-if", + "universal-hash", +] + [[package]] name = "ppv-lite86" version = "0.2.6" @@ -6011,6 +6064,8 @@ dependencies = [ name = "sc-consensus-sassafras" version = "0.8.0" dependencies = [ + "aead", + "aes-gcm", "derive_more", "env_logger 0.7.1", "fork-tree", @@ -6650,8 +6705,9 @@ dependencies = [ [[package]] name = "schnorrkel" version = "0.9.0" -source = "git+https://github.com/w3f/schnorrkel#5896b38680c2c8344734c263314b54e3e01a47f0" +source = "git+https://github.com/w3f/schnorrkel#2d004cffa5dc3bf6dafeebee8d4d40b9f696462f" dependencies = [ + "aead", "arrayref", "arrayvec 0.5.1", "curve25519-dalek", @@ -8626,6 +8682,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +[[package]] +name = "universal-hash" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df0c900f2f9b4116803415878ff48b63da9edb268668e08cf9292d7503114a01" +dependencies = [ + "generic-array", + "subtle 2.2.2", +] + [[package]] name = "unsigned-varint" version = "0.2.3" diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 945f6b290b9f0..5cece862c530f 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -37,11 +37,13 @@ futures = "0.3.1" futures-timer = "3.0.1" parking_lot = "0.10.0" log = "0.4.8" -schnorrkel = { version = "0.9.0", features = ["preaudit_deprecated"] } +schnorrkel = { version = "0.9.0", features = ["preaudit_deprecated", "aead"] } rand = "0.7.2" merlin = "2.0.0" pdqselect = "0.1.0" derive_more = "0.99.2" +aes-gcm = "0.4.1" +aead = "0.2.0" [dev-dependencies] sp-keyring = { version = "2.0.0-alpha.2", path = "../../../primitives/keyring" } diff --git 
a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index de3d5b67853f0..aacd66e9ece7f 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -143,7 +143,7 @@ fn claim_primary_slot( Some((claim, pair)) } -fn get_keypair(q: &AuthorityPair) -> &schnorrkel::Keypair { +pub(crate) fn get_keypair(q: &AuthorityPair) -> &schnorrkel::Keypair { use sp_core::crypto::IsWrappedBy; sp_core::sr25519::Pair::from_ref(q).as_ref() } diff --git a/client/consensus/sassafras/src/communication/mod.rs b/client/consensus/sassafras/src/communication/mod.rs index 9906f4cb6f775..350270b596cb9 100644 --- a/client/consensus/sassafras/src/communication/mod.rs +++ b/client/consensus/sassafras/src/communication/mod.rs @@ -1,7 +1,10 @@ mod network; +use aead::Aead; +use codec::{Encode, Decode}; use futures::channel::mpsc::{UnboundedSender, UnboundedReceiver}; -use sp_consensus_sassafras::{SlotNumber, VRFProof}; +use sp_consensus_sassafras::{SlotNumber, VRFProof, AuthorityId, AuthorityPair}; +use sc_keystore::KeyStorePtr; use crate::PublishingSet; pub use self::network::{ @@ -9,9 +12,9 @@ pub use self::network::{ }; pub fn send_out( - sender: &UnboundedSender, + sender: &UnboundedSender<(AuthorityId, [u8; 32], Vec)>, + set: &mut PublishingSet, slot_number: SlotNumber, - set: &mut PublishingSet ) { const SEND_OUT_LIMIT: usize = 4; @@ -22,21 +25,62 @@ pub fn send_out( } if pending.submit_status.is_none() { - match sender.unbounded_send(pending.vrf_proof.clone()) { + let receiver_id = set.authorities[pending.submit_authority_index as usize].0.clone(); + let receiver_public = match schnorrkel::PublicKey::from_bytes(receiver_id.as_ref()) { + Ok(public) => public, + Err(_) => continue, + }; + let (ephemeral_key, aead) = receiver_public + .init_aead32_unauthenticated::(); + let encrypted = match aead.encrypt( + &Default::default(), + &pending.vrf_proof.encode()[..], + ) { + Ok(encrypted) => encrypted, + 
Err(_) => continue, + }; + + match sender.unbounded_send((receiver_id, ephemeral_key.to_bytes(), encrypted)) { Ok(()) => { pending.submit_status = Some(slot_number); }, - Err(_) => return, + Err(_) => break, } } } } pub fn receive_in( - receiver: &mut UnboundedReceiver, + receiver: &mut UnboundedReceiver<(AuthorityId, [u8; 32], Vec)>, set: &mut PublishingSet, + keystore: &KeyStorePtr, ) { - while let Ok(Some(proof)) = receiver.try_next() { + let keystore = keystore.read(); + + while let Ok(Some((receiver_id, ephemeral_key, encrypted))) = receiver.try_next() { + let receiver_pair = match keystore.key_pair::(&receiver_id) { + Ok(pair) => pair, + Err(_) => continue, + }; + let pair = crate::authorship::get_keypair(&receiver_pair); + let aead = pair.secret.aead32_unauthenticated::( + &match schnorrkel::PublicKey::from_bytes(&ephemeral_key) { + Ok(key) => key, + Err(_) => continue, + } + ); + let decrypted = match aead.decrypt( + &Default::default(), + &encrypted[..], + ) { + Ok(decrypted) => decrypted, + Err(_) => continue, + }; + let proof = match VRFProof::decode(&mut &decrypted[..]) { + Ok(proof) => proof, + Err(_) => continue, + }; + set.disclosing.push(proof); } } diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 7e5c34000a272..e59009598b336 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -516,8 +516,8 @@ struct SassafrasWorker { keystore: KeyStorePtr, epoch_changes: SharedEpochChanges, config: Config, - local_out_proofs: UnboundedSender, - remote_in_proofs: UnboundedReceiver, + local_out_proofs: UnboundedSender<(AuthorityId, [u8; 32], Vec)>, + remote_in_proofs: UnboundedReceiver<(AuthorityId, [u8; 32], Vec)>, } impl sc_consensus_slots::SimpleSlotWorker for SassafrasWorker where @@ -591,12 +591,13 @@ impl sc_consensus_slots::SimpleSlotWorker for Sassafra crate::communication::send_out( &self.local_out_proofs, + &mut viable_epoch.as_mut().publishing, slot_number, - &mut 
viable_epoch.as_mut().publishing ); crate::communication::receive_in( &mut self.remote_in_proofs, - &mut viable_epoch.as_mut().publishing + &mut viable_epoch.as_mut().publishing, + &self.keystore, ); let s = authorship::claim_slot( From bef31139c0a843b4bf2f2440a2618ecac5640786 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 9 Mar 2020 02:28:10 +0100 Subject: [PATCH 63/75] Connect channels to the actual networking gossip layer --- .../sassafras/src/communication/network.rs | 13 +++++++++++-- client/consensus/sassafras/src/lib.rs | 12 +++++++++--- 2 files changed, 20 insertions(+), 5 deletions(-) diff --git a/client/consensus/sassafras/src/communication/network.rs b/client/consensus/sassafras/src/communication/network.rs index b2232c79e025c..7d72f0066e93d 100644 --- a/client/consensus/sassafras/src/communication/network.rs +++ b/client/consensus/sassafras/src/communication/network.rs @@ -1,11 +1,12 @@ use std::{marker::PhantomData, sync::Arc, pin::Pin, task::{Poll, Context}}; -use futures::prelude::*; +use futures::{prelude::*, channel::mpsc::{UnboundedSender, UnboundedReceiver}}; use sp_runtime::traits::Block as BlockT; use sc_network::PeerId; use sc_network_gossip::{ Validator as ValidatorT, ValidatorContext, GossipEngine, Network as GossipNetwork, ValidationResult, }; +use sp_consensus_sassafras::AuthorityId; pub use sp_consensus_sassafras::SASSAFRAS_ENGINE_ID; pub const SASSAFRAS_PROTOCOL_NAME: &[u8] = b"/paritytech/sassafras/1"; @@ -29,12 +30,18 @@ pub struct NetworkBridge { service: N, gossip_engine: GossipEngine, validator: Arc>, + local_out_proofs: UnboundedReceiver<(AuthorityId, [u8; 32], Vec)>, + remote_in_proofs: UnboundedSender<(AuthorityId, [u8; 32], Vec)>, } impl NetworkBridge where N: GossipNetwork + Clone + Send + 'static, { - pub fn new(service: N) -> Self { + pub fn new( + service: N, + local_out_proofs: UnboundedReceiver<(AuthorityId, [u8; 32], Vec)>, + remote_in_proofs: UnboundedSender<(AuthorityId, [u8; 32], Vec)>, + ) -> Self { let validator 
= Arc::new(GossipValidator { _marker: PhantomData, }); @@ -50,6 +57,8 @@ impl NetworkBridge where service, gossip_engine, validator, + local_out_proofs, + remote_in_proofs, } } } diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index e59009598b336..2a91156f7f34d 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -470,7 +470,8 @@ pub fn start_sassafras(SassafrasParams { N: sc_network_gossip::Network + Clone + Send + Unpin + 'static, { let config = sassafras_link.config; - let (local_out_proofs, remote_in_proofs) = mpsc::unbounded(); + let (local_out_proofs_sender, local_out_proofs_receiver) = mpsc::unbounded(); + let (remote_in_proofs_sender, remote_in_proofs_receiver) = mpsc::unbounded(); let worker = SassafrasWorker { client: client.clone(), block_import: Arc::new(Mutex::new(block_import)), @@ -480,7 +481,8 @@ pub fn start_sassafras(SassafrasParams { keystore, epoch_changes: sassafras_link.epoch_changes.clone(), config: config.clone(), - local_out_proofs, remote_in_proofs, + local_out_proofs: local_out_proofs_sender, + remote_in_proofs: remote_in_proofs_receiver, }; register_sassafras_inherent_data_provider(&inherent_data_providers, config.slot_duration())?; @@ -490,7 +492,11 @@ pub fn start_sassafras(SassafrasParams { &inherent_data_providers, )?; - let network = communication::NetworkBridge::new(network); + let network = communication::NetworkBridge::new( + network, + local_out_proofs_receiver, + remote_in_proofs_sender, + ); info!(target: "sassafras", "Starting Sassafras authorship worker"); Ok(future::select( From bafbb7c5c2a832390c96e3b12365f458b3d9771c Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 9 Mar 2020 02:45:28 +0100 Subject: [PATCH 64/75] Finish all basic networking gossip impl --- .../sassafras/src/communication/network.rs | 38 ++++++++++++++++++- 1 file changed, 36 insertions(+), 2 deletions(-) diff --git 
a/client/consensus/sassafras/src/communication/network.rs b/client/consensus/sassafras/src/communication/network.rs index 7d72f0066e93d..380131701a8f1 100644 --- a/client/consensus/sassafras/src/communication/network.rs +++ b/client/consensus/sassafras/src/communication/network.rs @@ -1,6 +1,7 @@ use std::{marker::PhantomData, sync::Arc, pin::Pin, task::{Poll, Context}}; use futures::{prelude::*, channel::mpsc::{UnboundedSender, UnboundedReceiver}}; -use sp_runtime::traits::Block as BlockT; +use codec::{Encode, Decode}; +use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT}; use sc_network::PeerId; use sc_network_gossip::{ Validator as ValidatorT, ValidatorContext, GossipEngine, Network as GossipNetwork, @@ -22,7 +23,9 @@ impl ValidatorT for GossipValidator { sender: &PeerId, data: &[u8] ) -> ValidationResult { - unimplemented!() + ValidationResult::ProcessAndKeep( + <::Hashing as HashT>::hash(&b"SASSAFRAS-PROOF-GLOBAL"[..]) + ) } } @@ -67,6 +70,37 @@ impl Future for NetworkBridge { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let topic = <::Hashing as HashT>::hash( + &b"SASSAFRAS-PROOF-GLOBAL"[..] 
+ ); + + let mut messages = self.gossip_engine.messages_for(topic); + + while let Poll::Ready(Some(notification)) = messages.poll_next_unpin(cx) { + match Decode::decode(&mut ¬ification.message[..]) { + Ok((receiver_id, ephemeral_key, encrypted)) => { + match self.remote_in_proofs.unbounded_send( + (receiver_id, ephemeral_key, encrypted) + ) { + Ok(()) => (), + Err(_) => continue, + } + }, + Err(_) => continue, + } + } + + while let Poll::Ready( + Some((receiver_id, ephemeral_key, encrypted)) + ) = self.local_out_proofs.poll_next_unpin(cx) { + self.gossip_engine.gossip_message( + topic, + (receiver_id, ephemeral_key, encrypted).encode(), + false, + ); + } + + self.gossip_engine.poll_unpin(cx) } } From 085b17549aafbf83a86b72b56b3174d12c01d518 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Mon, 9 Mar 2020 02:45:49 +0100 Subject: [PATCH 65/75] Remove extra white line --- client/consensus/sassafras/src/communication/network.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/client/consensus/sassafras/src/communication/network.rs b/client/consensus/sassafras/src/communication/network.rs index 380131701a8f1..7c52da5fead12 100644 --- a/client/consensus/sassafras/src/communication/network.rs +++ b/client/consensus/sassafras/src/communication/network.rs @@ -100,7 +100,6 @@ impl Future for NetworkBridge { ); } - self.gossip_engine.poll_unpin(cx) } } From 15c4cf23705431797205fbb1b05a9cc18026c47f Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 10 Apr 2020 15:40:27 +0200 Subject: [PATCH 66/75] [WIP] Switch to use sp-consensus-vrf for sp-consensus-sassafras --- Cargo.lock | 223 ++++++++++++++++++ frame/sassafras/Cargo.toml | 24 +- frame/sassafras/src/lib.rs | 11 +- primitives/consensus/sassafras/Cargo.toml | 20 +- primitives/consensus/sassafras/src/digests.rs | 158 +++++++------ primitives/consensus/sassafras/src/lib.rs | 7 +- primitives/consensus/sassafras/src/vrf.rs | 190 --------------- 7 files changed, 341 insertions(+), 292 deletions(-) delete mode 100644 
primitives/consensus/sassafras/src/vrf.rs diff --git a/Cargo.lock b/Cargo.lock index affa982bd3856..28db4e5f0997f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -16,6 +16,26 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d2e7343e7fc9de883d1b0341e0b13970f764c14101234857d2ddafa1cb1cac2" +[[package]] +name = "aead" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cf01b9b56e767bb57b94ebf91a58b338002963785cdd7013e21c0d4679471e4" +dependencies = [ + "generic-array", +] + +[[package]] +name = "aes" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54eb1d8fe354e5fc611daf4f2ea97dd45a765f4f1e4512306ec183ae2e8f20c9" +dependencies = [ + "aes-soft", + "aesni", + "block-cipher-trait", +] + [[package]] name = "aes-ctr" version = "0.3.0" @@ -28,6 +48,20 @@ dependencies = [ "stream-cipher", ] +[[package]] +name = "aes-gcm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff010d955ff3380d45cd17bd86be81cc45e27bc89579d1429ee430163ac19086" +dependencies = [ + "aead", + "aes", + "block-cipher-trait", + "ghash", + "subtle 2.2.2", + "zeroize", +] + [[package]] name = "aes-soft" version = "0.3.3" @@ -1869,6 +1903,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "ghash" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f0930ed19a7184089ea46d2fedead2f6dc2b674c5db4276b7da336c7cd83252" +dependencies = [ + "polyval", +] + [[package]] name = "gimli" version = "0.20.0" @@ -4417,6 +4460,26 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-sassafras" +version = "0.8.0" +dependencies = [ + "frame-support", + "frame-system", + "pallet-session", + "pallet-timestamp", + "parity-scale-codec", + "serde", + "sp-consensus-sassafras", + "sp-consensus-vrf", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std", + 
"sp-timestamp", +] + [[package]] name = "pallet-scheduler" version = "2.0.0-alpha.5" @@ -4990,6 +5053,16 @@ dependencies = [ "web-sys", ] +[[package]] +name = "polyval" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ec3341498978de3bfd12d1b22f1af1de22818f5473a11e8a6ef997989e3a212" +dependencies = [ + "cfg-if", + "universal-hash", +] + [[package]] name = "ppv-lite86" version = "0.2.6" @@ -5739,6 +5812,66 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "sassafras-template" +version = "2.0.0" +dependencies = [ + "futures 0.3.4", + "log", + "sassafras-template-runtime", + "sc-basic-authorship", + "sc-cli", + "sc-client", + "sc-consensus-sassafras", + "sc-executor", + "sc-finality-grandpa", + "sc-network", + "sc-service", + "sc-transaction-pool", + "sp-consensus", + "sp-consensus-sassafras", + "sp-core", + "sp-finality-grandpa", + "sp-inherents", + "sp-runtime", + "sp-transaction-pool", + "structopt", + "substrate-build-script-utils", + "vergen", +] + +[[package]] +name = "sassafras-template-runtime" +version = "2.0.0" +dependencies = [ + "frame-executive", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-grandpa", + "pallet-indices", + "pallet-randomness-collective-flip", + "pallet-sassafras", + "pallet-sudo", + "pallet-timestamp", + "pallet-transaction-payment", + "parity-scale-codec", + "serde", + "sp-api", + "sp-block-builder", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-io", + "sp-offchain", + "sp-runtime", + "sp-session", + "sp-std", + "sp-transaction-pool", + "sp-version", + "substrate-wasm-builder-runner", +] + [[package]] name = "sc-authority-discovery" version = "0.8.0-alpha.5" @@ -6154,6 +6287,59 @@ dependencies = [ "sp-timestamp", ] +[[package]] +name = "sc-consensus-sassafras" +version = "0.8.0" +dependencies = [ + "aead", + "aes-gcm", + "derive_more", + "env_logger 0.7.1", + "fork-tree", + "futures 0.1.29", + "futures 0.3.4", + "futures-timer 3.0.2", + 
"log", + "merlin", + "num-bigint", + "num-rational", + "num-traits 0.2.11", + "parity-scale-codec", + "parking_lot 0.10.0", + "pdqselect", + "rand 0.7.3", + "sc-block-builder", + "sc-client", + "sc-client-api", + "sc-consensus-epochs", + "sc-consensus-slots", + "sc-consensus-uncles", + "sc-executor", + "sc-keystore", + "sc-network", + "sc-network-gossip", + "sc-network-test", + "sc-service", + "sc-telemetry", + "schnorrkel", + "sp-api", + "sp-application-crypto", + "sp-block-builder", + "sp-blockchain", + "sp-consensus", + "sp-consensus-sassafras", + "sp-core", + "sp-inherents", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-timestamp", + "sp-version", + "substrate-test-runtime-client", + "tempfile", + "tokio 0.1.22", +] + [[package]] name = "sc-consensus-slots" version = "0.8.0-alpha.5" @@ -6774,6 +6960,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" dependencies = [ + "aead", "arrayref", "arrayvec 0.5.1", "curve25519-dalek", @@ -7318,6 +7505,22 @@ dependencies = [ "sp-std", ] +[[package]] +name = "sp-consensus-sassafras" +version = "0.8.0" +dependencies = [ + "parity-scale-codec", + "sp-api", + "sp-application-crypto", + "sp-consensus", + "sp-consensus-vrf", + "sp-core", + "sp-inherents", + "sp-runtime", + "sp-std", + "sp-timestamp", +] + [[package]] name = "sp-consensus-vrf" version = "0.8.0-alpha.5" @@ -8877,6 +9080,16 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +[[package]] +name = "universal-hash" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df0c900f2f9b4116803415878ff48b63da9edb268668e08cf9292d7503114a01" +dependencies = [ + "generic-array", + "subtle 2.2.2", +] + [[package]] name = "unsigned-varint" version = "0.3.2" @@ -8935,6 +9148,16 @@ version = "0.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" +[[package]] +name = "vergen" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ce50d8996df1f85af15f2cd8d33daae6e479575123ef4314a51a70a230739cb" +dependencies = [ + "bitflags", + "chrono", +] + [[package]] name = "version_check" version = "0.9.1" diff --git a/frame/sassafras/Cargo.toml b/frame/sassafras/Cargo.toml index ce5164e8e3297..f54ebb3318b66 100644 --- a/frame/sassafras/Cargo.toml +++ b/frame/sassafras/Cargo.toml @@ -6,19 +6,19 @@ edition = "2018" license = "GPL-3.0" [dependencies] -hex-literal = "0.2.1" -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../timestamp" } -sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/timestamp" } -pallet-session = { version = "2.0.0-alpha.2", default-features = false, path = "../session" } -sp-consensus-sassafras = { version = "0.8.0-alpha.2", default-features = false, path = 
"../../primitives/consensus/sassafras" } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = "../timestamp" } +sp-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/timestamp" } +pallet-session = { version = "2.0.0-alpha.5", default-features = false, path = "../session" } +sp-consensus-sassafras = { version = "0.8.0-alpha.5", default-features = false, path = "../../primitives/consensus/sassafras" } +sp-consensus-vrf = { version = "0.8.0-alpha.5", default-features = false, path = "../../primitives/consensus/vrf" } sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io", default-features = false } [features] diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index a25cc6a9ab6d6..a589d575e8d52 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -19,14 +19,17 @@ #![cfg_attr(not(feature = "std"), no_std)] #![forbid(unused_must_use, unsafe_code, unused_variables, unused_must_use)] -#![deny(unused_imports)] + pub use pallet_timestamp; use sp_std::{result, prelude::*}; -use frame_support::{decl_storage, decl_module, traits::{FindAuthor, Get, Randomness as RandomnessT}}; +use frame_support::{ + decl_storage, decl_module, traits::{FindAuthor, Get, Randomness as RandomnessT}, + weights::{Weight, SimpleDispatchInfo, WeighData}, +}; use sp_timestamp::OnTimestampSet; 
-use sp_runtime::{generic::DigestItem, ConsensusEngineId, Perbill, PerThing}; -use sp_runtime::traits::{IsMember, SaturatedConversion, Saturating, Hash}; +use sp_runtime::{generic::DigestItem, ConsensusEngineId, Perbill}; +use sp_runtime::traits::{IsMember, SaturatedConversion, Saturating, Hash, One}; use sp_staking::{ SessionIndex, offence::{Offence, Kind}, diff --git a/primitives/consensus/sassafras/Cargo.toml b/primitives/consensus/sassafras/Cargo.toml index f2f8733d4ea7b..6c2de45781b4c 100644 --- a/primitives/consensus/sassafras/Cargo.toml +++ b/primitives/consensus/sassafras/Cargo.toml @@ -7,25 +7,25 @@ edition = "2018" [dependencies] codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } -schnorrkel = { version = "0.9.0", features = ["preaudit_deprecated"], optional = true } -sp-std = { version = "2.0.0-alpha.2", path = "../../std", default-features = false } -sp-core = { version = "2.0.0-alpha.2", path = "../../core", default-features = false } -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../api" } -sp-consensus = { version = "0.8.0-alpha.2", optional = true, path = "../common" } -sp-inherents = { version = "2.0.0-alpha.2", path = "../../inherents", default-features = false } -sp-timestamp = { version = "2.0.0-alpha.2", path = "../../timestamp", default-features = false } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../runtime", default-features = false } -sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../application-crypto", default-features = false } +sp-std = { version = "2.0.0-alpha.5", path = "../../std", default-features = false } +sp-core = { version = "2.0.0-alpha.5", path = "../../core", default-features = false } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../../api" } +sp-consensus = { version = "0.8.0-alpha.5", optional = true, path = "../common" } +sp-consensus-vrf = { version = "0.8.0-alpha.5", path = "../vrf", default-features 
= false } +sp-inherents = { version = "2.0.0-alpha.5", path = "../../inherents", default-features = false } +sp-timestamp = { version = "2.0.0-alpha.5", path = "../../timestamp", default-features = false } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../runtime", default-features = false } +sp-application-crypto = { version = "2.0.0-alpha.5", path = "../../application-crypto", default-features = false } [features] default = ["std"] std = [ "codec/std", - "schnorrkel", "sp-std/std", "sp-core/std", "sp-api/std", "sp-consensus", + "sp-consensus-vrf/std", "sp-inherents/std", "sp-timestamp/std", "sp-runtime/std", diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 460f63247ca2d..6a5f2ab07d89b 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -17,10 +17,12 @@ use codec::{Encode, Decode}; use sp_std::vec::Vec; use sp_core::RuntimeDebug; +use sp_consensus_vrf::schnorrkel; use crate::{ - Randomness, VRFProof, VRFOutput, VRFIndex, + Randomness, VRFIndex, AuthorityIndex, SlotNumber, AuthorityId, SassafrasAuthorityWeight, }; + #[cfg(feature = "std")] use codec::Codec; #[cfg(feature = "std")] @@ -28,68 +30,9 @@ use sp_runtime::{DigestItem, generic::OpaqueDigestItemId}; #[cfg(feature = "std")] use crate::{SASSAFRAS_ENGINE_ID, AuthoritySignature}; -/// A digest item which is usable with Sassafras consensus. -#[cfg(feature = "std")] -pub trait CompatibleDigestItem: Sized { - /// Construct a digest item which contains a Sassafras `PreDigest`. - fn sassafras_pre_digest(seal: PreDigest) -> Self; - - /// Construct a digest item which contains a Sassafras seal. - fn sassafras_seal(signature: AuthoritySignature) -> Self; - - /// If this item is a Sassafras `PreDigest`, return it. - fn as_sassafras_pre_digest(&self) -> Option; - - /// If this item is a Sassafras `NextEpochDescriptor`, return it. 
- fn as_sassafras_next_epoch_descriptor(&self) -> Option; - - /// If this item is a Sassafras `PostBlockDescriptor`, return it. - fn as_sassafras_post_block_descriptor(&self) -> Option; - - /// If this item is a Sassafras seal, return it. - fn as_sassafras_seal(&self) -> Option; -} - -#[cfg(feature = "std")] -impl CompatibleDigestItem for DigestItem where - Hash: core::fmt::Debug + Send + Sync + Eq + Clone + Codec + 'static -{ - fn sassafras_pre_digest(seal: PreDigest) -> Self { - DigestItem::PreRuntime(SASSAFRAS_ENGINE_ID, seal.encode()) - } - - fn sassafras_seal(signature: AuthoritySignature) -> Self { - DigestItem::Seal(SASSAFRAS_ENGINE_ID, signature.encode()) - } - - fn as_sassafras_pre_digest(&self) -> Option { - self.try_to(OpaqueDigestItemId::PreRuntime(&SASSAFRAS_ENGINE_ID)) - } - - fn as_sassafras_next_epoch_descriptor(&self) -> Option { - self.try_to(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID)) - .and_then(|x: super::ConsensusLog| match x { - super::ConsensusLog::NextEpochData(n) => Some(n), - _ => None, - }) - } - - fn as_sassafras_post_block_descriptor(&self) -> Option { - self.try_to(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID)) - .and_then(|x: super::ConsensusLog| match x { - super::ConsensusLog::PostBlockData(p) => Some(p), - _ => None, - }) - } - - fn as_sassafras_seal(&self) -> Option { - self.try_to(OpaqueDigestItemId::Seal(&SASSAFRAS_ENGINE_ID)) - } -} - -/// A primary Sassafras pre-digest. -#[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq)] -pub struct PrimaryPreDigest { +/// Raw Sassafras primary slot assignment pre-digest. +#[derive(Clone, RuntimeDebug, Encode, Decode)] +pub struct RawPrimaryPreDigest { /// Index of ticket VRF proof that has been previously committed. pub ticket_vrf_index: VRFIndex, /// Attempt number of the ticket VRF proof. @@ -108,9 +51,13 @@ pub struct PrimaryPreDigest { pub commitments: Vec, } +#[cfg(feature = "std")] +/// Sassafras primary slot assignment pre-digest for std environment. 
+pub type PrimaryPreDigest = RawPrimaryPreDigest; + /// A secondary Sassafras pre-digest. -#[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq)] -pub struct SecondaryPreDigest { +#[derive(Clone, RuntimeDebug, Encode, Decode)] +pub struct RawSecondaryPreDigest { /// Authority index. pub authority_index: AuthorityIndex, /// Slot number. @@ -123,14 +70,18 @@ pub struct SecondaryPreDigest { /// as `vrf_output`. /// /// This digest is included in every block, generated by Sassafras consensus engine. -#[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq)] -pub enum PreDigest { +#[derive(Clone, RuntimeDebug, Encode, Decode)] +pub enum RawPreDigest { /// A primary VRF-based slot-assignment. - Primary(PrimaryPreDigest), + Primary(RawPrimaryPreDigest), /// A secondary deterministic slot assignment. - Secondary(SecondaryPreDigest), + Secondary(RawSecondaryPreDigest), } +#[cfg(feature = "std")] +/// A Sassafras pre-runtime digest for std. +pub type PreDigest = RawPreDigest; + impl PreDigest { /// Returns the slot number of the pre digest. pub fn authority_index(&self) -> AuthorityIndex { @@ -161,7 +112,7 @@ impl PreDigest { /// Post-digest about next epoch information. /// /// This digest is generated by runtime, at the beginning of every epoch. -#[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq)] +#[derive(Clone, RuntimeDebug, Encode, Decode)] pub struct NextEpochDescriptor { /// The authorities that generate VRF proofs. Note that those keys will only be generating /// blocks two epochs later. @@ -174,8 +125,71 @@ pub struct NextEpochDescriptor { /// Post-digest about post-block information such as ticket commitments. /// /// This digest is generated by runtime, optional, and can be included at every block. -#[derive(Clone, RuntimeDebug, Encode, Decode, PartialEq, Eq)] -pub struct PostBlockDescriptor { +#[derive(Clone, RuntimeDebug, Encode, Decode)] +pub struct RawPostBlockDescriptor { /// Commitments of tickets. 
pub commitments: Vec, } + +#[cfg(feature = "std")] +/// Sassafras post-digest suitable for std environment. +pub type PostBlockDescriptor = RawPostBlockDescriptor; + +/// A digest item which is usable with Sassafras consensus. +#[cfg(feature = "std")] +pub trait CompatibleDigestItem: Sized { + /// Construct a digest item which contains a Sassafras `PreDigest`. + fn sassafras_pre_digest(seal: PreDigest) -> Self; + + /// Construct a digest item which contains a Sassafras seal. + fn sassafras_seal(signature: AuthoritySignature) -> Self; + + /// If this item is a Sassafras `PreDigest`, return it. + fn as_sassafras_pre_digest(&self) -> Option; + + /// If this item is a Sassafras `NextEpochDescriptor`, return it. + fn as_sassafras_next_epoch_descriptor(&self) -> Option; + + /// If this item is a Sassafras `PostBlockDescriptor`, return it. + fn as_sassafras_post_block_descriptor(&self) -> Option; + + /// If this item is a Sassafras seal, return it. + fn as_sassafras_seal(&self) -> Option; +} + +#[cfg(feature = "std")] +impl CompatibleDigestItem for DigestItem where + Hash: core::fmt::Debug + Send + Sync + Eq + Clone + Codec + 'static +{ + fn sassafras_pre_digest(seal: PreDigest) -> Self { + DigestItem::PreRuntime(SASSAFRAS_ENGINE_ID, seal.encode()) + } + + fn sassafras_seal(signature: AuthoritySignature) -> Self { + DigestItem::Seal(SASSAFRAS_ENGINE_ID, signature.encode()) + } + + fn as_sassafras_pre_digest(&self) -> Option { + self.try_to(OpaqueDigestItemId::PreRuntime(&SASSAFRAS_ENGINE_ID)) + } + + fn as_sassafras_next_epoch_descriptor(&self) -> Option { + self.try_to(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID)) + .and_then(|x: super::ConsensusLog| match x { + super::ConsensusLog::NextEpochData(n) => Some(n), + _ => None, + }) + } + + fn as_sassafras_post_block_descriptor(&self) -> Option { + self.try_to(OpaqueDigestItemId::Consensus(&SASSAFRAS_ENGINE_ID)) + .and_then(|x: super::ConsensusLog| match x { + super::ConsensusLog::PostBlockData(p) => Some(p), + _ => 
None, + }) + } + + fn as_sassafras_seal(&self) -> Option { + self.try_to(OpaqueDigestItemId::Seal(&SASSAFRAS_ENGINE_ID)) + } +} diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 9cb31315314c4..0f3e1d6d29a58 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -22,9 +22,8 @@ pub mod digests; pub mod inherents; -mod vrf; -pub use crate::vrf::{ +pub use sp_consensus_vrf::schnorrkel::{ VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH, RawVRFOutput, VRFOutput, RawVRFProof, VRFProof, Randomness, }; @@ -81,7 +80,7 @@ pub type SassafrasAuthorityWeight = u64; pub type SassafrasBlockWeight = u32; /// An consensus log item for Sassafras. -#[derive(Decode, Encode, Clone, PartialEq, Eq, RuntimeDebug)] +#[derive(Decode, Encode, Clone, RuntimeDebug)] pub enum ConsensusLog { /// The epoch has changed. NextEpochData(digests::NextEpochDescriptor), @@ -92,7 +91,7 @@ pub enum ConsensusLog { } /// Configuration data used by the Sassafras consensus engine. -#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] +#[derive(Clone, Encode, Decode, RuntimeDebug)] pub struct SassafrasConfiguration { /// The slot duration in milliseconds for Sassafras. 
pub slot_duration: u64, diff --git a/primitives/consensus/sassafras/src/vrf.rs b/primitives/consensus/sassafras/src/vrf.rs deleted file mode 100644 index 063ceed01881d..0000000000000 --- a/primitives/consensus/sassafras/src/vrf.rs +++ /dev/null @@ -1,190 +0,0 @@ -use codec::{Encode, Decode}; -use sp_core::U512; -use sp_runtime::RuntimeDebug; -#[cfg(feature = "std")] -use std::{ops::{Deref, DerefMut}, convert::TryFrom}; -#[cfg(feature = "std")] -use codec::EncodeLike; -#[cfg(feature = "std")] -use schnorrkel::{SignatureError, errors::MultiSignatureStage}; - -#[cfg(feature = "std")] -pub use schnorrkel::vrf::{VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH}; - -#[cfg(not(feature = "std"))] -pub const VRF_PROOF_LENGTH: usize = 64; - -#[cfg(not(feature = "std"))] -pub const VRF_OUTPUT_LENGTH: usize = 32; - -#[derive(Clone, Eq, PartialEq, RuntimeDebug, Encode, Decode)] -pub struct RawVRFOutput(pub [u8; VRF_OUTPUT_LENGTH]); - -#[cfg(feature = "std")] -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct VRFOutput(pub schnorrkel::vrf::VRFOutput); - -#[cfg(not(feature = "std"))] -pub type VRFOutput = RawVRFOutput; - -#[cfg(feature = "std")] -impl Deref for VRFOutput { - type Target = schnorrkel::vrf::VRFOutput; - fn deref(&self) -> &Self::Target { &self.0 } -} - -#[cfg(feature = "std")] -impl DerefMut for VRFOutput { - fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } -} - -#[cfg(feature = "std")] -impl Encode for VRFOutput { - fn encode(&self) -> Vec { - self.0.as_bytes().encode() - } -} - -#[cfg(feature = "std")] -impl EncodeLike for VRFOutput { } - -#[cfg(feature = "std")] -impl Decode for VRFOutput { - fn decode(i: &mut R) -> Result { - let decoded = <[u8; VRF_OUTPUT_LENGTH]>::decode(i)?; - Ok(Self(schnorrkel::vrf::VRFOutput::from_bytes(&decoded).map_err(convert_error)?)) - } -} - -#[cfg(feature = "std")] -impl TryFrom for VRFOutput { - type Error = SignatureError; - - fn try_from(raw: RawVRFOutput) -> Result { - 
schnorrkel::vrf::VRFOutput::from_bytes(&raw.0).map(VRFOutput) - } -} - -#[cfg(feature = "std")] -impl From for RawVRFOutput { - fn from(output: VRFOutput) -> RawVRFOutput { - RawVRFOutput(output.to_bytes()) - } -} - -#[derive(Clone, Encode, Decode)] -pub struct RawVRFProof(pub [u8; VRF_PROOF_LENGTH]); - -#[cfg(feature = "std")] -impl std::fmt::Debug for RawVRFProof { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", &self) - } -} - -impl core::cmp::PartialEq for RawVRFProof { - fn eq(&self, other: &Self) -> bool { - self == other - } -} - -impl core::cmp::Eq for RawVRFProof { } - -#[cfg(feature = "std")] -#[derive(Clone, Debug, PartialEq, Eq)] -pub struct VRFProof(pub schnorrkel::vrf::VRFProof); - -#[cfg(feature = "std")] -impl PartialOrd for VRFProof { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - -#[cfg(feature = "std")] -impl Ord for VRFProof { - fn cmp(&self, other: &Self) -> core::cmp::Ordering { - U512::from(self.0.to_bytes()).cmp(&U512::from(other.0.to_bytes())) - } -} - -#[cfg(not(feature = "std"))] -pub type VRFProof = RawVRFProof; - -#[cfg(feature = "std")] -impl Deref for VRFProof { - type Target = schnorrkel::vrf::VRFProof; - fn deref(&self) -> &Self::Target { &self.0 } -} - -#[cfg(feature = "std")] -impl DerefMut for VRFProof { - fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } -} - -#[cfg(feature = "std")] -impl Encode for VRFProof { - fn encode(&self) -> Vec { - self.0.to_bytes().encode() - } -} - -#[cfg(feature = "std")] -impl EncodeLike for VRFProof { } - -#[cfg(feature = "std")] -impl Decode for VRFProof { - fn decode(i: &mut R) -> Result { - let decoded = <[u8; VRF_PROOF_LENGTH]>::decode(i)?; - Ok(Self(schnorrkel::vrf::VRFProof::from_bytes(&decoded).map_err(convert_error)?)) - } -} - -#[cfg(feature = "std")] -impl TryFrom for VRFProof { - type Error = SignatureError; - - fn try_from(raw: RawVRFProof) -> Result { - 
schnorrkel::vrf::VRFProof::from_bytes(&raw.0).map(VRFProof) - } -} - -#[cfg(feature = "std")] -impl From for RawVRFProof { - fn from(output: VRFProof) -> RawVRFProof { - RawVRFProof(output.to_bytes()) - } -} - -#[cfg(feature = "std")] -fn convert_error(e: SignatureError) -> codec::Error { - use SignatureError::*; - use MultiSignatureStage::*; - match e { - EquationFalse => "Signature error: `EquationFalse`".into(), - PointDecompressionError => "Signature error: `PointDecompressionError`".into(), - ScalarFormatError => "Signature error: `ScalarFormatError`".into(), - NotMarkedSchnorrkel => "Signature error: `NotMarkedSchnorrkel`".into(), - BytesLengthError { .. } => "Signature error: `BytesLengthError`".into(), - MuSigAbsent { musig_stage: Commitment } => - "Signature error: `MuSigAbsent` at stage `Commitment`".into(), - MuSigAbsent { musig_stage: Reveal } => - "Signature error: `MuSigAbsent` at stage `Reveal`".into(), - MuSigAbsent { musig_stage: Cosignature } => - "Signature error: `MuSigAbsent` at stage `Commitment`".into(), - MuSigInconsistent { musig_stage: Commitment, duplicate: true } => - "Signature error: `MuSigInconsistent` at stage `Commitment` on duplicate".into(), - MuSigInconsistent { musig_stage: Commitment, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Commitment` on not duplicate".into(), - MuSigInconsistent { musig_stage: Reveal, duplicate: true } => - "Signature error: `MuSigInconsistent` at stage `Reveal` on duplicate".into(), - MuSigInconsistent { musig_stage: Reveal, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Reveal` on not duplicate".into(), - MuSigInconsistent { musig_stage: Cosignature, duplicate: true } => - "Signature error: `MuSigInconsistent` at stage `Cosignature` on duplicate".into(), - MuSigInconsistent { musig_stage: Cosignature, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Cosignature` on not duplicate".into(), - } -} - -pub type Randomness = [u8; 
VRF_OUTPUT_LENGTH]; From 7cde5a57338ed8c56679cd9df633257823a78318 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sat, 11 Apr 2020 13:06:11 +0200 Subject: [PATCH 67/75] Fix-up frame-sassafras --- frame/sassafras/src/lib.rs | 114 ++++++++++++------ primitives/consensus/sassafras/src/digests.rs | 14 +-- primitives/consensus/sassafras/src/lib.rs | 4 +- 3 files changed, 89 insertions(+), 43 deletions(-) diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index a589d575e8d52..739140cfe7385 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -20,7 +20,7 @@ #![cfg_attr(not(feature = "std"), no_std)] #![forbid(unused_must_use, unsafe_code, unused_variables, unused_must_use)] -pub use pallet_timestamp; +use pallet_timestamp; use sp_std::{result, prelude::*}; use frame_support::{ @@ -40,11 +40,10 @@ use sp_inherents::{InherentIdentifier, InherentData, ProvideInherent, MakeFatalE use sp_consensus_sassafras::{ SASSAFRAS_ENGINE_ID, ConsensusLog, SassafrasAuthorityWeight, SlotNumber, inherents::{INHERENT_IDENTIFIER, SassafrasInherentData}, - digests::{NextEpochDescriptor, PreDigest, PrimaryPreDigest}, -}; -pub use sp_consensus_sassafras::{ - AuthorityId, RawVRFOutput, VRFOutput, VRF_OUTPUT_LENGTH, PUBLIC_KEY_LENGTH + digests::{NextEpochDescriptor, RawPreDigest}, }; +use sp_consensus_vrf::schnorrkel; +pub use sp_consensus_sassafras::{AuthorityId, VRF_OUTPUT_LENGTH, RANDOMNESS_LENGTH, PUBLIC_KEY_LENGTH}; #[cfg(all(feature = "std", test))] mod tests; @@ -101,12 +100,9 @@ impl EpochChangeTrigger for SameAuthoritiesForever { } } -/// The length of the Sassafras randomness -pub const RANDOMNESS_LENGTH: usize = 32; - const UNDER_CONSTRUCTION_SEGMENT_LENGTH: usize = 256; -type MaybeVrf = Option<[u8; 32 /* VRF_OUTPUT_LENGTH */]>; +type MaybeVrf = Option; decl_storage! { trait Store for Module as Sassafras { @@ -136,10 +132,10 @@ decl_storage! 
{ // NOTE: the following fields don't use the constants to define the // array size because the metadata API currently doesn't resolve the // variable to its underlying value. - pub Randomness get(fn randomness): [u8; 32 /* RANDOMNESS_LENGTH */]; + pub Randomness get(fn randomness): schnorrkel::Randomness; /// Next epoch randomness. - NextRandomness: [u8; 32 /* RANDOMNESS_LENGTH */]; + NextRandomness: schnorrkel::Randomness; /// Randomness under construction. /// @@ -151,11 +147,18 @@ decl_storage! { /// We reset all segments and return to `0` at the beginning of every /// epoch. SegmentIndex build(|_| 0): u32; - UnderConstruction: map hasher(blake2_256) u32 => Vec<[u8; 32 /* VRF_OUTPUT_LENGTH */]>; + UnderConstruction: map hasher(twox_64_concat) u32 => Vec; /// Temporary value (cleared at block finalization) which is `Some` /// if per-block initialization has already been called for current block. Initialized get(fn initialized): Option; + + /// How late the current block is compared to its parent. + /// + /// This entry is populated as part of block execution and is cleaned up + /// on block finalization. Querying this storage entry outside of block + /// execution context should always yield zero. + Lateness get(fn lateness): T::BlockNumber; } add_extra_genesis { config(authorities): Vec<(AuthorityId, SassafrasAuthorityWeight)>; @@ -164,7 +167,7 @@ decl_storage! { } decl_module! { - /// The Sassafras SRML module + /// The Sassafras Pallet pub struct Module for enum Call where origin: T::Origin { /// The number of **slots** that an epoch takes. We couple sessions to /// epochs, i.e. we start a new session once the new epoch begins. @@ -178,8 +181,10 @@ decl_module! 
{ const ExpectedBlockTime: T::Moment = T::ExpectedBlockTime::get(); /// Initialization - fn on_initialize(now: T::BlockNumber) { + fn on_initialize(now: T::BlockNumber) -> Weight { Self::do_initialize(now); + + SimpleDispatchInfo::default().weigh_data(()) } /// Block finalization @@ -192,6 +197,9 @@ decl_module! { if let Some(Some(vrf_output)) = Initialized::take() { Self::deposit_vrf_output(&vrf_output); } + + // remove temporary "environment" entry from storage + Lateness::::kill(); } } } @@ -215,7 +223,7 @@ impl FindAuthor for Module { { for (id, mut data) in digests.into_iter() { if id == SASSAFRAS_ENGINE_ID { - let pre_digest = PreDigest::decode(&mut data).ok()?; + let pre_digest: RawPreDigest = RawPreDigest::decode(&mut data).ok()?; return Some(pre_digest.authority_index()) } } @@ -248,7 +256,6 @@ impl pallet_session::ShouldEndSession for Module { /// A Sassafras equivocation offence report. /// /// When a validator released two or more blocks at the same slot. -#[allow(dead_code)] struct SassafrasEquivocationOffence { /// A Sassafras slot number in which this incident happened. slot: u64, @@ -310,12 +317,34 @@ impl Module { // epoch 0 as having started at the slot of block 1. We want to use // the same randomness and validator set as signalled in the genesis, // so we don't rotate the epoch. - now != sp_runtime::traits::One::one() && { + now != One::one() && { let diff = CurrentSlot::get().saturating_sub(Self::current_epoch_start()); diff >= T::EpochDuration::get() } } + /// Return the _best guess_ block number, at which the next epoch change is predicted to happen. + /// + /// Returns None if the prediction is in the past; This implies an error internally in the Sassafras + /// and should not happen under normal circumstances. + /// + /// In other word, this is only accurate if no slots are missed. Given missed slots, the slot + /// number will grow while the block number will not. Hence, the result can be interpreted as an + /// upper bound. 
+ // -------------- IMPORTANT NOTE -------------- + // This implementation is linked to how [`should_epoch_change`] is working. This might need to + // be updated accordingly, if the underlying mechanics of slot and epochs change. + pub fn next_expected_epoch_change(now: T::BlockNumber) -> Option { + let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); + next_slot + .checked_sub(CurrentSlot::get()) + .map(|slots_remaining| { + // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. + let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); + now.saturating_add(blocks_remaining) + }) + } + /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_epoch_change` has returned `true`, /// and the caller is the only caller of this function. /// @@ -327,10 +356,7 @@ impl Module { ) { // PRECONDITION: caller has done initialization and is guaranteed // by the session module to be called before this. - #[cfg(debug_assertions)] - { - assert!(Self::initialized().is_some()) - } + debug_assert!(Self::initialized().is_some()); // Update epoch index let epoch_index = EpochIndex::get() @@ -365,7 +391,7 @@ impl Module { // finds the start slot of the current epoch. only guaranteed to // give correct results after `do_initialize` of the first block // in the chain (as its result is based off of `GenesisSlot`). - fn current_epoch_start() -> SlotNumber { + pub fn current_epoch_start() -> SlotNumber { (EpochIndex::get() * T::EpochDuration::get()) + GenesisSlot::get() } @@ -374,7 +400,7 @@ impl Module { >::deposit_log(log.into()) } - fn deposit_vrf_output(vrf_output: &[u8; VRF_OUTPUT_LENGTH]) { + fn deposit_vrf_output(vrf_output: &schnorrkel::RawVRFOutput) { let segment_idx = ::get(); let mut segment = ::get(&segment_idx); if segment.len() < UNDER_CONSTRUCTION_SEGMENT_LENGTH { @@ -384,7 +410,7 @@ impl Module { } else { // move onto the next segment and update the index. 
let segment_idx = segment_idx + 1; - ::insert(&segment_idx, &vec![*vrf_output]); + ::insert(&segment_idx, &vec![vrf_output.clone()]); ::put(&segment_idx); } } @@ -397,12 +423,12 @@ impl Module { return; } - let maybe_pre_digest = >::digest() + let maybe_pre_digest: Option = >::digest() .logs .iter() .filter_map(|s| s.as_pre_runtime()) .filter_map(|(id, mut data)| if id == SASSAFRAS_ENGINE_ID { - PreDigest::decode(&mut data).ok() + RawPreDigest::decode(&mut data).ok() } else { None }) @@ -427,19 +453,27 @@ impl Module { Self::deposit_consensus(ConsensusLog::NextEpochData(next)) } - CurrentSlot::put(digest.slot_number()); + // the slot number of the current block being initialized + let current_slot = digest.slot_number(); + + // how many slots were skipped between current and last block + let lateness = current_slot.saturating_sub(CurrentSlot::get() + 1); + let lateness = T::BlockNumber::from(lateness as u32); - if let PreDigest::Primary(PrimaryPreDigest { post_vrf_output, .. }) = digest { + Lateness::::put(lateness); + CurrentSlot::put(current_slot); + + if let RawPreDigest::Primary(primary) = digest { // place the VRF output into the `Initialized` storage item // and it'll be put onto the under-construction randomness // later, once we've decided which epoch this block is in. - Some(RawVRFOutput::from(post_vrf_output)) + Some(primary.post_vrf_output) } else { None } }); - Initialized::put(maybe_vrf.map(|v| RawVRFOutput::from(v).0)); + Initialized::put(maybe_vrf); // enact epoch change, if necessary. T::EpochChangeTrigger::trigger::(now) @@ -447,7 +481,7 @@ impl Module { /// Call this function exactly once when an epoch changes, to update the /// randomness. Returns the new randomness. 
- fn randomness_change_epoch(next_epoch_index: u64) -> [u8; RANDOMNESS_LENGTH] { + fn randomness_change_epoch(next_epoch_index: u64) -> schnorrkel::Randomness { let this_randomness = NextRandomness::get(); let segment_idx: u32 = ::mutate(|s| sp_std::mem::replace(s, 0)); @@ -476,6 +510,18 @@ impl OnTimestampSet for Module { fn on_timestamp_set(_moment: T::Moment) { } } +impl frame_support::traits::EstimateNextSessionRotation for Module { + fn estimate_next_session_rotation(now: T::BlockNumber) -> Option { + Self::next_expected_epoch_change(now) + } +} + +impl frame_support::traits::Lateness for Module { + fn lateness(&self) -> T::BlockNumber { + Self::lateness() + } +} + impl sp_runtime::BoundToRuntimeAppPublic for Module { type Public = AuthorityId; } @@ -514,11 +560,11 @@ impl pallet_session::OneSessionHandler for Module { // // an optional size hint as to how many VRF outputs there were may be provided. fn compute_randomness( - last_epoch_randomness: [u8; RANDOMNESS_LENGTH], + last_epoch_randomness: schnorrkel::Randomness, epoch_index: u64, - rho: impl Iterator, + rho: impl Iterator, rho_size_hint: Option, -) -> [u8; RANDOMNESS_LENGTH] { +) -> schnorrkel::Randomness { let mut s = Vec::with_capacity(40 + rho_size_hint.unwrap_or(0) * VRF_OUTPUT_LENGTH); s.extend_from_slice(&last_epoch_randomness); s.extend_from_slice(&epoch_index.to_le_bytes()); diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 6a5f2ab07d89b..030183edd7691 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -82,20 +82,20 @@ pub enum RawPreDigest; -impl PreDigest { +impl RawPreDigest { /// Returns the slot number of the pre digest. 
pub fn authority_index(&self) -> AuthorityIndex { match self { - PreDigest::Primary(p) => p.authority_index, - PreDigest::Secondary(s) => s.authority_index, + RawPreDigest::Primary(p) => p.authority_index, + RawPreDigest::Secondary(s) => s.authority_index, } } /// Returns the slot number of the pre digest. pub fn slot_number(&self) -> SlotNumber { match self { - PreDigest::Primary(p) => p.slot_number, - PreDigest::Secondary(s) => s.slot_number, + RawPreDigest::Primary(p) => p.slot_number, + RawPreDigest::Secondary(s) => s.slot_number, } } @@ -103,8 +103,8 @@ impl PreDigest { /// of the chain. pub fn added_weight(&self) -> super::SassafrasBlockWeight { match self { - PreDigest::Primary(_) => 1, - PreDigest::Secondary(_) => 0, + RawPreDigest::Primary(_) => 1, + RawPreDigest::Secondary(_) => 0, } } } diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 0f3e1d6d29a58..31bcfde37d77f 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -24,8 +24,8 @@ pub mod digests; pub mod inherents; pub use sp_consensus_vrf::schnorrkel::{ - VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH, RawVRFOutput, VRFOutput, - RawVRFProof, VRFProof, Randomness, + VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH, RANDOMNESS_LENGTH, + RawVRFOutput, VRFOutput, RawVRFProof, VRFProof, Randomness, }; use sp_std::vec::Vec; From 60e785d88161b6977a1f584c3bfbb8f4e8e22b06 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sat, 11 Apr 2020 17:21:18 +0200 Subject: [PATCH 68/75] Fix sc-consensus-sassafras compile --- primitives/consensus/sassafras/src/digests.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index 030183edd7691..dc6186c92909a 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -66,6 +66,10 @@ pub struct RawSecondaryPreDigest { pub commitments: Vec, } 
+#[cfg(feature = "std")] +/// Sassafras secondary slot assignment pre-digest for std environment. +pub type SecondaryPreDigest = RawSecondaryPreDigest; + /// A Sassafras pre-digest. The validator pre-commit a VRF proof at `vrf_index`, and now reveal it /// as `vrf_output`. /// From b21daea958f1fd3e62f8ed9c79035c5f36f07e51 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sat, 11 Apr 2020 18:58:42 +0200 Subject: [PATCH 69/75] Fix interface update for node template --- Cargo.lock | 13 +- bin/sassafras-template/node/Cargo.toml | 4 +- bin/sassafras-template/node/build.rs | 8 +- bin/sassafras-template/node/src/chain_spec.rs | 155 ++++++++---------- bin/sassafras-template/node/src/command.rs | 75 ++++++--- bin/sassafras-template/node/src/main.rs | 13 +- bin/sassafras-template/node/src/service.rs | 50 +++--- bin/sassafras-template/runtime/src/lib.rs | 15 +- client/consensus/sassafras/Cargo.toml | 1 + client/consensus/sassafras/src/authorship.rs | 3 +- .../sassafras/src/communication/mod.rs | 3 +- client/consensus/sassafras/src/lib.rs | 5 +- frame/sassafras/src/lib.rs | 8 +- primitives/consensus/sassafras/src/lib.rs | 16 +- 14 files changed, 180 insertions(+), 189 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 28db4e5f0997f..7cf49734b63a2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5822,6 +5822,7 @@ dependencies = [ "sc-basic-authorship", "sc-cli", "sc-client", + "sc-client-api", "sc-consensus-sassafras", "sc-executor", "sc-finality-grandpa", @@ -5837,7 +5838,6 @@ dependencies = [ "sp-transaction-pool", "structopt", "substrate-build-script-utils", - "vergen", ] [[package]] @@ -6328,6 +6328,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-consensus-sassafras", + "sp-consensus-vrf", "sp-core", "sp-inherents", "sp-io", @@ -9148,16 +9149,6 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" -[[package]] -name = "vergen" -version = "3.1.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ce50d8996df1f85af15f2cd8d33daae6e479575123ef4314a51a70a230739cb" -dependencies = [ - "bitflags", - "chrono", -] - [[package]] name = "version_check" version = "0.9.1" diff --git a/bin/sassafras-template/node/Cargo.toml b/bin/sassafras-template/node/Cargo.toml index daa0b5e967e28..d0d7c73eb788e 100644 --- a/bin/sassafras-template/node/Cargo.toml +++ b/bin/sassafras-template/node/Cargo.toml @@ -26,6 +26,7 @@ sc-consensus-sassafras = { path = "../../../client/consensus/sassafras" } sp-consensus-sassafras = { path = "../../../primitives/consensus/sassafras" } sp-consensus = { path = "../../../primitives/consensus/common" } sc-client = { path = "../../../client/" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../../../client/api" } sp-runtime = { path = "../../../primitives/runtime" } sc-basic-authorship = { path = "../../../client/basic-authorship" } grandpa = { package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } @@ -33,5 +34,4 @@ grandpa-primitives = { package = "sp-finality-grandpa", path = "../../../primiti sassafras-template-runtime = { path = "../runtime" } [build-dependencies] -vergen = "3.0.4" -build-script-utils = { package = "substrate-build-script-utils", path = "../../../utils/build-script-utils" } +substrate-build-script-utils = { version = "2.0.0-alpha.5", path = "../../../utils/build-script-utils" } diff --git a/bin/sassafras-template/node/build.rs b/bin/sassafras-template/node/build.rs index 222cbb409285b..e3bfe3116bf28 100644 --- a/bin/sassafras-template/node/build.rs +++ b/bin/sassafras-template/node/build.rs @@ -1,9 +1,7 @@ -use vergen::{ConstantsFlags, generate_cargo_keys}; - -const ERROR_MSG: &str = "Failed to generate metadata files"; +use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; fn main() { - generate_cargo_keys(ConstantsFlags::SHA_SHORT).expect(ERROR_MSG); + generate_cargo_keys(); - 
build_script_utils::rerun_if_git_head_changed(); + rerun_if_git_head_changed(); } diff --git a/bin/sassafras-template/node/src/chain_spec.rs b/bin/sassafras-template/node/src/chain_spec.rs index abc722fba5724..f0082e38b44b5 100644 --- a/bin/sassafras-template/node/src/chain_spec.rs +++ b/bin/sassafras-template/node/src/chain_spec.rs @@ -5,25 +5,14 @@ use sassafras_template_runtime::{ }; use grandpa_primitives::AuthorityId as GrandpaId; use sp_consensus_sassafras::AuthorityId as SassafrasId; -use sc_service; use sp_runtime::traits::{Verify, IdentifyAccount}; +use sc_service::ChainType; // Note this is the URL for the telemetry server //const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. -pub type ChainSpec = sc_service::ChainSpec; - -/// The chain specification option. This is expected to come in from the CLI and -/// is little more than one of a number of alternatives which can easily be converted -/// from a string (`--chain=...`) into a `ChainSpec`. -#[derive(Clone, Debug)] -pub enum Alternative { - /// Whatever the current runtime is, with just Alice as an auth. - Development, - /// Whatever the current runtime is, with simple Alice/Bob auths. - LocalTestnet, -} +pub type ChainSpec = sc_service::GenericChainSpec; /// Helper function to generate a crypto pair from seed pub fn get_from_seed(seed: &str) -> ::Public { @@ -41,78 +30,73 @@ pub fn get_account_id_from_seed(seed: &str) -> AccountId where AccountPublic::from(get_from_seed::(seed)).into_account() } -/// Helper function to generate an authority key for Sassafras and Grandpa. 
-pub fn get_authority_keys_from_seed(s: &str) -> (SassafrasId, GrandpaId) { - (get_from_seed::(s), get_from_seed::(s)) +/// Helper function to generate authority keys for Sassafras and Grandpa +pub fn authority_keys_from_seed(s: &str) -> (SassafrasId, GrandpaId) { + ( + get_from_seed::(s), + get_from_seed::(s), + ) } -impl Alternative { - /// Get an actual chain config from one of the alternatives. - pub(crate) fn load(self) -> Result { - Ok(match self { - Alternative::Development => ChainSpec::from_genesis( - "Development", - "dev", - || testnet_genesis( - vec![ - get_authority_keys_from_seed("Alice"), - ], - get_account_id_from_seed::("Alice"), - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - true, - ), - vec![], - None, - None, - None, - None - ), - Alternative::LocalTestnet => ChainSpec::from_genesis( - "Local Testnet", - "local_testnet", - || testnet_genesis( - vec![ - get_authority_keys_from_seed("Alice"), - get_authority_keys_from_seed("Bob"), - ], - get_account_id_from_seed::("Alice"), - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - true, - ), - vec![], - None, - None, - None, - None - ), - }) - } +pub fn development_config() -> ChainSpec { + ChainSpec::from_genesis( + "Development", + "dev", + ChainType::Development, + || testnet_genesis( + vec![ + authority_keys_from_seed("Alice"), + ], + get_account_id_from_seed::("Alice"), + vec![ + get_account_id_from_seed::("Alice"), +
get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + true, + ), + vec![], + None, + None, + None, + None, + ) +} - pub(crate) fn from(s: &str) -> Option { - match s { - "dev" => Some(Alternative::Development), - "" | "local" => Some(Alternative::LocalTestnet), - _ => None, - } - } +pub fn local_testnet_config() -> ChainSpec { + ChainSpec::from_genesis( + "Local Testnet", + "local_testnet", + ChainType::Local, + || testnet_genesis( + vec![ + authority_keys_from_seed("Alice"), + authority_keys_from_seed("Bob"), + ], + get_account_id_from_seed::("Alice"), + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + true, + ), + vec![], + None, + None, + None, + None, + ) } fn testnet_genesis(initial_authorities: Vec<(SassafrasId, GrandpaId)>, @@ -138,10 +122,3 @@ fn testnet_genesis(initial_authorities: Vec<(SassafrasId, GrandpaId)>, }), } } - -pub fn load_spec(id: &str) -> Result, String> { - Ok(match Alternative::from(id) { - Some(spec) => Some(spec.load()?), - None => None, - }) -} diff --git a/bin/sassafras-template/node/src/command.rs b/bin/sassafras-template/node/src/command.rs index 012c6389c3098..b443f80f0f60b 100644 --- a/bin/sassafras-template/node/src/command.rs +++ b/bin/sassafras-template/node/src/command.rs @@ -14,35 +14,64 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use sc_cli::VersionInfo; -use crate::service; +use std::sync::Arc; use crate::chain_spec; use crate::cli::Cli; +use crate::service; +use sc_cli::SubstrateCli; -/// Parse and run command line arguments -pub fn run(version: VersionInfo) -> sc_cli::Result<()> { - let opt = sc_cli::from_args::(&version); +impl SubstrateCli for Cli { + fn impl_name() -> &'static str { + "Substrate Node" + } + + fn impl_version() -> &'static str { + env!("SUBSTRATE_CLI_IMPL_VERSION") + } + + fn description() -> &'static str { + env!("CARGO_PKG_DESCRIPTION") + } + + fn author() -> &'static str { + env!("CARGO_PKG_AUTHORS") + } - let mut config = sc_service::Configuration::from_version(&version); + fn support_url() -> &'static str { + "support.anonymous.an" + } + + fn copyright_start_year() -> i32 { + 2017 + } + + fn executable_name() -> &'static str { + env!("CARGO_PKG_NAME") + } + + fn load_spec(&self, id: &str) -> Result, String> { + Ok(match id { + "dev" => Box::new(chain_spec::development_config()), + "" | "local" => Box::new(chain_spec::local_testnet_config()), + path => Box::new(chain_spec::ChainSpec::from_json_file( + std::path::PathBuf::from(path), + )?), + }) + } +} + +/// Parse and run command line arguments +pub fn run() -> sc_cli::Result<()> { + let cli = Cli::from_args(); - match opt.subcommand { + match &cli.subcommand { Some(subcommand) => { - subcommand.init(&version)?; - subcommand.update_config(&mut config, chain_spec::load_spec, &version)?; - subcommand.run( - config, - |config: _| Ok(new_full_start!(config).0), - ) - }, + let runner = cli.create_runner(subcommand)?; + runner.run_subcommand(subcommand, |config| Ok(new_full_start!(config).0)) + } None => { - opt.run.init(&version)?; - opt.run.update_config(&mut config, chain_spec::load_spec, &version)?; - opt.run.run( - config, - service::new_light, - service::new_full, - &version, - ) - }, + let runner = cli.create_runner(&cli.run)?; + runner.run_node(service::new_light, service::new_full) + } } } diff --git 
a/bin/sassafras-template/node/src/main.rs b/bin/sassafras-template/node/src/main.rs index 91b2c257e0cd7..369e6932a0308 100644 --- a/bin/sassafras-template/node/src/main.rs +++ b/bin/sassafras-template/node/src/main.rs @@ -8,16 +8,5 @@ mod cli; mod command; fn main() -> sc_cli::Result<()> { - let version = sc_cli::VersionInfo { - name: "Substrate Node", - commit: env!("VERGEN_SHA_SHORT"), - version: env!("CARGO_PKG_VERSION"), - executable_name: "node-template", - author: "Anonymous", - description: "Template Node", - support_url: "support.anonymous.an", - copyright_start_year: 2017, - }; - - command::run(version) + command::run() } diff --git a/bin/sassafras-template/node/src/service.rs b/bin/sassafras-template/node/src/service.rs index e31b40f10307e..e627a7b367ee3 100644 --- a/bin/sassafras-template/node/src/service.rs +++ b/bin/sassafras-template/node/src/service.rs @@ -2,8 +2,9 @@ use std::sync::Arc; use std::time::Duration; -use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; +use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider}; use sc_client::LongestChain; +use sc_client_api::ExecutorProvider; use sassafras_template_runtime::{self, GenesisConfig, opaque::Block, RuntimeApi}; use sc_service::{error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder}; use sp_inherents::InherentDataProviders; @@ -40,11 +41,8 @@ macro_rules! 
new_full_start { .with_import_queue(|_config, client, mut select_chain, _transaction_pool| { let select_chain = select_chain.take() .ok_or_else(|| sc_service::Error::SelectChainRequired)?; - let (grandpa_block_import, grandpa_link) = grandpa::block_import( - client.clone(), - &*client, - select_chain, - )?; + let (grandpa_block_import, grandpa_link) = + grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain)?; let justification_import = grandpa_block_import.clone(); let (block_import, sassafras_link) = sc_consensus_sassafras::block_import( @@ -73,19 +71,14 @@ macro_rules! new_full_start { } /// Builds a new service for a full client. -pub fn new_full(config: Configuration) +pub fn new_full(config: Configuration) -> Result { - let is_authority = config.roles.is_authority(); + let is_authority = config.role.is_authority(); let force_authoring = config.force_authoring; - let name = config.name.clone(); + let name = config.network.node_name.clone(); let disable_grandpa = config.disable_grandpa; - // sentry nodes announce themselves as authorities to the network - // and should run the same protocols authorities do, but it should - // never actively participate in any consensus process. - let participates_in_consensus = is_authority && !config.sentry_mode; - let (builder, mut import_setup, inherent_data_providers) = new_full_start!(config); let (block_import, grandpa_link, sassafras_link) = @@ -93,12 +86,14 @@ pub fn new_full(config: Configuration) .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); let service = builder - .with_finality_proof_provider(|client, backend| - Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _) - )? + .with_finality_proof_provider(|client, backend| { + // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider + let provider = client as Arc>; + Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) + })? 
.build()?; - if participates_in_consensus { + if is_authority { let proposer = sc_basic_authorship::ProposerFactory::new( service.client(), service.transaction_pool() @@ -131,7 +126,7 @@ pub fn new_full(config: Configuration) // if the node isn't actively participating in consensus then it doesn't // need a keystore, regardless of which protocol we use below. - let keystore = if participates_in_consensus { + let keystore = if is_authority { Some(service.keystore()) } else { None @@ -160,9 +155,9 @@ pub fn new_full(config: Configuration) link: grandpa_link, network: service.network(), inherent_data_providers: inherent_data_providers.clone(), - on_exit: service.on_exit(), telemetry_on_connect: Some(service.telemetry_on_connect_stream()), voting_rule: grandpa::VotingRulesBuilder::default().build(), + prometheus_registry: service.prometheus_registry() }; // the GRANDPA voter task is considered infallible, i.e. @@ -183,7 +178,7 @@ pub fn new_full(config: Configuration) } /// Builds a new service for a light client. -pub fn new_light(config: Configuration) +pub fn new_light(config: Configuration) -> Result { let inherent_data_providers = InherentDataProviders::new(); @@ -207,7 +202,10 @@ pub fn new_light(config: Configuration) .map(|fetcher| fetcher.checker().clone()) .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; let grandpa_block_import = grandpa::light_block_import( - client.clone(), backend, &*client.clone(), Arc::new(fetch_checker), + client.clone(), + backend, + &(client.clone() as Arc<_>), + Arc::new(fetch_checker), )?; let finality_proof_import = grandpa_block_import.clone(); @@ -233,8 +231,10 @@ pub fn new_light(config: Configuration) Ok((import_queue, finality_proof_request_builder)) })? - .with_finality_proof_provider(|client, backend| - Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _) - )? 
+ .with_finality_proof_provider(|client, backend| { + // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider + let provider = client as Arc>; + Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) + })? .build() } diff --git a/bin/sassafras-template/runtime/src/lib.rs b/bin/sassafras-template/runtime/src/lib.rs index 85ed233057c46..0d93cbe3fc697 100644 --- a/bin/sassafras-template/runtime/src/lib.rs +++ b/bin/sassafras-template/runtime/src/lib.rs @@ -11,8 +11,8 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); use sp_std::prelude::*; use sp_core::OpaqueMetadata; use sp_runtime::{ - ApplyExtrinsicResult, transaction_validity::TransactionValidity, generic, create_runtime_str, - impl_opaque_keys, MultiSignature + ApplyExtrinsicResult, generic, create_runtime_str, impl_opaque_keys, MultiSignature, + transaction_validity::{TransactionValidity, TransactionSource}, }; use sp_runtime::traits::{ BlakeTwo256, Block as BlockT, IdentityLookup, Verify, ConvertInto, IdentifyAccount @@ -294,10 +294,6 @@ impl_runtime_apis! { Executive::apply_extrinsic(extrinsic) } - fn apply_trusted_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_trusted_extrinsic(extrinsic) - } - fn finalize_block() -> ::Header { Executive::finalize_block() } @@ -319,8 +315,11 @@ impl_runtime_apis! 
{ } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction(tx: ::Extrinsic) -> TransactionValidity { - Executive::validate_transaction(tx) + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx) } } diff --git a/client/consensus/sassafras/Cargo.toml b/client/consensus/sassafras/Cargo.toml index 5cece862c530f..7221936fbb68f 100644 --- a/client/consensus/sassafras/Cargo.toml +++ b/client/consensus/sassafras/Cargo.toml @@ -27,6 +27,7 @@ sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } sp-block-builder = { version = "2.0.0-alpha.2", path = "../../../primitives/block-builder" } sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +sp-consensus-vrf = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/vrf" } sc-consensus-uncles = { version = "0.8.0-alpha.2", path = "../uncles" } sc-consensus-slots = { version = "0.8.0-alpha.2", path = "../slots" } sc-network = { version = "0.8.0-alpha.2", path = "../../network" } diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index aacd66e9ece7f..02dbbd401ddb2 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -26,9 +26,10 @@ use sp_runtime::traits::Block as BlockT; use sp_consensus_sassafras::{ SlotNumber, AuthorityPair, SassafrasConfiguration, AuthorityId, SassafrasAuthorityWeight, SASSAFRAS_ENGINE_ID, - VRFProof, SASSAFRAS_TICKET_VRF_PREFIX, VRFOutput, + SASSAFRAS_TICKET_VRF_PREFIX, digests::{PreDigest, PrimaryPreDigest, SecondaryPreDigest}, }; +use sp_consensus_vrf::schnorrkel::{VRFProof, VRFOutput}; use sc_consensus_epochs::ViableEpochDescriptor; use sc_keystore::KeyStorePtr; use log::trace; diff --git 
a/client/consensus/sassafras/src/communication/mod.rs b/client/consensus/sassafras/src/communication/mod.rs index 350270b596cb9..bc27c397eff08 100644 --- a/client/consensus/sassafras/src/communication/mod.rs +++ b/client/consensus/sassafras/src/communication/mod.rs @@ -3,7 +3,8 @@ mod network; use aead::Aead; use codec::{Encode, Decode}; use futures::channel::mpsc::{UnboundedSender, UnboundedReceiver}; -use sp_consensus_sassafras::{SlotNumber, VRFProof, AuthorityId, AuthorityPair}; +use sp_consensus_sassafras::{SlotNumber, AuthorityId, AuthorityPair}; +use sp_consensus_vrf::schnorrkel::VRFProof; use sc_keystore::KeyStorePtr; use crate::PublishingSet; diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 2a91156f7f34d..2aabd7f6d7dfa 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -18,13 +18,14 @@ pub use sp_consensus_sassafras::{ SassafrasApi, ConsensusLog, SASSAFRAS_ENGINE_ID, SlotNumber, SassafrasConfiguration, - AuthorityId, AuthorityPair, AuthoritySignature, VRFOutput, - SassafrasAuthorityWeight, VRF_OUTPUT_LENGTH, VRFProof, Randomness, + AuthorityId, AuthorityPair, AuthoritySignature, + SassafrasAuthorityWeight, VRF_OUTPUT_LENGTH, Randomness, digests::{ PreDigest, CompatibleDigestItem, NextEpochDescriptor, PostBlockDescriptor, PrimaryPreDigest, SecondaryPreDigest, }, }; +pub use sp_consensus_vrf::schnorrkel::{VRFProof, VRFOutput}; pub use sp_consensus::SyncOracle; use std::{ diff --git a/frame/sassafras/src/lib.rs b/frame/sassafras/src/lib.rs index 739140cfe7385..83c15f12af896 100644 --- a/frame/sassafras/src/lib.rs +++ b/frame/sassafras/src/lib.rs @@ -38,7 +38,7 @@ use sp_staking::{ use codec::{Encode, Decode}; use sp_inherents::{InherentIdentifier, InherentData, ProvideInherent, MakeFatalError}; use sp_consensus_sassafras::{ - SASSAFRAS_ENGINE_ID, ConsensusLog, SassafrasAuthorityWeight, SlotNumber, + SASSAFRAS_ENGINE_ID, RawConsensusLog, SassafrasAuthorityWeight, 
SlotNumber, inherents::{INHERENT_IDENTIFIER, SassafrasInherentData}, digests::{NextEpochDescriptor, RawPreDigest}, }; @@ -385,7 +385,7 @@ impl Module { randomness: next_randomness, }; - Self::deposit_consensus(ConsensusLog::NextEpochData(next)) + Self::deposit_consensus(RawConsensusLog::::NextEpochData(next)) } // finds the start slot of the current epoch. only guaranteed to @@ -450,7 +450,7 @@ impl Module { randomness: Self::randomness(), }; - Self::deposit_consensus(ConsensusLog::NextEpochData(next)) + Self::deposit_consensus(RawConsensusLog::::NextEpochData(next)) } // the slot number of the current block being initialized @@ -551,7 +551,7 @@ impl pallet_session::OneSessionHandler for Module { } fn on_disabled(i: usize) { - Self::deposit_consensus(ConsensusLog::OnDisabled(i as u32)) + Self::deposit_consensus(RawConsensusLog::::OnDisabled(i as u32)) } } diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 31bcfde37d77f..5dcd5ac3a94cb 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -24,12 +24,12 @@ pub mod digests; pub mod inherents; pub use sp_consensus_vrf::schnorrkel::{ - VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH, RANDOMNESS_LENGTH, - RawVRFOutput, VRFOutput, RawVRFProof, VRFProof, Randomness, + Randomness, VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH, RANDOMNESS_LENGTH, }; use sp_std::vec::Vec; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; +use sp_consensus_vrf::schnorrkel; use codec::{Encode, Decode}; mod app { @@ -79,19 +79,23 @@ pub type SassafrasAuthorityWeight = u64; /// The weight of a Sassafras block. pub type SassafrasBlockWeight = u32; -/// An consensus log item for Sassafras. +/// A consensus log item for Sassafras. #[derive(Decode, Encode, Clone, RuntimeDebug)] -pub enum ConsensusLog { +pub enum RawConsensusLog { /// The epoch has changed. NextEpochData(digests::NextEpochDescriptor), /// Commitments to be included in the current block. 
- PostBlockData(digests::PostBlockDescriptor), + PostBlockData(digests::RawPostBlockDescriptor), /// Disable the authority with given index. OnDisabled(AuthorityIndex), } +/// A consensus log item suitable for std environment. +#[cfg(feature = "std")] +pub type ConsensusLog = RawConsensusLog; + /// Configuration data used by the Sassafras consensus engine. -#[derive(Clone, Encode, Decode, RuntimeDebug)] +#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq)] pub struct SassafrasConfiguration { /// The slot duration in milliseconds for Sassafras. pub slot_duration: u64, From dd7be378d9c7db6a10d09c0d6045b6d4d914d538 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Sun, 12 Apr 2020 19:11:01 +0200 Subject: [PATCH 70/75] Add communication traces --- .../sassafras/src/communication/mod.rs | 73 +++++++++++++++++-- 1 file changed, 66 insertions(+), 7 deletions(-) diff --git a/client/consensus/sassafras/src/communication/mod.rs b/client/consensus/sassafras/src/communication/mod.rs index bc27c397eff08..dea0cd704ad0c 100644 --- a/client/consensus/sassafras/src/communication/mod.rs +++ b/client/consensus/sassafras/src/communication/mod.rs @@ -6,12 +6,21 @@ use futures::channel::mpsc::{UnboundedSender, UnboundedReceiver}; use sp_consensus_sassafras::{SlotNumber, AuthorityId, AuthorityPair}; use sp_consensus_vrf::schnorrkel::VRFProof; use sc_keystore::KeyStorePtr; +use log::trace; use crate::PublishingSet; pub use self::network::{ SASSAFRAS_ENGINE_ID, SASSAFRAS_PROTOCOL_NAME, GossipValidator, NetworkBridge, }; +mod cost { + use sc_network::ReputationChange as Rep; +} + +mod benefit { + use sc_network::ReputationChange as Rep; +} + pub fn send_out( sender: &UnboundedSender<(AuthorityId, [u8; 32], Vec)>, set: &mut PublishingSet, @@ -29,7 +38,13 @@ pub fn send_out( let receiver_id = set.authorities[pending.submit_authority_index as usize].0.clone(); let receiver_public = match schnorrkel::PublicKey::from_bytes(receiver_id.as_ref()) { Ok(public) => public, - Err(_) => continue, 
+ Err(_) => { + trace!( + target: "sassafras_communication", + "Sending out a pending message, but receiver id decoding failed, ignoring", + ); + continue + }, }; let (ephemeral_key, aead) = receiver_public .init_aead32_unauthenticated::(); @@ -38,14 +53,30 @@ pub fn send_out( &pending.vrf_proof.encode()[..], ) { Ok(encrypted) => encrypted, - Err(_) => continue, + Err(_) => { + trace!( + target: "sassafras_communication", + "Sending out a pending message, but encrypting it failed, ignoring", + ); + continue + }, }; match sender.unbounded_send((receiver_id, ephemeral_key.to_bytes(), encrypted)) { Ok(()) => { + trace!( + target: "sassafras_communication", + "Successfully sent out a pending message.", + ); pending.submit_status = Some(slot_number); }, - Err(_) => break, + Err(_) => { + trace!( + target: "sassafras_communication", + "Sending out a pending message, but the channel signaled failure, breaking", + ); + break + }, } } } @@ -61,13 +92,25 @@ pub fn receive_in( while let Ok(Some((receiver_id, ephemeral_key, encrypted))) = receiver.try_next() { let receiver_pair = match keystore.key_pair::(&receiver_id) { Ok(pair) => pair, - Err(_) => continue, + Err(_) => { + trace!( + target: "sassafras_communication", + "Received an encypted message, but the key pair cannot be found, ignoring." + ); + continue + }, }; let pair = crate::authorship::get_keypair(&receiver_pair); let aead = pair.secret.aead32_unauthenticated::( &match schnorrkel::PublicKey::from_bytes(&ephemeral_key) { Ok(key) => key, - Err(_) => continue, + Err(_) => { + trace!( + target: "sassafras_communication", + "Received an encrypted message, but the public key decoding failed, ignoring." + ); + continue + }, } ); let decrypted = match aead.decrypt( @@ -75,13 +118,29 @@ pub fn receive_in( &encrypted[..], ) { Ok(decrypted) => decrypted, - Err(_) => continue, + Err(_) => { + trace!( + target: "sassafras_communication", + "Received an ecrypted message, but decrypting it failed, ignoring." 
+ ); + continue + }, }; let proof = match VRFProof::decode(&mut &decrypted[..]) { Ok(proof) => proof, - Err(_) => continue, + Err(_) => { + trace!( + target: "sassafras_communication", + "Received an encrypted message, but the proof decoding failed, ignoring." + ); + continue + }, }; + trace!( + target: "sassafras_communication", + "Received an encrypted message and decoded it as proof {:?}", proof + ); set.disclosing.push(proof); } } From 80b09a12d8eb9cf3676fca3e1d35d7e9c56296b9 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 15 Apr 2020 13:53:55 +0200 Subject: [PATCH 71/75] Short circuit sending proofs if the proof is already published --- Cargo.lock | 2 +- client/consensus/sassafras/src/communication/mod.rs | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index fd9108035318b..23cf541658d24 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6306,7 +6306,7 @@ dependencies = [ "num-rational", "num-traits 0.2.11", "parity-scale-codec", - "parking_lot 0.10.0", + "parking_lot 0.10.2", "pdqselect", "rand 0.7.3", "sc-block-builder", diff --git a/client/consensus/sassafras/src/communication/mod.rs b/client/consensus/sassafras/src/communication/mod.rs index dea0cd704ad0c..18ea9e99d46fc 100644 --- a/client/consensus/sassafras/src/communication/mod.rs +++ b/client/consensus/sassafras/src/communication/mod.rs @@ -35,6 +35,11 @@ pub fn send_out( } if pending.submit_status.is_none() { + if set.proofs.contains(&pending.vrf_proof) { + pending.submit_status = Some(slot_number); + continue + } + let receiver_id = set.authorities[pending.submit_authority_index as usize].0.clone(); let receiver_public = match schnorrkel::PublicKey::from_bytes(receiver_id.as_ref()) { Ok(public) => public, From 799ec5a6b98fafa1fbce3e9febc8e203270da799 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 15 Apr 2020 13:56:08 +0200 Subject: [PATCH 72/75] Skip adding proof to disclosing set if it's already published --- 
client/consensus/sassafras/src/communication/mod.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/client/consensus/sassafras/src/communication/mod.rs b/client/consensus/sassafras/src/communication/mod.rs index 18ea9e99d46fc..a77748aa514e7 100644 --- a/client/consensus/sassafras/src/communication/mod.rs +++ b/client/consensus/sassafras/src/communication/mod.rs @@ -35,7 +35,9 @@ pub fn send_out( } if pending.submit_status.is_none() { - if set.proofs.contains(&pending.vrf_proof) { + if set.proofs.contains(&pending.vrf_proof) || + set.disclosing.contains(&pending.vrf_proof) + { pending.submit_status = Some(slot_number); continue } @@ -142,6 +144,10 @@ pub fn receive_in( }, }; + if set.proofs.contains(&proof) || set.disclosing.contains(&proof) { + continue + } + trace!( target: "sassafras_communication", "Received an encrypted message and decoded it as proof {:?}", proof From bcc1ef1e1b75a2c51b0f329409a679ecc3c11ec9 Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Wed, 15 Apr 2020 14:14:41 +0200 Subject: [PATCH 73/75] Add description of how generating, publishing and validating set works --- client/consensus/sassafras/src/lib.rs | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 2aabd7f6d7dfa..4947400270ad4 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -15,6 +15,32 @@ // along with Substrate. If not, see . //! # Sassafras +//! +//! Sassafras consensus block production mechanism. +//! +//! ## Epoch procedure +//! +//! Each epoch maintains three different set of validators. The generating set, publishing set, and +//! validating set. +//! +//! The process starts with generating set `GeneratingSet`. Each validator, locally, computes a set +//! of `PendingProof`s via the `GeneratingSet::append_to_pending` function. No network communication +//! 
or block processing happens with this generating set. +//! +//! In the second epoch, this `GeneratingSet` is converted into a publishing set `PublishingSet`. It +//! consists of `pending` (the local `PendingProof` that were copied from `GeneratingSet`), +//! `disclosing` (VRF proofs that the validator is planning to disclose, but have not yet made it on +//! to blocks) and `proofs` (VRF proofs that made it onto blocks). The validator first scans its +//! `pending`, figuring out those proofs that have not yet been disclosed, encrypts each such proof via +//! another validator's public key, and transmits it via gossip. The other validator then decrypts the +//! proof and adds it into `disclosing`. Finally, the validator scans its `disclosing`, and for +//! those proofs that are not yet published, it pushes them as commitments onto blocks, during +//! authorship. When a proof is on a block, it's pushed into `proofs`. +//! +//! In the third epoch, the `PublishingSet` is converted into a validating set +//! `ValidatingSet`. `proofs` are sorted using an inside-out order, and then associated with slot +//! numbers. Upon a given slot number, a validator uses the VRF output in `pending` to finally +//! construct the block, if a slot number matches.
pub use sp_consensus_sassafras::{ SassafrasApi, ConsensusLog, SASSAFRAS_ENGINE_ID, SlotNumber, SassafrasConfiguration, From 4be9c2f6264a52d43a52763680cbe71b6b04b59d Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 24 Apr 2020 12:12:24 +0200 Subject: [PATCH 74/75] Should use VRFOutput for sortition but not VRFProof --- client/consensus/sassafras/src/authorship.rs | 18 ++++---- .../sassafras/src/communication/mod.rs | 18 ++++---- client/consensus/sassafras/src/lib.rs | 42 +++++++++---------- .../consensus/sassafras/src/verification.rs | 8 ++-- primitives/consensus/sassafras/src/digests.rs | 18 ++++---- primitives/consensus/sassafras/src/lib.rs | 6 +-- primitives/consensus/vrf/src/schnorrkel.rs | 14 +++++++ 7 files changed, 70 insertions(+), 54 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index 02dbbd401ddb2..bcc4922d5544e 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -101,13 +101,15 @@ fn claim_primary_slot( epoch: &Epoch, keystore: &KeyStorePtr, ) -> Option<(PreDigest, AuthorityPair)> { - let ticket_vrf_index = epoch.validating.proofs.iter().position(|(s, _)| *s == slot_number)? as u32; - let ticket_vrf_proof = epoch.validating.proofs[ticket_vrf_index as usize].clone().1; + let ticket_vrf_index = epoch.validating.outputs + .iter() + .position(|(s, _)| *s == slot_number)? 
as u32; + let ticket_vrf_output = epoch.validating.outputs[ticket_vrf_index as usize].clone().1; let pending_index = epoch.validating.pending.iter() - .position(|p| p.vrf_proof == ticket_vrf_proof)?; + .position(|p| p.vrf_output == ticket_vrf_output)?; let ticket_vrf_attempt = epoch.validating.pending[pending_index].attempt; let authority_index = epoch.validating.pending[pending_index].authority_index; - let ticket_vrf_output = epoch.validating.pending[pending_index].vrf_output.clone(); + let ticket_vrf_proof = epoch.validating.pending[pending_index].vrf_proof.clone(); let keystore = keystore.read(); let pair = keystore.key_pair::( @@ -125,7 +127,7 @@ fn claim_primary_slot( let mut commitments = Vec::new(); for disclosing in &epoch.publishing.disclosing { if commitments.len() < MAX_PRE_DIGEST_COMMITMENTS && - epoch.publishing.proofs.iter().position(|p| p == disclosing).is_none() + epoch.publishing.outputs.iter().position(|o| o == disclosing).is_none() { commitments.push(disclosing.clone()); } @@ -133,7 +135,7 @@ fn claim_primary_slot( trace!(target: "sassafras", "Appending commitment length: {}", commitments.len()); let claim = PreDigest::Primary(PrimaryPreDigest { - ticket_vrf_index, ticket_vrf_attempt, ticket_vrf_output, + ticket_vrf_index, ticket_vrf_attempt, ticket_vrf_proof, authority_index, slot_number, post_vrf_proof, post_vrf_output, commitments, }); @@ -229,8 +231,8 @@ fn claim_secondary_slot( if pair.public() == *expected_author { let mut commitments = Vec::new(); for disclosing in &epoch.publishing.disclosing { - if commitments.len() < MAX_PRE_DIGEST_COMMITMENTS && epoch.publishing.proofs.iter() - .position(|p| p == disclosing) + if commitments.len() < MAX_PRE_DIGEST_COMMITMENTS && epoch.publishing.outputs.iter() + .position(|o| o == disclosing) .is_none() { commitments.push(disclosing.clone()); diff --git a/client/consensus/sassafras/src/communication/mod.rs b/client/consensus/sassafras/src/communication/mod.rs index a77748aa514e7..80e4fd809b942 100644 
--- a/client/consensus/sassafras/src/communication/mod.rs +++ b/client/consensus/sassafras/src/communication/mod.rs @@ -4,7 +4,7 @@ use aead::Aead; use codec::{Encode, Decode}; use futures::channel::mpsc::{UnboundedSender, UnboundedReceiver}; use sp_consensus_sassafras::{SlotNumber, AuthorityId, AuthorityPair}; -use sp_consensus_vrf::schnorrkel::VRFProof; +use sp_consensus_vrf::schnorrkel::VRFOutput; use sc_keystore::KeyStorePtr; use log::trace; use crate::PublishingSet; @@ -35,8 +35,8 @@ pub fn send_out( } if pending.submit_status.is_none() { - if set.proofs.contains(&pending.vrf_proof) || - set.disclosing.contains(&pending.vrf_proof) + if set.outputs.contains(&pending.vrf_output) || + set.disclosing.contains(&pending.vrf_output) { pending.submit_status = Some(slot_number); continue @@ -57,7 +57,7 @@ pub fn send_out( .init_aead32_unauthenticated::(); let encrypted = match aead.encrypt( &Default::default(), - &pending.vrf_proof.encode()[..], + &pending.vrf_output.encode()[..], ) { Ok(encrypted) => encrypted, Err(_) => { @@ -133,8 +133,8 @@ pub fn receive_in( continue }, }; - let proof = match VRFProof::decode(&mut &decrypted[..]) { - Ok(proof) => proof, + let output = match VRFOutput::decode(&mut &decrypted[..]) { + Ok(output) => output, Err(_) => { trace!( target: "sassafras_communication", @@ -144,14 +144,14 @@ pub fn receive_in( }, }; - if set.proofs.contains(&proof) || set.disclosing.contains(&proof) { + if set.outputs.contains(&output) || set.disclosing.contains(&output) { continue } trace!( target: "sassafras_communication", - "Received an encrypted message and decoded it as proof {:?}", proof + "Received an encrypted message and decoded it as output {:?}", output ); - set.disclosing.push(proof); + set.disclosing.push(output); } } diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 4947400270ad4..9f14e31dd6d55 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -182,10 
+182,10 @@ pub struct PublishingSet { pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, /// Randomness for this epoch. pub randomness: Randomness, - /// Proofs of all VRFs collected. - pub proofs: Vec, - /// Disclosing proofs. - pub disclosing: Vec, + /// Outputs of all VRFs collected. + pub outputs: Vec, + /// Disclosing outputs. + pub disclosing: Vec, /// Local pending proofs collected. pub pending: Vec, } @@ -215,8 +215,8 @@ pub struct ValidatingSet { pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, /// Randomness for this epoch. pub randomness: Randomness, - /// Proofs as ordered by slot numbers. - pub proofs: Vec<(SlotNumber, VRFProof)>, + /// Outputs as ordered by slot numbers. + pub outputs: Vec<(SlotNumber, VRFOutput)>, /// Pending local proofs. pub pending: Vec, } @@ -250,8 +250,8 @@ impl EpochT for Epoch { fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { let start_slot = self.validating.start_slot + self.validating.duration; - let sortition_proofs = utils::sortition( - &self.publishing.proofs, + let sortition_outputs = utils::sortition( + &self.publishing.outputs, self.validating.duration as usize ); @@ -266,7 +266,7 @@ impl EpochT for Epoch { epoch_index: self.generating.epoch_index, authorities: self.generating.authorities.clone(), randomness: self.generating.randomness, - proofs: Vec::new(), + outputs: Vec::new(), pending: self.generating.pending.clone(), disclosing: Vec::new(), }, @@ -276,7 +276,7 @@ impl EpochT for Epoch { epoch_index: self.publishing.epoch_index, authorities: self.publishing.authorities.clone(), randomness: self.publishing.randomness, - proofs: sortition_proofs + outputs: sortition_outputs .into_iter() .enumerate() .map(|(i, p)| (start_slot + i as u64, p)) @@ -400,7 +400,7 @@ impl Config { }, publishing: PublishingSet { epoch_index: 1, - proofs: Vec::new(), + outputs: Vec::new(), authorities: self.genesis_authorities.clone(), randomness: self.randomness.clone(), pending: Vec::new(), @@ 
-410,7 +410,7 @@ impl Config { start_slot: slot_number, duration: self.epoch_length, epoch_index: 0, - proofs: Vec::new(), + outputs: Vec::new(), authorities: self.genesis_authorities.clone(), randomness: self.randomness.clone(), pending: Vec::new(), @@ -1266,9 +1266,9 @@ impl BlockImport for SassafrasBlockImport BlockImport for SassafrasBlockImport BlockImport for SassafrasBlockImport( signature: AuthoritySignature, epoch: &Epoch, ) -> Result<(), Error> { - let ticket_vrf_proof = epoch.validating.proofs.iter() - .find(|p| p.0 == pre_digest.slot_number) + let ticket_vrf_output = epoch.validating.outputs.iter() + .find(|o| o.0 == pre_digest.slot_number) .ok_or_else(|| Error::ProofNotFound)? .1 .clone(); @@ -160,8 +160,8 @@ fn check_primary_header( schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { p.vrf_verify( ticket_transcript, - &pre_digest.ticket_vrf_output, - &ticket_vrf_proof, + &ticket_vrf_output, + &pre_digest.ticket_vrf_proof, ) }).map_err(|s| Error::VRFVerificationFailed(s))? }; diff --git a/primitives/consensus/sassafras/src/digests.rs b/primitives/consensus/sassafras/src/digests.rs index dc6186c92909a..8ea81ee60d319 100644 --- a/primitives/consensus/sassafras/src/digests.rs +++ b/primitives/consensus/sassafras/src/digests.rs @@ -38,7 +38,7 @@ pub struct RawPrimaryPreDigest, + pub commitments: Vec, } #[cfg(feature = "std")] @@ -57,18 +57,18 @@ pub type PrimaryPreDigest = RawPrimaryPreDigest { +pub struct RawSecondaryPreDigest { /// Authority index. pub authority_index: AuthorityIndex, /// Slot number. pub slot_number: SlotNumber, /// Additional commitments posted directly at pre-digest. - pub commitments: Vec, + pub commitments: Vec, } #[cfg(feature = "std")] /// Sassafras secondary slot assignment pre-digest for std environment. -pub type SecondaryPreDigest = RawSecondaryPreDigest; +pub type SecondaryPreDigest = RawSecondaryPreDigest; /// A Sassafras pre-digest. 
The validator pre-commit a VRF proof at `vrf_index`, and now reveal it /// as `vrf_output`. @@ -79,7 +79,7 @@ pub enum RawPreDigest), /// A secondary deterministic slot assignment. - Secondary(RawSecondaryPreDigest), + Secondary(RawSecondaryPreDigest), } #[cfg(feature = "std")] @@ -130,14 +130,14 @@ pub struct NextEpochDescriptor { /// /// This digest is generated by runtime, optional, and can be included at every block. #[derive(Clone, RuntimeDebug, Encode, Decode)] -pub struct RawPostBlockDescriptor { +pub struct RawPostBlockDescriptor { /// Commitments of tickets. - pub commitments: Vec, + pub commitments: Vec, } #[cfg(feature = "std")] /// Sassafras post-digest suitable for std environment. -pub type PostBlockDescriptor = RawPostBlockDescriptor; +pub type PostBlockDescriptor = RawPostBlockDescriptor; /// A digest item which is usable with Sassafras consensus. #[cfg(feature = "std")] diff --git a/primitives/consensus/sassafras/src/lib.rs b/primitives/consensus/sassafras/src/lib.rs index 5dcd5ac3a94cb..6ba631b058e45 100644 --- a/primitives/consensus/sassafras/src/lib.rs +++ b/primitives/consensus/sassafras/src/lib.rs @@ -81,18 +81,18 @@ pub type SassafrasBlockWeight = u32; /// A consensus log item for Sassafras. #[derive(Decode, Encode, Clone, RuntimeDebug)] -pub enum RawConsensusLog { +pub enum RawConsensusLog { /// The epoch has changed. NextEpochData(digests::NextEpochDescriptor), /// Commitments to be included in the current block. - PostBlockData(digests::RawPostBlockDescriptor), + PostBlockData(digests::RawPostBlockDescriptor), /// Disable the authority with given index. OnDisabled(AuthorityIndex), } /// A consensus log item suitable for std environment. #[cfg(feature = "std")] -pub type ConsensusLog = RawConsensusLog; +pub type ConsensusLog = RawConsensusLog; /// Configuration data used by the Sassafras consensus engine. 
#[derive(Clone, Encode, Decode, RuntimeDebug, PartialEq, Eq)] diff --git a/primitives/consensus/vrf/src/schnorrkel.rs b/primitives/consensus/vrf/src/schnorrkel.rs index 265572dbdaee7..df7cf5155a146 100644 --- a/primitives/consensus/vrf/src/schnorrkel.rs +++ b/primitives/consensus/vrf/src/schnorrkel.rs @@ -71,6 +71,20 @@ impl DerefMut for VRFOutput { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } +#[cfg(feature = "std")] +impl PartialOrd for VRFOutput { + fn partial_cmp(&self, other: &Self) -> Option { + self.0.as_bytes().partial_cmp(other.0.as_bytes()) + } +} + +#[cfg(feature = "std")] +impl Ord for VRFOutput { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + self.0.as_bytes().cmp(other.0.as_bytes()) + } +} + #[cfg(feature = "std")] impl Encode for VRFOutput { fn encode(&self) -> Vec { From 8d0423523ddff18be4eb25b7ac9cf28ed5e3eadc Mon Sep 17 00:00:00 2001 From: Wei Tang Date: Fri, 24 Apr 2020 12:13:55 +0200 Subject: [PATCH 75/75] Rename PendingProof -> PendingVRF --- client/consensus/sassafras/src/authorship.rs | 4 ++-- client/consensus/sassafras/src/lib.rs | 18 +++++++++--------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/client/consensus/sassafras/src/authorship.rs b/client/consensus/sassafras/src/authorship.rs index bcc4922d5544e..31b6acb52a934 100644 --- a/client/consensus/sassafras/src/authorship.rs +++ b/client/consensus/sassafras/src/authorship.rs @@ -33,7 +33,7 @@ use sp_consensus_vrf::schnorrkel::{VRFProof, VRFOutput}; use sc_consensus_epochs::ViableEpochDescriptor; use sc_keystore::KeyStorePtr; use log::trace; -use super::{Epoch, GeneratingSet, PendingProof}; +use super::{Epoch, GeneratingSet, PendingVRF}; /// Calculates the primary selection threshold for a given authority, taking /// into account `c` (`1 - c` represents the probability of a slot being empty). 
@@ -183,7 +183,7 @@ impl GeneratingSet { check_primary_threshold(inout, threshold) }) { - self.pending.push(PendingProof::new( + self.pending.push(PendingVRF::new( attempt, authority_index as u32, self.authorities.len() as u32, diff --git a/client/consensus/sassafras/src/lib.rs b/client/consensus/sassafras/src/lib.rs index 9f14e31dd6d55..8fe4466490ccd 100644 --- a/client/consensus/sassafras/src/lib.rs +++ b/client/consensus/sassafras/src/lib.rs @@ -110,9 +110,9 @@ mod authorship; mod utils; mod communication; -/// Information about a local pending proof. +/// Information about a local pending VRF. #[derive(Debug, Clone, Encode, Decode)] -pub struct PendingProof { +pub struct PendingVRF { /// Attempt integer number. pub attempt: u64, /// Validator index. @@ -127,7 +127,7 @@ pub struct PendingProof { pub submit_authority_index: u32, } -impl PendingProof { +impl PendingVRF { /// Create a new pending proof. pub fn new( attempt: u64, @@ -157,8 +157,8 @@ pub struct GeneratingSet { pub authorities: Vec<(AuthorityId, SassafrasAuthorityWeight)>, /// Randomness for this epoch. pub randomness: Randomness, - /// Local pending proofs collected. - pub pending: Vec, + /// Local pending VRFs collected. + pub pending: Vec, } impl GeneratingSet { @@ -186,8 +186,8 @@ pub struct PublishingSet { pub outputs: Vec, /// Disclosing outputs. pub disclosing: Vec, - /// Local pending proofs collected. - pub pending: Vec, + /// Local pending VRFs collected. + pub pending: Vec, } impl PublishingSet { @@ -217,8 +217,8 @@ pub struct ValidatingSet { pub randomness: Randomness, /// Outputs as ordered by slot numbers. pub outputs: Vec<(SlotNumber, VRFOutput)>, - /// Pending local proofs. - pub pending: Vec, + /// Pending local VRFs. + pub pending: Vec, } impl ValidatingSet {