diff --git a/Cargo.lock b/Cargo.lock index 8cec4df576509..7ae553341d4ee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1234,7 +1234,6 @@ name = "polkadot-api" version = "0.1.0" dependencies = [ "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "log 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "polkadot-executor 0.1.0", "polkadot-primitives 0.1.0", "polkadot-runtime 0.1.0", @@ -1291,7 +1290,9 @@ name = "polkadot-collator" version = "0.1.0" dependencies = [ "futures 0.1.18 (registry+https://github.com/rust-lang/crates.io-index)", + "polkadot-parachain 0.1.0", "polkadot-primitives 0.1.0", + "substrate-codec 0.1.0", "substrate-primitives 0.1.0", ] @@ -1307,6 +1308,7 @@ dependencies = [ "parking_lot 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)", "polkadot-api 0.1.0", "polkadot-collator 0.1.0", + "polkadot-parachain 0.1.0", "polkadot-primitives 0.1.0", "polkadot-runtime 0.1.0", "polkadot-statement-table 0.1.0", @@ -1355,6 +1357,16 @@ dependencies = [ "tempdir 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)", ] +[[package]] +name = "polkadot-parachain" +version = "0.1.0" +dependencies = [ + "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "substrate-codec 0.1.0", + "tiny-keccak 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", + "wasmi 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "polkadot-primitives" version = "0.1.0" @@ -1454,17 +1466,6 @@ dependencies = [ "transaction-pool 1.9.0 (registry+https://github.com/rust-lang/crates.io-index)", ] -[[package]] -name = "polkadot-validator" -version = "0.1.0" -dependencies = [ - "error-chain 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", - "polkadot-primitives 0.1.0", - "serde 1.0.27 (registry+https://github.com/rust-lang/crates.io-index)", - "substrate-primitives 0.1.0", - "substrate-serializer 0.1.0", -] - [[package]] name = "pretty_assertions" version = "0.4.1" @@ -1933,9 +1934,6 @@ dependencies = [ [[package]] name = "substrate-codec" version = "0.1.0" -dependencies = [ - "substrate-runtime-std 0.1.0", -] [[package]] name = "substrate-executor" diff --git a/Cargo.toml b/Cargo.toml index 35e719d30781a..ea3dc865ab2e4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,12 +19,13 @@ members = [ "polkadot/consensus", "polkadot/executor", "polkadot/keystore", + "polkadot/parachain", "polkadot/primitives", "polkadot/runtime", "polkadot/statement-table", "polkadot/transaction-pool", - "polkadot/validator", "polkadot/service", + "substrate/bft", "substrate/client", "substrate/client/db", @@ -53,6 +54,7 @@ members = [ "substrate/serializer", "substrate/state-machine", "substrate/test-runtime", + "demo/runtime", "demo/primitives", "demo/executor", diff --git a/demo/runtime/src/lib.rs b/demo/runtime/src/lib.rs index 1f7248f68e4df..86de60abce687 100644 --- a/demo/runtime/src/lib.rs +++ b/demo/runtime/src/lib.rs @@ -76,6 +76,8 @@ impl consensus::Trait for Concrete { pub type Consensus = consensus::Module; impl timestamp::Trait for Concrete { + const SET_POSITION: u32 = 0; + type Value = u64; } diff --git a/demo/runtime/wasm/Cargo.lock b/demo/runtime/wasm/Cargo.lock index 6bf460690fe10..489e1daaaa7e8 100644 --- a/demo/runtime/wasm/Cargo.lock +++ b/demo/runtime/wasm/Cargo.lock @@ -498,9 +498,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "substrate-codec" version = "0.1.0" -dependencies = [ - "substrate-runtime-std 0.1.0", -] [[package]] name = 
"substrate-keyring" diff --git a/demo/runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.compact.wasm b/demo/runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.compact.wasm index 444bf9cdaeb36..358a69ea49e1e 100644 Binary files a/demo/runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.compact.wasm and b/demo/runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.compact.wasm differ diff --git a/demo/runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.wasm b/demo/runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.wasm index 9217fabc4b298..53036e4f65706 100755 Binary files a/demo/runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.wasm and b/demo/runtime/wasm/target/wasm32-unknown-unknown/release/demo_runtime.wasm differ diff --git a/polkadot/api/Cargo.toml b/polkadot/api/Cargo.toml index cb2204d69e9d2..860536ac95913 100644 --- a/polkadot/api/Cargo.toml +++ b/polkadot/api/Cargo.toml @@ -5,7 +5,6 @@ authors = ["Parity Technologies "] [dependencies] error-chain = "0.11" -log = "0.3" polkadot-executor = { path = "../executor" } polkadot-runtime = { path = "../runtime" } polkadot-primitives = { path = "../primitives" } diff --git a/polkadot/api/src/lib.rs b/polkadot/api/src/lib.rs index a052785423953..fd24f52766eb8 100644 --- a/polkadot/api/src/lib.rs +++ b/polkadot/api/src/lib.rs @@ -40,8 +40,8 @@ use polkadot_executor::Executor as LocalDispatch; use substrate_executor::{NativeExecutionDispatch, NativeExecutor}; use state_machine::OverlayedChanges; use primitives::{AccountId, BlockId, Hash, Index, SessionKey, Timestamp}; -use primitives::parachain::DutyRoster; -use runtime::{Block, Header, UncheckedExtrinsic, Extrinsic, Call, TimestampCall}; +use primitives::parachain::{DutyRoster, CandidateReceipt, Id as ParaId}; +use runtime::{Block, Header, UncheckedExtrinsic, Extrinsic, Call, TimestampCall, ParachainsCall}; error_chain! { errors { @@ -135,12 +135,21 @@ pub trait PolkadotApi { /// Get the index of an account at a block. fn index(&self, at: &Self::CheckedBlockId, account: AccountId) -> Result; + /// Get the active parachains at a block. + fn active_parachains(&self, at: &Self::CheckedBlockId) -> Result>; - /// Evaluate a block and see if it gives an error. - fn evaluate_block(&self, at: &Self::CheckedBlockId, block: Block) -> Result<()>; + /// Get the validation code of a parachain at a block. If the parachain is active, this will always return `Some`. + fn parachain_code(&self, at: &Self::CheckedBlockId, parachain: ParaId) -> Result>>; + + /// Get the chain head of a parachain. If the parachain is active, this will always return `Some`. + fn parachain_head(&self, at: &Self::CheckedBlockId, parachain: ParaId) -> Result>>; + + /// Evaluate a block. Returns true if the block is good, false if it is known to be bad, + /// and an error if we can't evaluate for some reason. + fn evaluate_block(&self, at: &Self::CheckedBlockId, block: Block) -> Result; /// Create a block builder on top of the parent block. 
- fn build_block(&self, parent: &Self::CheckedBlockId, timestamp: Timestamp) -> Result; + fn build_block(&self, parent: &Self::CheckedBlockId, timestamp: Timestamp, parachains: Vec) -> Result; } /// A checked block ID used for the substrate-client implementation of CheckedBlockId; @@ -213,15 +222,36 @@ impl PolkadotApi for Client> with_runtime!(self, at, ::runtime::Timestamp::now) } - fn evaluate_block(&self, at: &CheckedId, block: Block) -> Result<()> { - with_runtime!(self, at, || ::runtime::Executive::execute_block(block)) + fn evaluate_block(&self, at: &CheckedId, block: Block) -> Result { + use substrate_executor::error::ErrorKind as ExecErrorKind; + + let res = with_runtime!(self, at, || ::runtime::Executive::execute_block(block)); + match res { + Ok(()) => Ok(true), + Err(err) => match err.kind() { + &ErrorKind::Executor(ExecErrorKind::Runtime) => Ok(false), + _ => Err(err) + } + } } fn index(&self, at: &CheckedId, account: AccountId) -> Result { with_runtime!(self, at, || ::runtime::System::account_index(account)) } - fn build_block(&self, parent: &CheckedId, timestamp: Timestamp) -> Result { + fn active_parachains(&self, at: &CheckedId) -> Result> { + with_runtime!(self, at, ::runtime::Parachains::active_parachains) + } + + fn parachain_code(&self, at: &CheckedId, parachain: ParaId) -> Result>> { + with_runtime!(self, at, || ::runtime::Parachains::parachain_code(parachain)) + } + + fn parachain_head(&self, at: &CheckedId, parachain: ParaId) -> Result>> { + with_runtime!(self, at, || ::runtime::Parachains::parachain_head(parachain)) + } + + fn build_block(&self, parent: &CheckedId, timestamp: Timestamp, parachains: Vec) -> Result { let parent = parent.block_id(); let header = Header { parent_hash: self.block_hash_from_id(parent)?.ok_or(ErrorKind::UnknownBlock(*parent))?, @@ -239,6 +269,14 @@ impl PolkadotApi for Client> function: Call::Timestamp(TimestampCall::set(timestamp)), }, signature: Default::default(), + }, + UncheckedExtrinsic { + extrinsic: Extrinsic { + signed: Default::default(), + index: Default::default(), + function: Call::Parachains(ParachainsCall::set_heads(parachains)), + }, + signature: Default::default(), } ]; @@ -275,7 +313,7 @@ pub struct ClientBlockBuilder { impl ClientBlockBuilder where S::Error: Into { - // initialises a block ready to allow extrinsics to be applied. + // initialises a block, ready to allow extrinsics to be applied. 
fn initialise_block(&mut self) -> Result<()> { let result = { let mut ext = state_machine::Ext::new(&mut self.changes, &self.state); @@ -406,7 +444,7 @@ mod tests { let client = client(); let id = client.check_id(BlockId::Number(0)).unwrap(); - let block_builder = client.build_block(&id, 1_000_000).unwrap(); + let block_builder = client.build_block(&id, 1_000_000, Vec::new()).unwrap(); let block = block_builder.bake(); assert_eq!(block.header.number, 1); diff --git a/polkadot/collator/Cargo.toml b/polkadot/collator/Cargo.toml index 6f1fa1630228f..42fc14c666c18 100644 --- a/polkadot/collator/Cargo.toml +++ b/polkadot/collator/Cargo.toml @@ -1,10 +1,12 @@ [package] name = "polkadot-collator" version = "0.1.0" -authors = ["Parity Technologies "] +authors = ["Parity Technologies "] description = "Abstract collation logic" [dependencies] futures = "0.1.17" +substrate-codec = { path = "../../substrate/codec", version = "0.1" } substrate-primitives = { path = "../../substrate/primitives", version = "0.1" } polkadot-primitives = { path = "../primitives", version = "0.1" } +polkadot-parachain = { path = "../parachain", version = "0.1" } diff --git a/polkadot/collator/src/lib.rs b/polkadot/collator/src/lib.rs index 14b430ebbad17..967a8e6de6fc7 100644 --- a/polkadot/collator/src/lib.rs +++ b/polkadot/collator/src/lib.rs @@ -45,6 +45,7 @@ //! to be performed, as the collation logic itself. extern crate futures; +extern crate substrate_codec as codec; extern crate substrate_primitives as primitives; extern crate polkadot_primitives; @@ -82,7 +83,6 @@ pub trait RelayChainContext { } /// Collate the necessary ingress queue using the given context. -// TODO: impl trait pub fn collate_ingress<'a, R>(relay_context: R) -> Box + 'a> where @@ -105,7 +105,7 @@ pub fn collate_ingress<'a, R>(relay_context: R) // and then by the parachain ID. // // then transform that into the consolidated egress queue. - let future = stream::futures_unordered(egress_fetch) + Box::new(stream::futures_unordered(egress_fetch) .fold(BTreeMap::new(), |mut map, (routing_id, egresses)| { for (depth, egress) in egresses.into_iter().rev().enumerate() { let depth = -(depth as i64); @@ -116,9 +116,7 @@ pub fn collate_ingress<'a, R>(relay_context: R) }) .map(|ordered| ordered.into_iter().map(|((_, id), egress)| (id, egress))) .map(|i| i.collect::>()) - .map(ConsolidatedIngress); - - Box::new(future) + .map(ConsolidatedIngress)) } /// Produce a candidate for the parachain. diff --git a/polkadot/consensus/Cargo.toml b/polkadot/consensus/Cargo.toml index 29d5588865159..5be758a91bd60 100644 --- a/polkadot/consensus/Cargo.toml +++ b/polkadot/consensus/Cargo.toml @@ -13,6 +13,7 @@ log = "0.3" exit-future = "0.1" polkadot-api = { path = "../api" } polkadot-collator = { path = "../collator" } +polkadot-parachain = { path = "../parachain" } polkadot-primitives = { path = "../primitives" } polkadot-runtime = { path = "../runtime" } polkadot-statement-table = { path = "../statement-table" } diff --git a/polkadot/consensus/src/collation.rs b/polkadot/consensus/src/collation.rs new file mode 100644 index 0000000000000..3738ae900413a --- /dev/null +++ b/polkadot/consensus/src/collation.rs @@ -0,0 +1,176 @@ +// Copyright 2017 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Validator-side view of collation. +//! +//! This module contains type definitions, a trait for a batch of collators, and a trait for +//! attempting to fetch a collation repeatedly until a valid one is obtained. + +use std::sync::Arc; + +use polkadot_api::PolkadotApi; +use polkadot_primitives::{Hash, AccountId}; +use polkadot_primitives::parachain::{Id as ParaId, Chain, BlockData, Extrinsic, CandidateReceipt}; + +use futures::prelude::*; + +/// A full collation. +pub struct Collation { + /// Block data. + pub block_data: BlockData, + /// The candidate receipt itself. + pub receipt: CandidateReceipt, +} + +/// Encapsulates connections to collators and allows collation on any parachain. +/// +/// This is expected to be a lightweight, shared type like an `Arc`. +pub trait Collators: Clone { + /// Errors when producing collations. + type Error; + /// A full collation. + type Collation: IntoFuture; + + /// Collate on a specific parachain, building on a given relay chain parent hash. + fn collate(&self, parachain: ParaId, relay_parent: Hash) -> Self::Collation; + + /// Note a bad collator. TODO: take proof + fn note_bad_collator(&self, collator: AccountId); +} + +/// A future which resolves when a collation is available. +/// +/// This future is fused. +pub struct CollationFetch { + parachain: Option, + relay_parent_hash: Hash, + relay_parent: P::CheckedBlockId, + collators: C, + live_fetch: Option<::Future>, + client: Arc

<P>, +} + +impl<C: Collators, P: PolkadotApi> CollationFetch<C, P> { + /// Create a new collation fetcher for the given chain. + pub fn new(parachain: Chain, relay_parent: P::CheckedBlockId, relay_parent_hash: Hash, collators: C, client: Arc<P>
) -> Self { + CollationFetch { + relay_parent_hash, + relay_parent, + collators, + client, + parachain: match parachain { + Chain::Parachain(id) => Some(id), + Chain::Relay => None, + }, + live_fetch: None, + } + } +} + +impl Future for CollationFetch { + type Item = (Collation, Extrinsic); + type Error = C::Error; + + fn poll(&mut self) -> Poll<(Collation, Extrinsic), C::Error> { + let parachain = match self.parachain.as_ref() { + Some(p) => p.clone(), + None => return Ok(Async::NotReady), + }; + + loop { + let x = { + let (r, c) = (self.relay_parent_hash, &self.collators); + let poll = self.live_fetch + .get_or_insert_with(move || c.collate(parachain, r).into_future()) + .poll(); + + if let Err(_) = poll { self.parachain = None } + try_ready!(poll) + }; + + match validate_collation(&*self.client, &self.relay_parent, &x) { + Ok(()) => { + self.parachain = None; + + // TODO: generate extrinsic while verifying. + return Ok(Async::Ready((x, Extrinsic))); + } + Err(e) => { + debug!("Failed to validate parachain due to API error: {}", e); + + // just continue if we got a bad collation or failed to validate + self.live_fetch = None; + self.collators.note_bad_collator(x.receipt.collator) + } + } + } + } +} + +// Errors that can occur when validating a parachain. +error_chain! { + types { Error, ErrorKind, ResultExt; } + + errors { + InactiveParachain(id: ParaId) { + description("Collated for inactive parachain"), + display("Collated for inactive parachain: {:?}", id), + } + ValidationFailure { + description("Parachain candidate failed validation."), + display("Parachain candidate failed validation."), + } + WrongHeadData(expected: Vec, got: Vec) { + description("Parachain validation produced wrong head data."), + display("Parachain validation produced wrong head data (expected: {:?}, got {:?}", expected, got), + } + } + + links { + PolkadotApi(::polkadot_api::Error, ::polkadot_api::ErrorKind); + } +} + +/// Check whether a given collation is valid. Returns `Ok` on success, error otherwise. +pub fn validate_collation(client: &P, relay_parent: &P::CheckedBlockId, collation: &Collation) -> Result<(), Error> { + use parachain::{self, ValidationParams}; + + let para_id = collation.receipt.parachain_index; + let validation_code = client.parachain_code(relay_parent, para_id)? + .ok_or_else(|| ErrorKind::InactiveParachain(para_id))?; + + let chain_head = client.parachain_head(relay_parent, para_id)? + .ok_or_else(|| ErrorKind::InactiveParachain(para_id))?; + + let params = ValidationParams { + parent_head: chain_head, + block_data: collation.block_data.0.clone(), + }; + + match parachain::wasm::validate_candidate(&validation_code, params) { + Ok(result) => { + if result.head_data == collation.receipt.head_data.0 { + Ok(()) + } else { + Err(ErrorKind::WrongHeadData( + collation.receipt.head_data.0.clone(), + result.head_data + ).into()) + } + } + Err(_) => Err(ErrorKind::ValidationFailure.into()) + } +} diff --git a/polkadot/consensus/src/dynamic_inclusion.rs b/polkadot/consensus/src/dynamic_inclusion.rs new file mode 100644 index 0000000000000..d48e486274598 --- /dev/null +++ b/polkadot/consensus/src/dynamic_inclusion.rs @@ -0,0 +1,130 @@ +// Copyright 2017 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Dynamic inclusion threshold over time. + +use std::time::{Duration, Instant}; + +fn duration_to_micros(duration: &Duration) -> u64 { + duration.as_secs() * 1_000_000 + (duration.subsec_nanos() / 1000) as u64 +} + +/// Dynamic inclusion threshold over time. +/// +/// The acceptable proportion of parachains which must have parachain candidates +/// reduces over time (eventually going to zero). +#[derive(Debug, Clone)] +pub struct DynamicInclusion { + start: Instant, + y: u64, + m: u64, +} + +impl DynamicInclusion { + /// Constructs a new dynamic inclusion threshold calculator based on the time now, + /// how many parachain candidates are required at the beginning, and when an empty + /// block will be allowed. + pub fn new(initial: usize, start: Instant, allow_empty: Duration) -> Self { + // linear function f(n_candidates) -> valid after microseconds + // f(0) = allow_empty + // f(initial) = 0 + // m is actually the negative slope to avoid using signed arithmetic. + let (y, m) = if initial != 0 { + let y = duration_to_micros(&allow_empty); + + (y, y / initial as u64) + } else { + (0, 0) + }; + + DynamicInclusion { + start, + y, + m, + } + } + + /// Returns the duration from `now` after which the amount of included parachain candidates + /// would be enough, or `None` if it is sufficient now. + /// + /// Panics if `now` is earlier than the `start`. + pub fn acceptable_in(&self, now: Instant, included: usize) -> Option { + let elapsed = now.duration_since(self.start); + let elapsed = duration_to_micros(&elapsed); + + let valid_after = self.y.saturating_sub(self.m * included as u64); + + if elapsed >= valid_after { + None + } else { + Some(Duration::from_millis((valid_after - elapsed) as u64 / 1000)) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn full_immediately_allowed() { + let now = Instant::now(); + + let dynamic = DynamicInclusion::new( + 10, + now, + Duration::from_millis(4000), + ); + + assert!(dynamic.acceptable_in(now, 10).is_none()); + assert!(dynamic.acceptable_in(now, 11).is_none()); + assert!(dynamic.acceptable_in(now + Duration::from_millis(2000), 10).is_none()); + } + + #[test] + fn half_allowed_halfway() { + let now = Instant::now(); + + let dynamic = DynamicInclusion::new( + 10, + now, + Duration::from_millis(4000), + ); + + assert_eq!(dynamic.acceptable_in(now, 5), Some(Duration::from_millis(2000))); + assert!(dynamic.acceptable_in(now + Duration::from_millis(2000), 5).is_none()); + assert!(dynamic.acceptable_in(now + Duration::from_millis(3000), 5).is_none()); + assert!(dynamic.acceptable_in(now + Duration::from_millis(4000), 5).is_none()); + } + + #[test] + fn zero_initial_is_flat() { + let now = Instant::now(); + + let dynamic = DynamicInclusion::new( + 0, + now, + Duration::from_secs(10_000), + ); + + for i in 0..10_001 { + let now = now + Duration::from_secs(i); + assert!(dynamic.acceptable_in(now, 0).is_none()); + assert!(dynamic.acceptable_in(now, 1).is_none()); + assert!(dynamic.acceptable_in(now, 10).is_none()); + } + } +} diff --git a/polkadot/consensus/src/error.rs b/polkadot/consensus/src/error.rs index e2a4b3a483177..ebc998ab69ec9 100644 --- a/polkadot/consensus/src/error.rs +++ 
b/polkadot/consensus/src/error.rs @@ -16,45 +16,30 @@ //! Errors that can occur during the consensus process. -use primitives::block::{HeaderHash, Number}; +use polkadot_primitives::AccountId; + error_chain! { links { PolkadotApi(::polkadot_api::Error, ::polkadot_api::ErrorKind); Bft(::bft::Error, ::bft::ErrorKind); } - foreign_links { - Io(::std::io::Error); - SharedIo(::futures::future::SharedError<::std::io::Error>); - } - errors { InvalidDutyRosterLength(expected: usize, got: usize) { description("Duty Roster had invalid length"), display("Invalid duty roster length: expected {}, got {}", expected, got), } - ProposalNotForPolkadot { - description("Proposal provided not a Polkadot block."), - display("Proposal provided not a Polkadot block."), - } - TimestampInFuture { - description("Proposal had timestamp too far in the future."), - display("Proposal had timestamp too far in the future."), - } - WrongParentHash(expected: HeaderHash, got: HeaderHash) { - description("Proposal had wrong parent hash."), - display("Proposal had wrong parent hash. Expected {:?}, got {:?}", expected, got), + NotValidator(id: AccountId) { + description("Local account ID not a validator at this block."), + display("Local account ID ({:?}) not a validator at this block.", id), } - WrongNumber(expected: Number, got: Number) { - description("Proposal had wrong number."), - display("Proposal had wrong number. Expected {:?}, got {:?}", expected, got), + PrematureDestruction { + description("Proposer destroyed before finishing proposing or evaluating"), + display("Proposer destroyed before finishing proposing or evaluating"), } - ProposalTooLarge(size: usize) { - description("Proposal exceeded the maximum size."), - display( - "Proposal exceeded the maximum size of {} by {} bytes.", - ::MAX_TRANSACTIONS_SIZE, ::MAX_TRANSACTIONS_SIZE.saturating_sub(*size) - ), + Timer(e: String) { + description("Failed to register or resolve async timer."), + display("Timer failed: {}", e), } Executor(e: ::futures::future::ExecuteErrorKind) { description("Unable to dispatch agreement future"), diff --git a/polkadot/consensus/src/evaluation.rs b/polkadot/consensus/src/evaluation.rs new file mode 100644 index 0000000000000..b5ef81d2446a1 --- /dev/null +++ b/polkadot/consensus/src/evaluation.rs @@ -0,0 +1,135 @@ +// Copyright 2017 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Polkadot block evaluation and evaluation errors. + +use super::MAX_TRANSACTIONS_SIZE; + +use codec::Slicable; +use polkadot_runtime::Block as PolkadotGenericBlock; +use polkadot_primitives::Timestamp; +use polkadot_primitives::parachain::Id as ParaId; +use primitives::block::{Block as SubstrateBlock, HeaderHash, Number as BlockNumber}; +use transaction_pool::PolkadotBlock; + +error_chain! 
{ + links { + PolkadotApi(::polkadot_api::Error, ::polkadot_api::ErrorKind); + } + + errors { + ProposalNotForPolkadot { + description("Proposal provided not a Polkadot block."), + display("Proposal provided not a Polkadot block."), + } + TimestampInFuture { + description("Proposal had timestamp too far in the future."), + display("Proposal had timestamp too far in the future."), + } + TooManyCandidates(expected: usize, got: usize) { + description("Proposal included more candidates than is possible."), + display("Proposal included {} candidates for {} parachains", got, expected), + } + ParachainOutOfOrder { + description("Proposal included parachains out of order."), + display("Proposal included parachains out of order."), + } + UnknownParachain(id: ParaId) { + description("Proposal included unregistered parachain."), + display("Proposal included unregistered parachain {:?}", id), + } + WrongParentHash(expected: HeaderHash, got: HeaderHash) { + description("Proposal had wrong parent hash."), + display("Proposal had wrong parent hash. Expected {:?}, got {:?}", expected, got), + } + WrongNumber(expected: BlockNumber, got: BlockNumber) { + description("Proposal had wrong number."), + display("Proposal had wrong number. Expected {:?}, got {:?}", expected, got), + } + ProposalTooLarge(size: usize) { + description("Proposal exceeded the maximum size."), + display( + "Proposal exceeded the maximum size of {} by {} bytes.", + MAX_TRANSACTIONS_SIZE, MAX_TRANSACTIONS_SIZE.saturating_sub(*size) + ), + } + } +} + +/// Attempt to evaluate a substrate block as a polkadot block, returning error +/// upon any initial validity checks failing. +pub fn evaluate_initial( + proposal: &SubstrateBlock, + now: Timestamp, + parent_hash: &HeaderHash, + parent_number: BlockNumber, + active_parachains: &[ParaId], +) -> Result { + const MAX_TIMESTAMP_DRIFT: Timestamp = 60; + + let encoded = Slicable::encode(proposal); + let proposal = PolkadotGenericBlock::decode(&mut &encoded[..]) + .and_then(|b| PolkadotBlock::from(b).ok()) + .ok_or_else(|| ErrorKind::ProposalNotForPolkadot)?; + + let transactions_size = proposal.extrinsics.iter().fold(0, |a, tx| { + a + Slicable::encode(tx).len() + }); + + if transactions_size > MAX_TRANSACTIONS_SIZE { + bail!(ErrorKind::ProposalTooLarge(transactions_size)) + } + + if proposal.header.parent_hash != *parent_hash { + bail!(ErrorKind::WrongParentHash(*parent_hash, proposal.header.parent_hash)); + } + + if proposal.header.number != parent_number + 1 { + bail!(ErrorKind::WrongNumber(parent_number + 1, proposal.header.number)); + } + + let block_timestamp = proposal.timestamp(); + + // lenient maximum -- small drifts will just be delayed using a timer. + if block_timestamp > now + MAX_TIMESTAMP_DRIFT { + bail!(ErrorKind::TimestampInFuture) + } + + { + let n_parachains = active_parachains.len(); + if proposal.parachain_heads().len() > n_parachains { + bail!(ErrorKind::TooManyCandidates(n_parachains, proposal.parachain_heads().len())); + } + + let mut last_id = None; + let mut iter = active_parachains.iter(); + for head in proposal.parachain_heads() { + // proposed heads must be ascending order by parachain ID without duplicate. + if last_id.as_ref().map_or(false, |x| x >= &head.parachain_index) { + bail!(ErrorKind::ParachainOutOfOrder); + } + + if !iter.any(|x| x == &head.parachain_index) { + // must be unknown since active parachains are always sorted. 
+ bail!(ErrorKind::UnknownParachain(head.parachain_index)) + } + + last_id = Some(head.parachain_index); + } + } + + Ok(proposal) +} diff --git a/polkadot/consensus/src/lib.rs b/polkadot/consensus/src/lib.rs index e004a44df5888..f9e3b35a26001 100644 --- a/polkadot/consensus/src/lib.rs +++ b/polkadot/consensus/src/lib.rs @@ -29,15 +29,16 @@ //! //! Groups themselves may be compromised by malicious authorities. -extern crate futures; extern crate ed25519; extern crate parking_lot; extern crate polkadot_api; extern crate polkadot_collator as collator; extern crate polkadot_statement_table as table; +extern crate polkadot_parachain as parachain; extern crate polkadot_primitives; extern crate polkadot_transaction_pool as transaction_pool; extern crate polkadot_runtime; + extern crate substrate_bft as bft; extern crate substrate_codec as codec; extern crate substrate_primitives as primitives; @@ -46,46 +47,60 @@ extern crate substrate_network; extern crate exit_future; extern crate tokio_core; -extern crate substrate_keyring; extern crate substrate_client as client; #[macro_use] extern crate error_chain; + +#[macro_use] +extern crate futures; + #[macro_use] extern crate log; +#[cfg(test)] +extern crate substrate_keyring; + use std::collections::{HashMap, HashSet}; use std::sync::Arc; +use std::time::{Duration, Instant}; use codec::Slicable; -use table::{Table, Context as TableContextTrait}; use table::generic::Statement as GenericStatement; use runtime_support::Hashable; use polkadot_api::{PolkadotApi, BlockBuilder}; use polkadot_primitives::{Hash, Timestamp}; -use polkadot_primitives::parachain::{Id as ParaId, DutyRoster, BlockData, Extrinsic, CandidateReceipt}; -use polkadot_runtime::Block as PolkadotGenericBlock; +use polkadot_primitives::parachain::{Id as ParaId, Chain, DutyRoster, BlockData, Extrinsic, CandidateReceipt}; use primitives::block::{Block as SubstrateBlock, Header as SubstrateHeader, HeaderHash, Id as BlockId, Number as BlockNumber}; use primitives::AuthorityId; -use transaction_pool::{Ready, TransactionPool, PolkadotBlock}; +use transaction_pool::{Ready, TransactionPool}; +use tokio_core::reactor::{Handle, Timeout, Interval}; use futures::prelude::*; -use futures::future; -use future::Shared; +use futures::future::{self, Shared}; use parking_lot::Mutex; -use tokio_core::reactor::{Handle, Timeout}; +use collation::CollationFetch; +use dynamic_inclusion::DynamicInclusion; +pub use self::collation::{Collators, Collation}; pub use self::error::{ErrorKind, Error}; +pub use self::shared_table::{SharedTable, StatementSource, StatementProducer, ProducedStatements}; pub use service::Service; +mod collation; +mod dynamic_inclusion; +mod evaluation; mod error; mod service; +mod shared_table; // block size limit. const MAX_TRANSACTIONS_SIZE: usize = 4 * 1024 * 1024; /// A handle to a statement table router. -pub trait TableRouter { +/// +/// This is expected to be a lightweight, shared type like an `Arc`. +pub trait TableRouter: Clone { /// Errors when fetching data from the network. type Error; /// Future that resolves when candidate data is fetched. @@ -93,7 +108,7 @@ pub trait TableRouter { /// Future that resolves when extrinsic candidate data is fetched. type FetchExtrinsic: IntoFuture; - /// Note local candidate data. + /// Note local candidate data, making it available on the network to other validators. fn local_candidate_data(&self, hash: Hash, block_data: BlockData, extrinsic: Extrinsic); /// Fetch block data for a specific candidate. 
@@ -126,46 +141,6 @@ pub struct GroupInfo { pub needed_availability: usize, } -struct TableContext { - parent_hash: Hash, - key: Arc, - groups: HashMap, -} - -impl table::Context for TableContext { - fn is_member_of(&self, authority: &AuthorityId, group: &ParaId) -> bool { - self.groups.get(group).map_or(false, |g| g.validity_guarantors.contains(authority)) - } - - fn is_availability_guarantor_of(&self, authority: &AuthorityId, group: &ParaId) -> bool { - self.groups.get(group).map_or(false, |g| g.availability_guarantors.contains(authority)) - } - - fn requisite_votes(&self, group: &ParaId) -> (usize, usize) { - self.groups.get(group).map_or( - (usize::max_value(), usize::max_value()), - |g| (g.needed_validity, g.needed_availability), - ) - } -} - -impl TableContext { - fn local_id(&self) -> AuthorityId { - self.key.public().0 - } - - fn sign_statement(&self, statement: table::Statement) -> table::SignedStatement { - let signature = sign_table_statement(&statement, &self.key, &self.parent_hash).into(); - let local_id = self.key.public().0; - - table::SignedStatement { - statement, - signature, - sender: local_id, - } - } -} - /// Sign a table statement against a parent hash. /// The actual message signed is the encoded statement concatenated with the /// parent hash. @@ -185,247 +160,7 @@ pub fn sign_table_statement(statement: &table::Statement, key: &ed25519::Pair, p key.sign(&encoded) } -// A shared table object. -struct SharedTableInner { - table: Table, - proposed_digest: Option, - checked_validity: HashSet, - checked_availability: HashSet, -} - -impl SharedTableInner { - // Import a single statement. Provide a handle to a table router. - fn import_statement( - &mut self, - context: &TableContext, - router: &R, - statement: table::SignedStatement, - received_from: Option, - ) -> StatementProducer<::Future, ::Future> { - let mut producer = StatementProducer { - fetch_block_data: None, - fetch_extrinsic: None, - produced_statements: Default::default(), - _key: context.key.clone(), - }; - - let summary = match self.table.import_statement(context, statement, received_from) { - Some(summary) => summary, - None => return producer, - }; - - let local_id = context.local_id(); - let is_validity_member = context.is_member_of(&local_id, &summary.group_id); - let is_availability_member = - context.is_availability_guarantor_of(&local_id, &summary.group_id); - - let digest = &summary.candidate; - - // TODO: consider a strategy based on the number of candidate votes as well. - // only check validity if this wasn't locally proposed. - let checking_validity = is_validity_member - && self.proposed_digest.as_ref().map_or(true, |d| d != digest) - && self.checked_validity.insert(digest.clone()); - - let checking_availability = is_availability_member && self.checked_availability.insert(digest.clone()); - - if checking_validity || checking_availability { - match self.table.get_candidate(&digest) { - None => {} // TODO: handle table inconsistency somehow? - Some(candidate) => { - if checking_validity { - producer.fetch_block_data = Some(router.fetch_block_data(candidate).into_future().fuse()); - } - - if checking_availability { - producer.fetch_extrinsic = Some(router.fetch_extrinsic_data(candidate).into_future().fuse()); - } - } - } - } - - producer - } -} - -/// Produced statements about a specific candidate. -/// Both may be `None`. -#[derive(Default)] -pub struct ProducedStatements { - /// A statement about the validity of the candidate. 
- pub validity: Option, - /// A statement about the availability of the candidate. - pub availability: Option, -} - -/// Future that produces statements about a specific candidate. -pub struct StatementProducer { - fetch_block_data: Option>, - fetch_extrinsic: Option>, - produced_statements: ProducedStatements, - _key: Arc, -} - -impl Future for StatementProducer - where - D: Future, - E: Future, -{ - type Item = ProducedStatements; - type Error = Err; - - fn poll(&mut self) -> Poll { - let mut done = true; - if let Some(ref mut fetch_block_data) = self.fetch_block_data { - match fetch_block_data.poll()? { - Async::Ready(_block_data) => { - // TODO [PoC-2] : validate block data here and make statement. - }, - Async::NotReady => { - done = false; - } - } - } - - if let Some(ref mut fetch_extrinsic) = self.fetch_extrinsic { - match fetch_extrinsic.poll()? { - Async::Ready(_extrinsic) => { - // TODO [PoC-2]: guarantee availability of data and make statment. - } - Async::NotReady => { - done = false; - } - } - } - - if done { - Ok(Async::Ready(::std::mem::replace(&mut self.produced_statements, Default::default()))) - } else { - Ok(Async::NotReady) - } - } -} - -/// A shared table object. -pub struct SharedTable { - context: Arc, - inner: Arc>, -} - -impl Clone for SharedTable { - fn clone(&self) -> Self { - SharedTable { - context: self.context.clone(), - inner: self.inner.clone(), - } - } -} - -impl SharedTable { - /// Create a new shared table. - /// - /// Provide the key to sign with, and the parent hash of the relay chain - /// block being built. - pub fn new(groups: HashMap, key: Arc, parent_hash: Hash) -> Self { - SharedTable { - context: Arc::new(TableContext { groups, key, parent_hash }), - inner: Arc::new(Mutex::new(SharedTableInner { - table: Table::default(), - proposed_digest: None, - checked_validity: HashSet::new(), - checked_availability: HashSet::new(), - })) - } - } - - /// Get group info. - pub fn group_info(&self) -> &HashMap { - &self.context.groups - } - - /// Import a single statement. Provide a handle to a table router - /// for dispatching any other requests which come up. - pub fn import_statement( - &self, - router: &R, - statement: table::SignedStatement, - received_from: Option, - ) -> StatementProducer<::Future, ::Future> { - self.inner.lock().import_statement(&*self.context, router, statement, received_from) - } - - /// Sign and import a local statement. - pub fn sign_and_import( - &self, - router: &R, - statement: table::Statement, - ) -> StatementProducer<::Future, ::Future> { - let proposed_digest = match statement { - GenericStatement::Candidate(ref c) => Some(c.hash()), - _ => None, - }; - - let signed_statement = self.context.sign_statement(statement); - - let mut inner = self.inner.lock(); - if proposed_digest.is_some() { - inner.proposed_digest = proposed_digest; - } - - inner.import_statement(&*self.context, router, signed_statement, None) - } - - /// Import many statements at once. - /// - /// Provide an iterator yielding pairs of (statement, received_from). - pub fn import_statements(&self, router: &R, iterable: I) -> U - where - R: TableRouter, - I: IntoIterator)>, - U: ::std::iter::FromIterator::Future, - ::Future> - >, - { - let mut inner = self.inner.lock(); - - iterable.into_iter().map(move |(statement, received_from)| { - inner.import_statement(&*self.context, router, statement, received_from) - }).collect() - } - - /// Check if a proposal is valid. 
- pub fn proposal_valid(&self, _proposal: &SubstrateBlock) -> bool { - false // TODO - } - - /// Execute a closure using a specific candidate. - /// - /// Deadlocks if called recursively. - pub fn with_candidate(&self, digest: &Hash, f: F) -> U - where F: FnOnce(Option<&CandidateReceipt>) -> U - { - let inner = self.inner.lock(); - f(inner.table.get_candidate(digest)) - } - - /// Get all witnessed misbehavior. - pub fn get_misbehavior(&self) -> HashMap { - self.inner.lock().table.get_misbehavior().clone() - } - - /// Fill a statement batch. - pub fn fill_batch(&self, batch: &mut B) { - self.inner.lock().table.fill_batch(batch); - } - - /// Get the local proposed block's hash. - pub fn proposed_hash(&self) -> Option { - self.inner.lock().proposed_digest.clone() - } -} - -fn make_group_info(roster: DutyRoster, authorities: &[AuthorityId]) -> Result, Error> { +fn make_group_info(roster: DutyRoster, authorities: &[AuthorityId], local_id: AuthorityId) -> Result<(HashMap, LocalDuty), Error> { if roster.validator_duty.len() != authorities.len() { bail!(ErrorKind::InvalidDutyRosterLength(authorities.len(), roster.validator_duty.len())) } @@ -434,11 +169,14 @@ fn make_group_info(roster: DutyRoster, authorities: &[AuthorityId]) -> Result {}, // does nothing for now. @@ -467,23 +205,45 @@ fn make_group_info(roster: DutyRoster, authorities: &[AuthorityId]) -> Result { + let local_duty = LocalDuty { + validation: local_validation, + }; + + Ok((map, local_duty)) + } + None => bail!(ErrorKind::NotValidator(local_id)), + } +} + +fn timer_error(e: &::std::io::Error) -> Error { + ErrorKind::Timer(format!("{}", e)).into() } /// Polkadot proposer factory. -pub struct ProposerFactory { +pub struct ProposerFactory { /// The client instance. pub client: Arc, /// The transaction pool. pub transaction_pool: Arc>, /// The backing network handle. pub network: N, - /// Handle to the underlying tokio-core. + /// Parachain collators. + pub collators: P, + /// The timer used to schedule proposal intervals. pub handle: Handle, + /// The duration after which parachain-empty blocks will be allowed. + pub parachain_empty_duration: Duration, } -impl bft::ProposerFactory for ProposerFactory { - type Proposer = Proposer; +impl bft::ProposerFactory for ProposerFactory + where + C: PolkadotApi, + N: Network, + P: Collators, +{ + type Proposer = Proposer; type Error = Error; fn init(&self, parent_header: &SubstrateHeader, authorities: &[AuthorityId], sign_with: Arc) -> Result { @@ -497,135 +257,224 @@ impl bft::ProposerFactory for ProposerFactory let duty_roster = self.client.duty_roster(&checked_id)?; let random_seed = self.client.random_seed(&checked_id)?; - let group_info = make_group_info(duty_roster, authorities)?; + let (group_info, local_duty) = make_group_info( + duty_roster, + authorities, + sign_with.public().0, + )?; + + let active_parachains = self.client.active_parachains(&checked_id)?; + + let n_parachains = active_parachains.len(); let table = Arc::new(SharedTable::new(group_info, sign_with.clone(), parent_hash)); let router = self.network.table_router(table.clone()); + let dynamic_inclusion = DynamicInclusion::new( + n_parachains, + Instant::now(), + self.parachain_empty_duration.clone(), + ); - let timeout = Timeout::new(DELAY_UNTIL, &self.handle)?; + let timeout = Timeout::new(DELAY_UNTIL, &self.handle) + .map_err(|e| timer_error(&e))?; debug!(target: "bft", "Initialising consensus proposer. Refusing to evaluate for {:?} from now.", DELAY_UNTIL); // TODO [PoC-2]: kick off collation process. 
Ok(Proposer { + client: self.client.clone(), + collators: self.collators.clone(), + delay: timeout.shared(), + handle: self.handle.clone(), + dynamic_inclusion, + local_duty, + local_key: sign_with, parent_hash, - parent_number: parent_header.number, parent_id: checked_id, + parent_number: parent_header.number, random_seed, - local_key: sign_with, - client: self.client.clone(), + router, + table, transaction_pool: self.transaction_pool.clone(), - delay: timeout.shared(), - _table: table, - _router: router, }) } } -fn current_timestamp() -> Timestamp { - use std::time; - - time::SystemTime::now().duration_since(time::UNIX_EPOCH) - .expect("now always later than unix epoch; qed") - .as_secs() +struct LocalDuty { + validation: Chain, } /// The Polkadot proposer logic. -pub struct Proposer { +pub struct Proposer { + client: Arc, + collators: P, + delay: Shared, + dynamic_inclusion: DynamicInclusion, + handle: Handle, + local_duty: LocalDuty, + local_key: Arc, parent_hash: HeaderHash, - parent_number: BlockNumber, parent_id: C::CheckedBlockId, + parent_number: BlockNumber, random_seed: Hash, - client: Arc, - local_key: Arc, + router: R, + table: Arc, transaction_pool: Arc>, - delay: Shared, - _table: Arc, - _router: R, } -impl bft::Proposer for Proposer { +impl bft::Proposer for Proposer + where + C: PolkadotApi, + R: TableRouter, + P: Collators, +{ type Error = Error; - type Create = Result; + type Create = future::Either< + CreateProposal, + future::FutureResult, + >; type Evaluate = Box>; fn propose(&self) -> Self::Create { - debug!(target: "bft", "proposing block on top of parent ({}, {:?})", self.parent_number, self.parent_hash); + const ATTEMPT_PROPOSE_EVERY: Duration = Duration::from_millis(100); + + let initial_included = self.table.includable_count(); + let enough_candidates = self.dynamic_inclusion.acceptable_in( + Instant::now(), + initial_included, + ).unwrap_or_default(); + + let timing = { + let delay = self.delay.clone(); + let dynamic_inclusion = self.dynamic_inclusion.clone(); + let make_timing = move |handle| -> Result { + let attempt_propose = Interval::new(ATTEMPT_PROPOSE_EVERY, handle)?; + let enough_candidates = Timeout::new(enough_candidates, handle)?; + Ok(ProposalTiming { + attempt_propose, + enough_candidates, + dynamic_inclusion, + minimum_delay: Some(delay), + last_included: initial_included, + }) + }; - // TODO: handle case when current timestamp behind that in state. - let mut block_builder = self.client.build_block( - &self.parent_id, - current_timestamp() - )?; + match make_timing(&self.handle) { + Ok(timing) => timing, + Err(e) => { + return future::Either::B(future::err(timer_error(&e))); + } + } + }; - let readiness_evaluator = Ready::create(self.parent_id.clone(), &*self.client); + future::Either::A(CreateProposal { + parent_hash: self.parent_hash.clone(), + parent_number: self.parent_number.clone(), + parent_id: self.parent_id.clone(), + client: self.client.clone(), + transaction_pool: self.transaction_pool.clone(), + collation: CollationFetch::new( + self.local_duty.validation, + self.parent_id.clone(), + self.parent_hash.clone(), + self.collators.clone(), + self.client.clone() + ), + table: self.table.clone(), + router: self.router.clone(), + timing, + }) + } - { - let mut pool = self.transaction_pool.lock(); - let mut unqueue_invalid = Vec::new(); - let mut pending_size = 0; - pool.cull(None, readiness_evaluator.clone()); - for pending in pool.pending(readiness_evaluator.clone()) { - // skip and cull transactions which are too large. 
- if pending.encoded_size() > MAX_TRANSACTIONS_SIZE { - unqueue_invalid.push(pending.hash().clone()); - continue - } + fn evaluate(&self, proposal: &SubstrateBlock) -> Self::Evaluate { + debug!(target: "bft", "evaluating block on top of parent ({}, {:?})", self.parent_number, self.parent_hash); - if pending_size + pending.encoded_size() >= MAX_TRANSACTIONS_SIZE { break } + let active_parachains = match self.client.active_parachains(&self.parent_id) { + Ok(x) => x, + Err(e) => return Box::new(future::err(e.into())) as Box<_>, + }; - match block_builder.push_extrinsic(pending.as_transaction().clone()) { - Ok(()) => { - pending_size += pending.encoded_size(); - } - Err(_) => { - unqueue_invalid.push(pending.hash().clone()); - } - } - } + let current_timestamp = current_timestamp(); - for tx_hash in unqueue_invalid { - pool.remove(&tx_hash, false); + // do initial serialization and structural integrity checks. + let maybe_proposal = evaluation::evaluate_initial( + proposal, + current_timestamp, + &self.parent_hash, + self.parent_number, + &active_parachains, + ); + + let proposal = match maybe_proposal { + Ok(p) => p, + Err(e) => { + // TODO: these errors are easily re-checked in runtime. + debug!(target: "bft", "Invalid proposal: {:?}", e); + return Box::new(future::ok(false)); } - } + }; - let polkadot_block = block_builder.bake(); - info!("Proposing block [number: {}; hash: {}; parent_hash: {}; extrinsics: [{}]]", - polkadot_block.header.number, - Hash::from(polkadot_block.header.blake2_256()), - polkadot_block.header.parent_hash, - polkadot_block.extrinsics.iter() - .map(|xt| format!("{}", Hash::from(xt.blake2_256()))) - .collect::>() - .join(", ") - ); + let vote_delays = { + // delay casting vote until able (according to minimum block time) + let minimum_delay = self.delay.clone() + .map_err(|e| timer_error(&*e)); + + let included_candidate_hashes = proposal + .parachain_heads() + .iter() + .map(|candidate| candidate.hash()); + + // delay casting vote until we have proof that all candidates are + // includable. + let includability_tracker = self.table.track_includability(included_candidate_hashes) + .map_err(|_| ErrorKind::PrematureDestruction.into()); + + // the duration at which the given number of parachains is acceptable. + let count_delay = self.dynamic_inclusion.acceptable_in( + Instant::now(), + proposal.parachain_heads().len(), + ); + + // the duration until the given timestamp is current + let proposed_timestamp = proposal.timestamp(); + let timestamp_delay = if proposed_timestamp > current_timestamp { + Some(Duration::from_secs(proposed_timestamp - current_timestamp)) + } else { + None + }; - let substrate_block = Slicable::decode(&mut polkadot_block.encode().as_slice()) - .expect("polkadot blocks defined to serialize to substrate blocks correctly; qed"); + // construct a future from the maximum of the two durations. + let temporary_delay = match ::std::cmp::max(timestamp_delay, count_delay) { + Some(duration) => { + let maybe_timeout = Timeout::new(duration, &self.handle); - assert!(evaluate_proposal(&substrate_block, &*self.client, current_timestamp(), &self.parent_hash, self.parent_number, &self.parent_id).is_ok()); + let f = future::result(maybe_timeout) + .and_then(|timeout| timeout) + .map_err(|e| timer_error(&e)); - Ok(substrate_block) - } + future::Either::A(f) + } + None => future::Either::B(future::ok(())), + }; - // TODO: certain kinds of errors here should lead to a misbehavior report. 
- fn evaluate(&self, proposal: &SubstrateBlock) -> Self::Evaluate { - debug!(target: "bft", "evaluating block on top of parent ({}, {:?})", self.parent_number, self.parent_hash); + minimum_delay.join3(includability_tracker, temporary_delay) + }; - let evaluated = match evaluate_proposal(proposal, &*self.client, current_timestamp(), &self.parent_hash, self.parent_number, &self.parent_id) { - Ok(x) => Ok(x), - Err(e) => match *e.kind() { - ErrorKind::PolkadotApi(polkadot_api::ErrorKind::Executor(_)) => Ok(false), - ErrorKind::ProposalNotForPolkadot => Ok(false), - ErrorKind::TimestampInFuture => Ok(false), - ErrorKind::WrongParentHash(_, _) => Ok(false), - ErrorKind::ProposalTooLarge(_) => Ok(false), - _ => Err(e), + // evaluate whether the block is actually valid. + // TODO: is it better to delay this until the delays are finished? + let evaluated = self.client.evaluate_block(&self.parent_id, proposal.into()).map_err(Into::into); + let future = future::result(evaluated).and_then(move |good| { + let end_result = future::ok(good); + if good { + // delay a "good" vote. + future::Either::A(vote_delays.and_then(|_| end_result)) + } else { + // don't delay a "bad" evaluation. + future::Either::B(end_result) } - }; + }); - // delay casting vote until able. - Box::new(self.delay.clone().map_err(Error::from).and_then(move |_| evaluated)) + Box::new(future) as Box<_> } fn round_proposer(&self, round_number: usize, authorities: &[AuthorityId]) -> AuthorityId { @@ -697,45 +546,186 @@ impl bft::Proposer for Proposer { } } -fn evaluate_proposal( - proposal: &SubstrateBlock, - client: &C, - now: Timestamp, - parent_hash: &HeaderHash, - parent_number: BlockNumber, - parent_id: &C::CheckedBlockId, -) -> Result { - const MAX_TIMESTAMP_DRIFT: Timestamp = 4; +fn current_timestamp() -> Timestamp { + use std::time; - let encoded = Slicable::encode(proposal); - let proposal = PolkadotGenericBlock::decode(&mut &encoded[..]) - .and_then(|b| PolkadotBlock::from(b).ok()) - .ok_or_else(|| ErrorKind::ProposalNotForPolkadot)?; + time::SystemTime::now().duration_since(time::UNIX_EPOCH) + .expect("now always later than unix epoch; qed") + .as_secs() +} - let transactions_size = proposal.extrinsics.iter().fold(0, |a, tx| { - a + Slicable::encode(tx).len() - }); +struct ProposalTiming { + attempt_propose: Interval, + dynamic_inclusion: DynamicInclusion, + enough_candidates: Timeout, + minimum_delay: Option>, + last_included: usize, +} - if transactions_size > MAX_TRANSACTIONS_SIZE { - bail!(ErrorKind::ProposalTooLarge(transactions_size)) - } +impl ProposalTiming { + // whether it's time to attempt a proposal. + // shouldn't be called outside of the context of a task. + fn poll(&mut self, included: usize) -> Poll<(), Error> { + // first drain from the interval so when the minimum delay is up + // we don't have any notifications built up. + // + // this interval is just meant to produce periodic task wakeups + // that lead to the `dynamic_inclusion` getting updated as necessary. + if let Async::Ready(x) = self.attempt_propose.poll() + .map_err(|e| timer_error(&e))? + { + x.expect("timer still alive; intervals never end; qed"); + } - if proposal.header.parent_hash != *parent_hash { - bail!(ErrorKind::WrongParentHash(*parent_hash, proposal.header.parent_hash)); - } + if let Some(ref mut min) = self.minimum_delay { + try_ready!(min.poll().map_err(|e| timer_error(&*e))); + } + + self.minimum_delay = None; // after this point, the future must have completed. 
- if proposal.header.number != parent_number + 1 { - bail!(ErrorKind::WrongNumber(parent_number + 1, proposal.header.number)) + if included == self.last_included { + return self.enough_candidates.poll().map_err(|e| timer_error(&e)); + } + + // the amount of includable candidates has changed. schedule a wakeup + // if it's not sufficient anymore. + let now = Instant::now(); + match self.dynamic_inclusion.acceptable_in(now, included) { + Some(duration) => { + self.last_included = included; + self.enough_candidates.reset(now + duration); + self.enough_candidates.poll().map_err(|e| timer_error(&e)) + } + None => { + Ok(Async::Ready(())) + } + } } +} - let block_timestamp = proposal.timestamp(); +/// Future which resolves upon the creation of a proposal. +pub struct CreateProposal { + parent_hash: HeaderHash, + parent_number: BlockNumber, + parent_id: C::CheckedBlockId, + client: Arc, + transaction_pool: Arc>, + collation: CollationFetch, + router: R, + table: Arc, + timing: ProposalTiming, +} + +impl CreateProposal + where + C: PolkadotApi, + R: TableRouter, + P: Collators, +{ + fn propose_with(&self, candidates: Vec) -> Result { + // TODO: handle case when current timestamp behind that in state. + let timestamp = current_timestamp(); + let mut block_builder = self.client.build_block( + &self.parent_id, + timestamp, + candidates, + )?; + + let readiness_evaluator = Ready::create(self.parent_id.clone(), &*self.client); + + { + let mut pool = self.transaction_pool.lock(); + let mut unqueue_invalid = Vec::new(); + let mut pending_size = 0; + + pool.cull(None, readiness_evaluator.clone()); + for pending in pool.pending(readiness_evaluator.clone()) { + // skip and cull transactions which are too large. + if pending.encoded_size() > MAX_TRANSACTIONS_SIZE { + unqueue_invalid.push(pending.hash().clone()); + continue + } + + if pending_size + pending.encoded_size() >= MAX_TRANSACTIONS_SIZE { break } + + match block_builder.push_extrinsic(pending.as_transaction().clone()) { + Ok(()) => { + pending_size += pending.encoded_size(); + } + Err(e) => { + trace!(target: "transaction-pool", "Invalid transaction: {}", e); + unqueue_invalid.push(pending.hash().clone()); + } + } + } + + for tx_hash in unqueue_invalid { + pool.remove(&tx_hash, false); + } + } + + let polkadot_block = block_builder.bake(); + + info!("Proposing block [number: {}; hash: {}; parent_hash: {}; extrinsics: [{}]]", + polkadot_block.header.number, + Hash::from(polkadot_block.header.blake2_256()), + polkadot_block.header.parent_hash, + polkadot_block.extrinsics.iter() + .map(|xt| format!("{}", Hash::from(xt.blake2_256()))) + .collect::>() + .join(", ") + ); + + let substrate_block = Slicable::decode(&mut polkadot_block.encode().as_slice()) + .expect("polkadot blocks defined to serialize to substrate blocks correctly; qed"); + + // TODO: full re-evaluation + let active_parachains = self.client.active_parachains(&self.parent_id)?; + assert!(evaluation::evaluate_initial( + &substrate_block, + timestamp, + &self.parent_hash, + self.parent_number, + &active_parachains, + ).is_ok()); - // TODO: just defer using `tokio_timer` to delay prepare vote. - if block_timestamp > now + MAX_TIMESTAMP_DRIFT { - bail!(ErrorKind::TimestampInFuture) + Ok(substrate_block) } +} + +impl Future for CreateProposal + where + C: PolkadotApi, + R: TableRouter, + P: Collators, +{ + type Item = SubstrateBlock; + type Error = Error; + + fn poll(&mut self) -> Poll { + // 1. poll local collation future. 
+ match self.collation.poll() { + Ok(Async::Ready((collation, extrinsic))) => { + let hash = collation.receipt.hash(); + self.router.local_candidate_data(hash, collation.block_data, extrinsic); - // execute the block. - client.evaluate_block(parent_id, proposal.into())?; - Ok(true) + // TODO: if we are an availability guarantor also, we should produce an availability statement. + self.table.sign_and_import(&self.router, GenericStatement::Candidate(collation.receipt)); + } + Ok(Async::NotReady) => {}, + Err(_) => {}, // TODO: handle this failure to collate. + } + + // 2. try to propose if we have enough includable candidates and other + // delays have concluded. + let included = self.table.includable_count(); + try_ready!(self.timing.poll(included)); + + // 3. propose + let proposed_candidates = self.table.with_proposal(|proposed_set| { + proposed_set.into_iter().cloned().collect() + }); + + self.propose_with(proposed_candidates).map(Async::Ready) + } } diff --git a/polkadot/consensus/src/service.rs b/polkadot/consensus/src/service.rs index 97384040e9c86..cae27b16fd9b3 100644 --- a/polkadot/consensus/src/service.rs +++ b/polkadot/consensus/src/service.rs @@ -22,19 +22,23 @@ use std::thread; use std::time::{Duration, Instant}; use std::sync::Arc; -use futures::{future, Future, Stream, Sink, Async, Canceled, Poll}; -use parking_lot::Mutex; -use substrate_network as net; -use tokio_core::reactor; + +use bft::{self, BftService}; use client::{BlockchainEvents, ChainHead}; -use runtime_support::Hashable; +use ed25519; +use futures::prelude::*; +use futures::{future, Canceled}; +use parking_lot::Mutex; +use polkadot_api::PolkadotApi; +use polkadot_primitives::AccountId; +use polkadot_primitives::parachain::{Id as ParaId, BlockData, Extrinsic, CandidateReceipt}; use primitives::{Hash, AuthorityId}; use primitives::block::{Id as BlockId, HeaderHash, Header}; -use polkadot_primitives::parachain::{BlockData, Extrinsic, CandidateReceipt}; -use polkadot_api::PolkadotApi; -use bft::{self, BftService}; +use runtime_support::Hashable; +use substrate_network as net; +use tokio_core::reactor; use transaction_pool::TransactionPool; -use ed25519; + use super::{TableRouter, SharedTable, ProposerFactory}; use error; @@ -174,6 +178,15 @@ impl Sink for BftSink { struct Network(Arc); +impl super::Network for Network { + type TableRouter = Router; + fn table_router(&self, _table: Arc) -> Self::TableRouter { + Router { + network: self.0.clone() + } + } +} + fn start_bft( header: &Header, handle: reactor::Handle, @@ -224,6 +237,7 @@ impl Service { client: Arc, network: Arc, transaction_pool: Arc>, + parachain_empty_duration: Duration, key: ed25519::Pair, ) -> Service where @@ -233,10 +247,13 @@ impl Service { let thread = thread::spawn(move || { let mut core = reactor::Core::new().expect("tokio::Core could not be created"); let key = Arc::new(key); + let factory = ProposerFactory { client: client.clone(), transaction_pool: transaction_pool.clone(), network: Network(network.clone()), + collators: NoCollators, + parachain_empty_duration, handle: core.handle(), }; let bft_service = Arc::new(BftService::new(client.clone(), key, factory)); @@ -312,17 +329,25 @@ impl Drop for Service { } } -impl super::Network for Network { - type TableRouter = Router; - fn table_router(&self, _table: Arc) -> Self::TableRouter { - Router { - network: self.0.clone() - } +// Collators implementation which never collates anything. +// TODO: do a real implementation. 
+#[derive(Clone, Copy)] +struct NoCollators; + +impl ::collation::Collators for NoCollators { + type Error = (); + type Collation = future::Empty<::collation::Collation, ()>; + + fn collate(&self, _parachain: ParaId, _relay_parent: Hash) -> Self::Collation { + future::empty() } + + fn note_bad_collator(&self, _collator: AccountId) { } } type FetchCandidateAdapter = future::Map) -> BlockData>; +#[derive(Clone)] struct Router { network: Arc, } diff --git a/polkadot/consensus/src/shared_table/includable.rs b/polkadot/consensus/src/shared_table/includable.rs new file mode 100644 index 0000000000000..873c3af94c403 --- /dev/null +++ b/polkadot/consensus/src/shared_table/includable.rs @@ -0,0 +1,137 @@ +// Copyright 2017 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Implements a future which resolves when all of the candidates referenced are includable. + +use std::collections::HashMap; + +use futures::prelude::*; +use futures::sync::oneshot; + +use polkadot_primitives::Hash; + +/// Track includability of a set of candidates, +pub(super) fn track>(candidates: I) -> (IncludabilitySender, Includable) { + let (tx, rx) = oneshot::channel(); + let tracking: HashMap<_, _> = candidates.into_iter().collect(); + let includable_count = tracking.values().filter(|x| **x).count(); + + let mut sender = IncludabilitySender { + tracking, + includable_count, + sender: Some(tx), + }; + + sender.try_complete(); + + ( + sender, + Includable(rx), + ) +} + +/// The sending end of the includability sender. +pub(super) struct IncludabilitySender { + tracking: HashMap, + includable_count: usize, + sender: Option>, +} + +impl IncludabilitySender { + /// update the inner candidate. wakes up the task as necessary. + /// returns `Err(Canceled)` if the other end has hung up. + /// + /// returns `true` when this is completed and should be destroyed. + pub fn update_candidate(&mut self, candidate: Hash, includable: bool) -> bool { + use std::collections::hash_map::Entry; + + match self.tracking.entry(candidate) { + Entry::Vacant(_) => {} + Entry::Occupied(mut entry) => { + let old = entry.insert(includable); + if !old && includable { + self.includable_count += 1; + } else if old && !includable { + self.includable_count -= 1; + } + } + } + + self.try_complete() + } + + /// whether the sender is completed. + pub fn is_complete(&self) -> bool { + self.sender.is_none() + } + + fn try_complete(&mut self) -> bool { + if self.includable_count == self.tracking.len() { + if let Some(sender) = self.sender.take() { + let _ = sender.send(()); + } + + true + } else { + false + } + } +} + +/// Future that resolves when all the candidates within are includable. 
+pub struct Includable(oneshot::Receiver<()>);
+
+impl Future for Includable {
+	type Item = ();
+	type Error = oneshot::Canceled;
+
+	fn poll(&mut self) -> Poll<(), oneshot::Canceled> {
+		self.0.poll()
+	}
+}
+
+#[cfg(test)]
+mod tests {
+	use super::*;
+
+	#[test]
+	fn it_works() {
+		let hash1 = [1; 32].into();
+		let hash2 = [2; 32].into();
+		let hash3 = [3; 32].into();
+
+		let (mut sender, recv) = track([
+			(hash1, true),
+			(hash2, true),
+			(hash2, false), // overwrite should favor latter.
+			(hash3, true),
+		].iter().cloned());
+
+		assert!(!sender.is_complete());
+
+		// true -> false transition is possible and should be handled.
+		sender.update_candidate(hash1, false);
+		assert!(!sender.is_complete());
+
+		sender.update_candidate(hash2, true);
+		assert!(!sender.is_complete());
+
+		sender.update_candidate(hash1, true);
+		assert!(sender.is_complete());
+
+		recv.wait().unwrap();
+	}
+}
diff --git a/polkadot/consensus/src/shared_table/mod.rs b/polkadot/consensus/src/shared_table/mod.rs
new file mode 100644
index 0000000000000..23e936eefea80
--- /dev/null
+++ b/polkadot/consensus/src/shared_table/mod.rs
@@ -0,0 +1,566 @@
+// Copyright 2017 Parity Technologies (UK) Ltd.
+// This file is part of Polkadot.
+
+// Polkadot is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Polkadot is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Polkadot. If not, see .
+
+//! Parachain statement table meant to be shared with a message router
+//! and a consensus proposer.
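Roughly, the proposer and the network router are expected to drive the table defined in this module along the following lines. This is only a sketch: `router`, `groups`, `local_key`, `parent_hash`, `receipt` and `signed_statement` are assumed to already be in scope, and the closure passed for candidate checking is a stand-in for real validation.

```rust
// One table is built per relay-chain parent and shared between the proposer
// and the message router.
let table = SharedTable::new(groups, local_key, parent_hash);

// The proposer signs and imports its own candidate; local statements never
// produce additional fetch or validation work.
table.sign_and_import(&router, GenericStatement::Candidate(receipt.clone()));

// Statements arriving over the network are imported with a candidate-checking
// closure. The returned `StatementProducer` future drives any block-data or
// extrinsic fetches and yields the statements to sign in response.
let producer = table.import_statement(
    &router,
    signed_statement,
    StatementSource::Remote(None),
    |_collation| true, // stand-in for real re-execution of the candidate
);

// The proposer can also wait until a given set of candidates becomes includable.
let when_includable = table.track_includability(vec![receipt.hash()]);
```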
+ +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; + +use table::{self, Table, Context as TableContextTrait}; +use table::generic::Statement as GenericStatement; +use collation::Collation; +use polkadot_primitives::Hash; +use polkadot_primitives::parachain::{Id as ParaId, BlockData, Extrinsic, CandidateReceipt}; +use primitives::AuthorityId; + +use parking_lot::Mutex; +use futures::{future, prelude::*}; + +use super::{GroupInfo, TableRouter}; +use self::includable::IncludabilitySender; + +mod includable; + +pub use self::includable::Includable; + +struct TableContext { + parent_hash: Hash, + key: Arc<::ed25519::Pair>, + groups: HashMap, +} + +impl table::Context for TableContext { + fn is_member_of(&self, authority: &AuthorityId, group: &ParaId) -> bool { + self.groups.get(group).map_or(false, |g| g.validity_guarantors.contains(authority)) + } + + fn is_availability_guarantor_of(&self, authority: &AuthorityId, group: &ParaId) -> bool { + self.groups.get(group).map_or(false, |g| g.availability_guarantors.contains(authority)) + } + + fn requisite_votes(&self, group: &ParaId) -> (usize, usize) { + self.groups.get(group).map_or( + (usize::max_value(), usize::max_value()), + |g| (g.needed_validity, g.needed_availability), + ) + } +} + +impl TableContext { + fn local_id(&self) -> AuthorityId { + self.key.public().0 + } + + fn sign_statement(&self, statement: table::Statement) -> table::SignedStatement { + let signature = ::sign_table_statement(&statement, &self.key, &self.parent_hash).into(); + let local_id = self.key.public().0; + + table::SignedStatement { + statement, + signature, + sender: local_id, + } + } +} + +/// Source of statements +pub enum StatementSource { + /// Locally produced statement. + Local, + /// Received statement from remote source, with optional sender. + Remote(Option), +} + +// A shared table object. +struct SharedTableInner { + table: Table, + proposed_digest: Option, + checked_validity: HashSet, + checked_availability: HashSet, + trackers: Vec, +} + +impl SharedTableInner { + // Import a single statement. Provide a handle to a table router and a function + // used to determine if a referenced candidate is valid. + fn import_statement bool>( + &mut self, + context: &TableContext, + router: &R, + statement: table::SignedStatement, + statement_source: StatementSource, + check_candidate: C, + ) -> StatementProducer< + ::Future, + ::Future, + C, + > { + // this blank producer does nothing until we attach some futures + // and set a candidate digest. + let received_from = match statement_source { + StatementSource::Local => return Default::default(), + StatementSource::Remote(from) => from, + }; + + let summary = match self.table.import_statement(context, statement, received_from) { + Some(summary) => summary, + None => return Default::default(), + }; + + self.update_trackers(&summary.candidate, context); + + let local_id = context.local_id(); + + let is_validity_member = context.is_member_of(&local_id, &summary.group_id); + let is_availability_member = + context.is_availability_guarantor_of(&local_id, &summary.group_id); + + let digest = &summary.candidate; + + // TODO: consider a strategy based on the number of candidate votes as well. + // only check validity if this wasn't locally proposed. 
+ let checking_validity = is_validity_member + && self.proposed_digest.as_ref().map_or(true, |d| d != digest) + && self.checked_validity.insert(digest.clone()); + + let checking_availability = is_availability_member + && self.checked_availability.insert(digest.clone()); + + let work = if checking_validity || checking_availability { + match self.table.get_candidate(&digest) { + None => None, // TODO: handle table inconsistency somehow? + Some(candidate) => { + let fetch_block_data = + router.fetch_block_data(candidate).into_future().fuse(); + + let fetch_extrinsic = if checking_availability { + Some( + router.fetch_extrinsic_data(candidate).into_future().fuse() + ) + } else { + None + }; + + Some(Work { + candidate_receipt: candidate.clone(), + fetch_block_data, + fetch_extrinsic, + evaluate: checking_validity, + check_candidate, + }) + } + } + } else { + None + }; + + StatementProducer { + produced_statements: Default::default(), + work, + } + } + + fn update_trackers(&mut self, candidate: &Hash, context: &TableContext) { + let includable = self.table.candidate_includable(candidate, context); + for i in (0..self.trackers.len()).rev() { + if self.trackers[i].update_candidate(candidate.clone(), includable) { + self.trackers.swap_remove(i); + } + } + } +} + +/// Produced statements about a specific candidate. +/// Both may be `None`. +#[derive(Default)] +pub struct ProducedStatements { + /// A statement about the validity of the candidate. + pub validity: Option, + /// A statement about availability of data. If this is `Some`, + /// then `block_data` and `extrinsic` should be `Some` as well. + pub availability: Option, + /// Block data to ensure availability of. + pub block_data: Option, + /// Extrinsic data to ensure availability of. + pub extrinsic: Option, +} + +/// Future that produces statements about a specific candidate. +pub struct StatementProducer { + produced_statements: ProducedStatements, + work: Option>, +} + +struct Work { + candidate_receipt: CandidateReceipt, + fetch_block_data: future::Fuse, + fetch_extrinsic: Option>, + evaluate: bool, + check_candidate: C +} + +impl Default for StatementProducer { + fn default() -> Self { + StatementProducer { + produced_statements: Default::default(), + work: None, + } + } +} + +impl Future for StatementProducer + where + D: Future, + E: Future, + C: FnMut(Collation) -> bool, +{ + type Item = ProducedStatements; + type Error = Err; + + fn poll(&mut self) -> Poll { + let work = match self.work { + Some(ref mut work) => work, + None => return Ok(Async::Ready(::std::mem::replace(&mut self.produced_statements, Default::default()))), + }; + + if let Async::Ready(block_data) = work.fetch_block_data.poll()? { + self.produced_statements.block_data = Some(block_data.clone()); + if work.evaluate { + let is_good = (work.check_candidate)(Collation { + block_data, + receipt: work.candidate_receipt.clone(), + }); + + let hash = work.candidate_receipt.hash(); + self.produced_statements.validity = Some(if is_good { + GenericStatement::Valid(hash) + } else { + GenericStatement::Invalid(hash) + }); + } + } + + if let Some(ref mut fetch_extrinsic) = work.fetch_extrinsic { + if let Async::Ready(extrinsic) = fetch_extrinsic.poll()? 
{ + self.produced_statements.extrinsic = Some(extrinsic); + } + } + + let done = self.produced_statements.block_data.is_some() && { + if work.evaluate { + true + } else if self.produced_statements.extrinsic.is_some() { + self.produced_statements.availability = + Some(GenericStatement::Available(work.candidate_receipt.hash())); + + true + } else { + false + } + }; + + if done { + Ok(Async::Ready(::std::mem::replace(&mut self.produced_statements, Default::default()))) + } else { + Ok(Async::NotReady) + } + } +} + +/// A shared table object. +pub struct SharedTable { + context: Arc, + inner: Arc>, +} + +impl Clone for SharedTable { + fn clone(&self) -> Self { + SharedTable { + context: self.context.clone(), + inner: self.inner.clone(), + } + } +} + +impl SharedTable { + /// Create a new shared table. + /// + /// Provide the key to sign with, and the parent hash of the relay chain + /// block being built. + pub fn new(groups: HashMap, key: Arc<::ed25519::Pair>, parent_hash: Hash) -> Self { + SharedTable { + context: Arc::new(TableContext { groups, key, parent_hash }), + inner: Arc::new(Mutex::new(SharedTableInner { + table: Table::default(), + proposed_digest: None, + checked_validity: HashSet::new(), + checked_availability: HashSet::new(), + trackers: Vec::new(), + })) + } + } + + /// Get group info. + pub fn group_info(&self) -> &HashMap { + &self.context.groups + } + + /// Import a single statement. Provide a handle to a table router + /// for dispatching any other requests which come up. + pub fn import_statement bool>( + &self, + router: &R, + statement: table::SignedStatement, + received_from: StatementSource, + check_candidate: C, + ) -> StatementProducer<::Future, ::Future, C> { + self.inner.lock().import_statement(&*self.context, router, statement, received_from, check_candidate) + } + + /// Sign and import a local statement. + pub fn sign_and_import( + &self, + router: &R, + statement: table::Statement, + ) { + let proposed_digest = match statement { + GenericStatement::Candidate(ref c) => Some(c.hash()), + _ => None, + }; + + let signed_statement = self.context.sign_statement(statement); + + let mut inner = self.inner.lock(); + if proposed_digest.is_some() { + inner.proposed_digest = proposed_digest; + } + + let producer = inner.import_statement( + &*self.context, + router, + signed_statement, + StatementSource::Local, + |_| true, + ); + + assert!(producer.work.is_none(), "local statement import never leads to additional work; qed"); + } + + /// Import many statements at once. + /// + /// Provide an iterator yielding pairs of (statement, statement_source). + pub fn import_statements(&self, router: &R, iterable: I) -> U + where + R: TableRouter, + I: IntoIterator, + C: FnMut(Collation) -> bool, + U: ::std::iter::FromIterator::Future, + ::Future, + C, + >>, + { + let mut inner = self.inner.lock(); + + iterable.into_iter().map(move |(statement, statement_source, check_candidate)| { + inner.import_statement(&*self.context, router, statement, statement_source, check_candidate) + }).collect() + } + + /// Execute a closure using a specific candidate. + /// + /// Deadlocks if called recursively. + pub fn with_candidate(&self, digest: &Hash, f: F) -> U + where F: FnOnce(Option<&CandidateReceipt>) -> U + { + let inner = self.inner.lock(); + f(inner.table.get_candidate(digest)) + } + + /// Execute a closure using the current proposed set. + /// + /// Deadlocks if called recursively. 
+ pub fn with_proposal(&self, f: F) -> U + where F: FnOnce(Vec<&CandidateReceipt>) -> U + { + let inner = self.inner.lock(); + f(inner.table.proposed_candidates(&*self.context)) + } + + /// Get the number of parachains which have available candidates. + pub fn includable_count(&self) -> usize { + self.inner.lock().table.includable_count() + } + + /// Get all witnessed misbehavior. + pub fn get_misbehavior(&self) -> HashMap { + self.inner.lock().table.get_misbehavior().clone() + } + + /// Fill a statement batch. + pub fn fill_batch(&self, batch: &mut B) { + self.inner.lock().table.fill_batch(batch); + } + + /// Track includability of a given set of candidate hashes. + pub fn track_includability(&self, iterable: I) -> Includable + where I: IntoIterator + { + let mut inner = self.inner.lock(); + + let (tx, rx) = includable::track(iterable.into_iter().map(|x| { + let includable = inner.table.candidate_includable(&x, &*self.context); + (x, includable) + })); + + if !tx.is_complete() { + inner.trackers.push(tx); + } + + rx + } +} + +#[cfg(test)] +mod tests { + use super::*; + use substrate_keyring::Keyring; + + #[derive(Clone)] + struct DummyRouter; + impl TableRouter for DummyRouter { + type Error = (); + type FetchCandidate = ::futures::future::Empty; + type FetchExtrinsic = ::futures::future::Empty; + + /// Note local candidate data, making it available on the network to other validators. + fn local_candidate_data(&self, _hash: Hash, _block_data: BlockData, _extrinsic: Extrinsic) { + + } + + /// Fetch block data for a specific candidate. + fn fetch_block_data(&self, _candidate: &CandidateReceipt) -> Self::FetchCandidate { + ::futures::future::empty() + } + + /// Fetch extrinsic data for a specific candidate. + fn fetch_extrinsic_data(&self, _candidate: &CandidateReceipt) -> Self::FetchExtrinsic { + ::futures::future::empty() + } + } + + #[test] + fn statement_triggers_fetch_and_evaluate() { + let mut groups = HashMap::new(); + + let para_id = ParaId::from(1); + let local_id = Keyring::Alice.to_raw_public(); + let local_key = Arc::new(Keyring::Alice.pair()); + + let validity_other = Keyring::Bob.to_raw_public(); + let validity_other_key = Keyring::Bob.pair(); + let parent_hash = Default::default(); + + groups.insert(para_id, GroupInfo { + validity_guarantors: [local_id, validity_other].iter().cloned().collect(), + availability_guarantors: Default::default(), + needed_validity: 2, + needed_availability: 0, + }); + + let shared_table = SharedTable::new(groups, local_key.clone(), parent_hash); + + let candidate = CandidateReceipt { + parachain_index: para_id, + collator: [1; 32], + head_data: ::polkadot_primitives::parachain::HeadData(vec![1, 2, 3, 4]), + balance_uploads: Vec::new(), + egress_queue_roots: Vec::new(), + fees: 1_000_000, + }; + + let candidate_statement = GenericStatement::Candidate(candidate); + + let signature = ::sign_table_statement(&candidate_statement, &validity_other_key, &parent_hash); + let signed_statement = ::table::generic::SignedStatement { + statement: candidate_statement, + signature: signature.into(), + sender: validity_other, + }; + + let producer = shared_table.import_statement( + &DummyRouter, + signed_statement, + StatementSource::Remote(None), + |_| true, + ); + + assert!(producer.work.is_some(), "candidate and local validity group are same"); + assert!(producer.work.as_ref().unwrap().evaluate, "should evaluate validity"); + } + + #[test] + fn statement_triggers_fetch_and_availability() { + let mut groups = HashMap::new(); + + let para_id = ParaId::from(1); + 
let local_id = Keyring::Alice.to_raw_public(); + let local_key = Arc::new(Keyring::Alice.pair()); + + let validity_other = Keyring::Bob.to_raw_public(); + let validity_other_key = Keyring::Bob.pair(); + let parent_hash = Default::default(); + + groups.insert(para_id, GroupInfo { + validity_guarantors: [validity_other].iter().cloned().collect(), + availability_guarantors: [local_id].iter().cloned().collect(), + needed_validity: 1, + needed_availability: 1, + }); + + let shared_table = SharedTable::new(groups, local_key.clone(), parent_hash); + + let candidate = CandidateReceipt { + parachain_index: para_id, + collator: [1; 32], + head_data: ::polkadot_primitives::parachain::HeadData(vec![1, 2, 3, 4]), + balance_uploads: Vec::new(), + egress_queue_roots: Vec::new(), + fees: 1_000_000, + }; + + let candidate_statement = GenericStatement::Candidate(candidate); + + let signature = ::sign_table_statement(&candidate_statement, &validity_other_key, &parent_hash); + let signed_statement = ::table::generic::SignedStatement { + statement: candidate_statement, + signature: signature.into(), + sender: validity_other, + }; + + let producer = shared_table.import_statement( + &DummyRouter, + signed_statement, + StatementSource::Remote(None), + |_| true, + ); + + assert!(producer.work.is_some(), "candidate and local availability group are same"); + assert!(producer.work.as_ref().unwrap().fetch_extrinsic.is_some(), "should fetch extrinsic when guaranteeing availability"); + assert!(!producer.work.as_ref().unwrap().evaluate, "should not evaluate validity"); + } +} diff --git a/polkadot/parachain/Cargo.toml b/polkadot/parachain/Cargo.toml new file mode 100644 index 0000000000000..47cde571e1567 --- /dev/null +++ b/polkadot/parachain/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "polkadot-parachain" +version = "0.1.0" +authors = ["Parity Technologies "] +description = "Types and utilities for creating and working with parachains" + +[dependencies] +substrate-codec = { path = "../../substrate/codec", default-features = false } +wasmi = { version = "0.1", optional = true } +error-chain = { version = "0.11", optional = true } + +[dev-dependencies] +tiny-keccak = "1.4" + +[features] +default = ["std"] +std = ["substrate-codec/std", "wasmi", "error-chain"] diff --git a/polkadot/parachain/src/lib.rs b/polkadot/parachain/src/lib.rs new file mode 100644 index 0000000000000..2f3c064f33f36 --- /dev/null +++ b/polkadot/parachain/src/lib.rs @@ -0,0 +1,146 @@ +// Copyright 2017 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Defines primitive types for creating or validating a parachain. +//! +//! When compiled with standard library support, this crate exports a `wasm` +//! module that can be used to validate parachain WASM. +//! +//! ## Parachain WASM +//! +//! Polkadot parachain WASM is in the form of a module which imports a memory +//! instance and exports a function `validate`. +//! +//! 
`validate` accepts as input two `i32` values, representing a pointer/length pair +//! respectively, that encodes `ValidationParams`. +//! +//! `validate` returns an `i32` which is a pointer to a little-endian 32-bit integer denoting a length. +//! Subtracting the length from the initial pointer will give a new pointer to the actual return data, +//! +//! ASCII-diagram demonstrating the return data format: +//! +//! ```ignore +//! [return data][len (LE-u32)] +//! ^~~returned pointer +//! ``` +//! +//! The `load_params` and `write_result` functions provide utilities for setting up +//! a parachain WASM module in Rust. + +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(not(feature = "std"), feature(alloc))] + +/// Re-export of substrate-codec. +pub extern crate substrate_codec as codec; + +#[cfg(not(feature = "std"))] +extern crate alloc; + +#[cfg(feature = "std")] +extern crate core; + +#[cfg(feature = "std")] +extern crate wasmi; + +#[cfg(feature = "std")] +#[macro_use] +extern crate error_chain; + +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; +use codec::Slicable; + +#[cfg(feature = "std")] +pub mod wasm; + +/// Validation parameters for evaluating the parachain validity function. +// TODO: consolidated ingress and balance downloads +#[derive(PartialEq, Eq)] +#[cfg_attr(feature = "std", derive(Debug))] +pub struct ValidationParams { + /// The collation body. + pub block_data: Vec, + /// Previous head-data. + pub parent_head: Vec, +} + +impl Slicable for ValidationParams { + fn encode(&self) -> Vec { + let mut v = Vec::new(); + + self.block_data.using_encoded(|s| v.extend(s)); + self.parent_head.using_encoded(|s| v.extend(s)); + + v + } + + fn decode(input: &mut I) -> Option { + Some(ValidationParams { + block_data: Slicable::decode(input)?, + parent_head: Slicable::decode(input)?, + }) + } +} + +/// The result of parachain validation. +// TODO: egress and balance uploads +#[derive(PartialEq, Eq)] +#[cfg_attr(feature = "std", derive(Debug))] +pub struct ValidationResult { + /// New head data that should be included in the relay chain state. + pub head_data: Vec +} + +impl Slicable for ValidationResult { + fn encode(&self) -> Vec { + self.head_data.encode() + } + + fn decode(input: &mut I) -> Option { + Some(ValidationResult { + head_data: Slicable::decode(input)?, + }) + } +} + +/// Load the validation params from memory when implementing a Rust parachain. +/// +/// Offset and length must have been provided by the validation +/// function's entry point. +pub unsafe fn load_params(offset: usize, len: usize) -> ValidationParams { + let mut slice = ::core::slice::from_raw_parts(offset as *const u8, len); + + ValidationParams::decode(&mut slice).expect("Invalid input data") +} + +/// Allocate the validation result in memory, getting the return-pointer back. +/// +/// As described in the crate docs, this is a pointer to the appended length +/// of the vector. +pub fn write_result(result: ValidationResult) -> usize { + let mut encoded = result.encode(); + let len = encoded.len(); + + assert!(len <= u32::max_value() as usize, "Len too large for parachain-WASM abi"); + (len as u32).using_encoded(|s| encoded.extend(s)); + + // do not alter `encoded` beyond this point. may reallocate. + let end_ptr = &encoded[len] as *const u8 as usize; + + // leak so it doesn't get zeroed. 
+ ::core::mem::forget(encoded); + end_ptr +} diff --git a/polkadot/parachain/src/wasm.rs b/polkadot/parachain/src/wasm.rs new file mode 100644 index 0000000000000..a16383acb908b --- /dev/null +++ b/polkadot/parachain/src/wasm.rs @@ -0,0 +1,166 @@ +// Copyright 2017 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! WASM re-execution of a parachain candidate. +//! In the context of relay-chain candidate evaluation, there are some additional +//! steps to ensure that the provided input parameters are correct. +//! Assuming the parameters are correct, this module provides a wrapper around +//! a WASM VM for re-execution of a parachain candidate. + +use codec::Slicable; + +use wasmi::{self, Module, ModuleInstance, MemoryInstance, MemoryDescriptor, MemoryRef, ModuleImportResolver}; +use wasmi::{memory_units, RuntimeValue}; +use wasmi::Error as WasmError; + +use super::{ValidationParams, ValidationResult}; + +use std::cell::RefCell; + +error_chain! { + types { Error, ErrorKind, ResultExt; } + foreign_links { + Wasm(WasmError); + } + errors { + /// Call data too big. WASM32 only has a 32-bit address space. + ParamsTooLarge(len: usize) { + description("Validation parameters took up too much space to execute in WASM"), + display("Validation parameters took up {} bytes, max allowed by WASM is {}", len, i32::max_value()), + } + /// Bad return data or type. + BadReturn { + description("Validation function returned invalid data."), + display("Validation function returned invalid data."), + } + } +} + +struct Resolver { + max_memory: u32, // in pages. + memory: RefCell>, +} + +impl ModuleImportResolver for Resolver { + fn resolve_memory( + &self, + field_name: &str, + descriptor: &MemoryDescriptor, + ) -> Result { + if field_name == "memory" { + let effective_max = descriptor.maximum().unwrap_or(self.max_memory); + if descriptor.initial() > self.max_memory || effective_max > self.max_memory { + Err(WasmError::Instantiation("Module requested too much memory".to_owned())) + } else { + let mem = MemoryInstance::alloc( + memory_units::Pages(descriptor.initial() as usize), + descriptor.maximum().map(|x| memory_units::Pages(x as usize)), + )?; + *self.memory.borrow_mut() = Some(mem.clone()); + Ok(mem) + } + } else { + Err(WasmError::Instantiation("Memory imported under unknown name".to_owned())) + } + } +} + +/// Validate a candidate under the given validation code. +/// +/// This will fail if the validation code is not a proper parachain validation module. +pub fn validate_candidate(validation_code: &[u8], params: ValidationParams) -> Result { + use wasmi::LINEAR_MEMORY_PAGE_SIZE; + + // maximum memory in bytes + const MAX_MEM: u32 = 1024 * 1024 * 1024; // 1 GiB + + // instantiate the module. 
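Before the instantiation code below, a small self-contained illustration of the pointer/length return convention that `write_result` above produces and that this function decodes further down. The buffer and values here are invented for the example; in the real flow the bytes live in the parachain's WASM memory.

```rust
// Guest side (cf. `write_result`): append the data length as a little-endian
// u32 and hand back a pointer to that length.
let return_data = vec![1u8, 2, 3, 4, 5];                   // encoded ValidationResult
let mut buffer = return_data.clone();
buffer.extend_from_slice(&(return_data.len() as u32).to_le_bytes());
let len_offset = return_data.len();                        // what `validate` returns

// Host side (cf. the decoding below): read the length at the returned pointer,
// then subtract it to find the start of the data.
let len_bytes = [buffer[len_offset], buffer[len_offset + 1],
                 buffer[len_offset + 2], buffer[len_offset + 3]];
let len = u32::from_le_bytes(len_bytes) as usize;
let return_offset = len_offset - len;
assert_eq!(&buffer[return_offset..len_offset], &return_data[..]);
```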
+ let (module, memory) = { + let module = Module::from_buffer(validation_code)?; + + let module_resolver = Resolver { + max_memory: MAX_MEM / LINEAR_MEMORY_PAGE_SIZE.0 as u32, + memory: RefCell::new(None), + }; + + let module = ModuleInstance::new( + &module, + &wasmi::ImportsBuilder::new().with_resolver("env", &module_resolver), + )?.run_start(&mut wasmi::NopExternals).map_err(WasmError::Trap)?; + + let memory = module_resolver.memory.borrow_mut() + .as_ref() + .ok_or_else(|| WasmError::Instantiation("No imported memory instance".to_owned()))? + .clone(); + + (module, memory) + }; + + // allocate call data in memory. + let (offset, len) = { + let encoded_call_data = params.encode(); + + // hard limit from WASM. + if encoded_call_data.len() > i32::max_value() as usize { + bail!(ErrorKind::ParamsTooLarge(encoded_call_data.len())); + } + + let call_data_pages = (encoded_call_data.len() / LINEAR_MEMORY_PAGE_SIZE.0) + + (encoded_call_data.len() % LINEAR_MEMORY_PAGE_SIZE.0); + + let call_data_pages = wasmi::memory_units::Pages(call_data_pages); + + if memory.current_size() < call_data_pages { + memory.grow(call_data_pages - memory.current_size())?; + } + + memory.set(0, &encoded_call_data).expect("enough memory allocated just before this; \ + copying never fails if memory is large enough; qed"); + + (0, encoded_call_data.len() as i32) + }; + + let output = module.invoke_export( + "validate", + &[RuntimeValue::I32(offset), RuntimeValue::I32(len)], + &mut wasmi::NopExternals, + )?; + + match output { + Some(RuntimeValue::I32(len_offset)) => { + let len_offset = len_offset as u32; + + let mut len_bytes = [0u8; 4]; + memory.get_into(len_offset, &mut len_bytes)?; + + let len = u32::decode(&mut &len_bytes[..]) + .ok_or_else(|| ErrorKind::BadReturn)?; + + let return_offset = if len > len_offset { + bail!(ErrorKind::BadReturn); + } else { + len_offset - len + }; + + // TODO: optimize when `wasmi` lets you inspect memory with a closure. + let raw_return = memory.get(return_offset, len as usize)?; + ValidationResult::decode(&mut &raw_return[..]) + .ok_or_else(|| ErrorKind::BadReturn) + .map_err(Into::into) + } + _ => bail!(ErrorKind::BadReturn), + } +} diff --git a/polkadot/parachain/test-chains/.gitignore b/polkadot/parachain/test-chains/.gitignore new file mode 100644 index 0000000000000..2c96eb1b6517f --- /dev/null +++ b/polkadot/parachain/test-chains/.gitignore @@ -0,0 +1,2 @@ +target/ +Cargo.lock diff --git a/polkadot/parachain/test-chains/basic_add/Cargo.toml b/polkadot/parachain/test-chains/basic_add/Cargo.toml new file mode 100644 index 0000000000000..afc9e9f13103d --- /dev/null +++ b/polkadot/parachain/test-chains/basic_add/Cargo.toml @@ -0,0 +1,25 @@ +[package] +name = "basic_add" +version = "0.1.0" +authors = ["Parity Technologies "] +description = "Test parachain which adds to a number as its state transition" + +[lib] +crate-type = ["cdylib"] + +[dependencies] +polkadot-parachain = { path = "../../", default-features = false } +wee_alloc = "0.2.0" +tiny-keccak = "1.4" +pwasm-libc = "0.2" + +[features] +default = ["std"] +std = ["polkadot-parachain/std"] + +[profile.release] +panic = "abort" +lto = true + +[workspace] +members = [] diff --git a/polkadot/parachain/test-chains/basic_add/src/lib.rs b/polkadot/parachain/test-chains/basic_add/src/lib.rs new file mode 100644 index 0000000000000..3f877b16d1bcb --- /dev/null +++ b/polkadot/parachain/test-chains/basic_add/src/lib.rs @@ -0,0 +1,143 @@ +// Copyright 2017 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. 
+ +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Basic parachain that adds a number as part of its state. + +#![cfg_attr(not(feature = "std"), no_std)] +#![cfg_attr(not(feature = "std"), feature(alloc, core_intrinsics, global_allocator, lang_items))] + +#[cfg(not(feature = "std"))] +extern crate alloc; + +extern crate polkadot_parachain as parachain; +extern crate wee_alloc; +extern crate tiny_keccak; +extern crate pwasm_libc; + +use parachain::codec::{Slicable, Input}; + +#[cfg(not(feature = "std"))] +mod wasm; + +#[cfg(not(feature = "std"))] +pub use wasm::*; + +#[cfg(not(feature = "std"))] +use alloc::vec::Vec; + +// Define global allocator. +#[cfg(not(feature = "std"))] +#[global_allocator] +static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; + +// Head data for this parachain. +#[derive(Default, Clone)] +struct HeadData { + // Block number + number: u64, + // parent block keccak256 + parent_hash: [u8; 32], + // hash of post-execution state. + post_state: [u8; 32], +} + +impl Slicable for HeadData { + fn encode(&self) -> Vec { + let mut v = Vec::new(); + + self.number.using_encoded(|s| v.extend(s)); + self.parent_hash.using_encoded(|s| v.extend(s)); + self.post_state.using_encoded(|s| v.extend(s)); + + v + } + + fn decode(input: &mut I) -> Option { + Some(HeadData { + number: Slicable::decode(input)?, + parent_hash: Slicable::decode(input)?, + post_state: Slicable::decode(input)?, + }) + } +} + +// Block data for this parachain. +#[derive(Default, Clone)] +struct BlockData { + // State to begin from. 
+ state: u64, + // Amount to add (overflowing) + add: u64, +} + +impl Slicable for BlockData { + fn encode(&self) -> Vec { + let mut v = Vec::new(); + + self.state.using_encoded(|s| v.extend(s)); + self.add.using_encoded(|s| v.extend(s)); + + v + } + + fn decode(input: &mut I) -> Option { + Some(BlockData { + state: Slicable::decode(input)?, + add: Slicable::decode(input)?, + }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use parachain::ValidationParams; + + const TEST_CODE: &[u8] = include_bytes!("../wasm/test.wasm"); + + fn hash_state(state: u64) -> [u8; 32] { + ::tiny_keccak::keccak256(state.encode().as_slice()) + } + + fn hash_head(head: &HeadData) -> [u8; 32] { + ::tiny_keccak::keccak256(head.encode().as_slice()) + } + + #[test] + fn execute_good_on_parent() { + let parent_head = HeadData { + number: 0, + parent_hash: [0; 32], + post_state: hash_state(0), + }; + + let block_data = BlockData { + state: 0, + add: 512, + }; + + let ret = parachain::wasm::validate_candidate(TEST_CODE, ValidationParams { + parent_head: parent_head.encode(), + block_data: block_data.encode(), + }).unwrap(); + + let new_head = HeadData::decode(&mut &ret.head_data[..]).unwrap(); + + assert_eq!(new_head.number, 1); + assert_eq!(new_head.parent_hash, hash_head(&parent_head)); + assert_eq!(new_head.post_state, hash_state(512)); + } +} diff --git a/polkadot/parachain/test-chains/basic_add/src/src b/polkadot/parachain/test-chains/basic_add/src/src new file mode 120000 index 0000000000000..e8310385c56dc --- /dev/null +++ b/polkadot/parachain/test-chains/basic_add/src/src @@ -0,0 +1 @@ +src \ No newline at end of file diff --git a/polkadot/parachain/test-chains/basic_add/src/wasm.rs b/polkadot/parachain/test-chains/basic_add/src/wasm.rs new file mode 100644 index 0000000000000..fa1bf64f43cea --- /dev/null +++ b/polkadot/parachain/test-chains/basic_add/src/wasm.rs @@ -0,0 +1,58 @@ +// Copyright 2017 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Defines WASM module logic. + +use parachain::{self, ValidationResult}; +use parachain::codec::Slicable; +use super::{HeadData, BlockData}; + +#[lang = "panic_fmt"] +#[no_mangle] +pub extern fn panic_fmt( + _args: ::core::fmt::Arguments, + _file: &'static str, + _line: u32, + _col: u32, +) -> ! 
{ + use core::intrinsics; + unsafe { + intrinsics::abort(); + } +} + +#[no_mangle] +pub extern fn validate(offset: usize, len: usize) -> usize { + let hash_state = |state: u64| ::tiny_keccak::keccak256(state.encode().as_slice()); + + let params = unsafe { ::parachain::load_params(offset, len) }; + let parent_head = HeadData::decode(&mut ¶ms.parent_head[..]) + .expect("invalid parent head format."); + + let block_data = BlockData::decode(&mut ¶ms.block_data[..]) + .expect("invalid block data format."); + + assert_eq!(hash_state(block_data.state), parent_head.post_state, "wrong post-state proof"); + let new_state = block_data.state.saturating_add(block_data.add); + + let new_head = HeadData { + number: parent_head.number + 1, + parent_hash: ::tiny_keccak::keccak256(¶ms.parent_head[..]), + post_state: hash_state(new_state), + }; + + parachain::write_result(ValidationResult { head_data: new_head.encode() }) +} diff --git a/polkadot/parachain/test-chains/build_test_chains.sh b/polkadot/parachain/test-chains/build_test_chains.sh new file mode 100755 index 0000000000000..6523e46f14e7e --- /dev/null +++ b/polkadot/parachain/test-chains/build_test_chains.sh @@ -0,0 +1,14 @@ +#!/bin/sh +set -e + +rm -rf ./target +for i in */ +do + i=${i%/} + cd $i + + # TODO: stop using exact nightly when wee-alloc works on normal nightly. + RUSTFLAGS="-C link-arg=--import-memory" cargo +nightly-2018-03-07 build --target=wasm32-unknown-unknown --release --no-default-features + wasm-gc target/wasm32-unknown-unknown/release/$i.wasm ../../tests/res/$i.wasm + cd .. +done diff --git a/polkadot/parachain/tests/basic_add.rs b/polkadot/parachain/tests/basic_add.rs new file mode 100644 index 0000000000000..c5fbe467a433d --- /dev/null +++ b/polkadot/parachain/tests/basic_add.rs @@ -0,0 +1,170 @@ +// Copyright 2017 Parity Technologies (UK) Ltd. +// This file is part of Polkadot. + +// Polkadot is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Polkadot is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Polkadot. If not, see . + +//! Basic parachain that adds a number as part of its state. + +extern crate polkadot_parachain as parachain; +extern crate tiny_keccak; + +use parachain::ValidationParams; +use parachain::codec::{Slicable, Input}; + +// Head data for this parachain. +#[derive(Default, Clone)] +struct HeadData { + // Block number + number: u64, + // parent block keccak256 + parent_hash: [u8; 32], + // hash of post-execution state. + post_state: [u8; 32], +} + +impl Slicable for HeadData { + fn encode(&self) -> Vec { + let mut v = Vec::new(); + + self.number.using_encoded(|s| v.extend(s)); + self.parent_hash.using_encoded(|s| v.extend(s)); + self.post_state.using_encoded(|s| v.extend(s)); + + v + } + + fn decode(input: &mut I) -> Option { + Some(HeadData { + number: Slicable::decode(input)?, + parent_hash: Slicable::decode(input)?, + post_state: Slicable::decode(input)?, + }) + } +} + +// Block data for this parachain. +#[derive(Default, Clone)] +struct BlockData { + // State to begin from. 
+ state: u64, + // Amount to add (overflowing) + add: u64, +} + +impl Slicable for BlockData { + fn encode(&self) -> Vec { + let mut v = Vec::new(); + + self.state.using_encoded(|s| v.extend(s)); + self.add.using_encoded(|s| v.extend(s)); + + v + } + + fn decode(input: &mut I) -> Option { + Some(BlockData { + state: Slicable::decode(input)?, + add: Slicable::decode(input)?, + }) + } +} + +const TEST_CODE: &[u8] = include_bytes!("res/basic_add.wasm"); + +fn hash_state(state: u64) -> [u8; 32] { + ::tiny_keccak::keccak256(state.encode().as_slice()) +} + +fn hash_head(head: &HeadData) -> [u8; 32] { + ::tiny_keccak::keccak256(head.encode().as_slice()) +} + +#[test] +fn execute_good_on_parent() { + let parent_head = HeadData { + number: 0, + parent_hash: [0; 32], + post_state: hash_state(0), + }; + + let block_data = BlockData { + state: 0, + add: 512, + }; + + let ret = parachain::wasm::validate_candidate(TEST_CODE, ValidationParams { + parent_head: parent_head.encode(), + block_data: block_data.encode(), + }).unwrap(); + + let new_head = HeadData::decode(&mut &ret.head_data[..]).unwrap(); + + assert_eq!(new_head.number, 1); + assert_eq!(new_head.parent_hash, hash_head(&parent_head)); + assert_eq!(new_head.post_state, hash_state(512)); +} + +#[test] +fn execute_good_chain_on_parent() { + let mut number = 0; + let mut parent_hash = [0; 32]; + let mut last_state = 0; + + for add in 0..10 { + let parent_head = HeadData { + number, + parent_hash, + post_state: hash_state(last_state), + }; + + let block_data = BlockData { + state: last_state, + add, + }; + + let ret = parachain::wasm::validate_candidate(TEST_CODE, ValidationParams { + parent_head: parent_head.encode(), + block_data: block_data.encode(), + }).unwrap(); + + let new_head = HeadData::decode(&mut &ret.head_data[..]).unwrap(); + + assert_eq!(new_head.number, number + 1); + assert_eq!(new_head.parent_hash, hash_head(&parent_head)); + assert_eq!(new_head.post_state, hash_state(last_state + add)); + + number += 1; + parent_hash = hash_head(&new_head); + last_state += add; + } +} + +#[test] +fn execute_bad_on_parent() { + let parent_head = HeadData { + number: 0, + parent_hash: [0; 32], + post_state: hash_state(0), + }; + + let block_data = BlockData { + state: 256, // start state is wrong. + add: 256, + }; + + let _ret = parachain::wasm::validate_candidate(TEST_CODE, ValidationParams { + parent_head: parent_head.encode(), + block_data: block_data.encode(), + }).unwrap_err(); +} diff --git a/polkadot/parachain/tests/res/basic_add.wasm b/polkadot/parachain/tests/res/basic_add.wasm new file mode 100755 index 0000000000000..83f52db817f81 Binary files /dev/null and b/polkadot/parachain/tests/res/basic_add.wasm differ diff --git a/polkadot/primitives/src/lib.rs b/polkadot/primitives/src/lib.rs index 1db009c73546d..b9a636162c912 100644 --- a/polkadot/primitives/src/lib.rs +++ b/polkadot/primitives/src/lib.rs @@ -36,7 +36,6 @@ extern crate substrate_serializer; extern crate substrate_codec as codec; pub mod parachain; -pub mod validator; /// Virtual account ID that represents the idea of a dispatch/statement being signed by everybody /// (who matters). 
Essentially this means that a majority of validators have decided it is diff --git a/polkadot/primitives/src/parachain.rs b/polkadot/primitives/src/parachain.rs index 1dde3b508d1c3..3a87b1faa2d11 100644 --- a/polkadot/primitives/src/parachain.rs +++ b/polkadot/primitives/src/parachain.rs @@ -37,6 +37,13 @@ impl From for Id { fn from(x: u32) -> Self { Id(x) } } +impl Id { + /// Convert this Id into its inner representation. + pub fn into_inner(self) -> u32 { + self.0 + } +} + impl Slicable for Id { fn decode(input: &mut I) -> Option { u32::decode(input).map(Id) @@ -86,8 +93,6 @@ impl Slicable for Chain { } } - - /// The duty roster specifying what jobs each validator must do. #[derive(Clone, PartialEq)] #[cfg_attr(feature = "std", derive(Default, Debug))] diff --git a/polkadot/primitives/src/validator.rs b/polkadot/primitives/src/validator.rs deleted file mode 100644 index 0bac607594c6a..0000000000000 --- a/polkadot/primitives/src/validator.rs +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2017 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Validator primitives. - -#[cfg(feature = "std")] -use primitives::bytes; -use rstd::vec::Vec; -use parachain; - -/// Parachain outgoing message. -#[derive(PartialEq, Eq)] -#[cfg_attr(feature = "std", derive(Serialize, Debug))] -pub struct EgressPost(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); - -/// Balance upload. -#[derive(PartialEq, Eq)] -#[cfg_attr(feature = "std", derive(Serialize, Debug))] -pub struct BalanceUpload(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); - -/// Balance download. -#[derive(PartialEq, Eq)] -#[cfg_attr(feature = "std", derive(Serialize, Debug))] -pub struct BalanceDownload(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); - -/// The result of parachain validation. -#[derive(PartialEq, Eq)] -#[cfg_attr(feature = "std", derive(Serialize, Debug))] -#[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] -#[cfg_attr(feature = "std", serde(deny_unknown_fields))] -pub struct ValidationResult { - /// New head data that should be included in the relay chain state. - pub head_data: parachain::HeadData, - /// Outgoing messages (a vec for each parachain). 
- pub egress_queues: Vec>, - /// Balance uploads - pub balance_uploads: Vec, -} - -#[cfg(test)] -mod tests { - use super::*; - use substrate_serializer as ser; - - #[test] - fn test_validation_result() { - assert_eq!(ser::to_string_pretty(&ValidationResult { - head_data: parachain::HeadData(vec![1]), - egress_queues: vec![vec![EgressPost(vec![1])]], - balance_uploads: vec![BalanceUpload(vec![2])], - }), r#"{ - "headData": "0x01", - "egressQueues": [ - [ - "0x01" - ] - ], - "balanceUploads": [ - "0x02" - ] -}"#); - } -} diff --git a/polkadot/runtime/Cargo.toml b/polkadot/runtime/Cargo.toml index cb6c99de354c0..95044242fef1b 100644 --- a/polkadot/runtime/Cargo.toml +++ b/polkadot/runtime/Cargo.toml @@ -5,7 +5,6 @@ authors = ["Parity Technologies "] [dependencies] rustc-hex = "1.0" -hex-literal = "0.1.0" log = { version = "0.3", optional = true } serde = { version = "1.0", default_features = false } serde_derive = { version = "1.0", optional = true } @@ -28,6 +27,9 @@ substrate-runtime-system = { path = "../../substrate/runtime/system" } substrate-runtime-timestamp = { path = "../../substrate/runtime/timestamp" } polkadot-primitives = { path = "../primitives" } +[dev-dependencies] +hex-literal = "0.1.0" + [features] default = ["std"] std = [ diff --git a/polkadot/runtime/src/lib.rs b/polkadot/runtime/src/lib.rs index 1cfcd31497c6d..9b86eb91cb338 100644 --- a/polkadot/runtime/src/lib.rs +++ b/polkadot/runtime/src/lib.rs @@ -27,7 +27,7 @@ extern crate substrate_runtime_support as runtime_support; #[macro_use] extern crate substrate_runtime_primitives as runtime_primitives; -#[cfg(feature = "std")] +#[cfg(test)] #[macro_use] extern crate hex_literal; @@ -55,7 +55,19 @@ use runtime_io::BlakeTwo256; use polkadot_primitives::{AccountId, Balance, BlockNumber, Hash, Index, Log, SessionKey, Signature}; use runtime_primitives::generic; use runtime_primitives::traits::{Identity, HasPublicAux}; -#[cfg(feature = "std")] pub use runtime_primitives::BuildExternalities; + +#[cfg(feature = "std")] +pub use runtime_primitives::BuildExternalities; + +pub use consensus::Call as ConsensusCall; +pub use timestamp::Call as TimestampCall; +pub use parachains::Call as ParachainsCall; + + +/// The position of the timestamp set extrinsic. +pub const TIMESTAMP_SET_POSITION: u32 = 0; +/// The position of the parachains set extrinsic. +pub const PARACHAINS_SET_POSITION: u32 = 1; /// Concrete runtime type used to parameterize the various modules. pub struct Concrete; @@ -77,19 +89,18 @@ impl system::Trait for Concrete { pub type System = system::Module; impl consensus::Trait for Concrete { - type PublicAux = ::PublicAux; + type PublicAux = ::PublicAux; type SessionKey = SessionKey; } /// Consensus module for this concrete runtime. pub type Consensus = consensus::Module; -pub use consensus::Call as ConsensusCall; impl timestamp::Trait for Concrete { + const SET_POSITION: u32 = TIMESTAMP_SET_POSITION; type Value = u64; } /// Timestamp module for this concrete runtime. pub type Timestamp = timestamp::Module; -pub use timestamp::Call as TimestampCall; impl session::Trait for Concrete { type ConvertAccountIdToSessionKey = Identity; @@ -116,7 +127,11 @@ pub type Council = council::Module; /// Council voting module for this concrete runtime. pub type CouncilVoting = council::voting::Module; -impl parachains::Trait for Concrete {} +impl parachains::Trait for Concrete { + const SET_POSITION: u32 = PARACHAINS_SET_POSITION; + + type PublicAux = ::PublicAux; +} pub type Parachains = parachains::Module; impl_outer_dispatch! 
{ @@ -128,6 +143,7 @@ impl_outer_dispatch! { Democracy = 5, Council = 6, CouncilVoting = 7, + Parachains = 8, } pub enum PrivCall { diff --git a/polkadot/runtime/src/parachains.rs b/polkadot/runtime/src/parachains.rs index ef46a25146e62..1a6a77d7febd1 100644 --- a/polkadot/runtime/src/parachains.rs +++ b/polkadot/runtime/src/parachains.rs @@ -17,44 +17,70 @@ //! Main parachains logic. For now this is just the determination of which validators do what. use polkadot_primitives; -#[cfg(any(feature = "std", test))] use {runtime_io, runtime_primitives}; use rstd::prelude::*; -#[cfg(any(feature = "std", test))] use rstd::marker::PhantomData; use codec::{Slicable, Joiner}; use runtime_support::Hashable; -#[cfg(any(feature = "std", test))] use runtime_support::StorageValue; -use runtime_primitives::traits::Executable; -use polkadot_primitives::parachain::{Id, Chain, DutyRoster}; + +use runtime_primitives::traits::{Executable, RefInto, MaybeEmpty}; +use polkadot_primitives::parachain::{Id, Chain, DutyRoster, CandidateReceipt}; use {system, session}; -pub trait Trait: system::Trait + session::Trait {} +use runtime_support::{StorageValue, StorageMap}; + +#[cfg(any(feature = "std", test))] +use rstd::marker::PhantomData; + +#[cfg(any(feature = "std", test))] +use {runtime_io, runtime_primitives}; + +pub trait Trait: system::Trait + session::Trait { + /// The position of the set_heads call in the block. + const SET_POSITION: u32; + + type PublicAux: RefInto + MaybeEmpty; +} decl_module! { pub struct Module; + pub enum Call where aux: ::PublicAux { + // provide candidate receipts for parachains, in ascending order by id. + fn set_heads(aux, heads: Vec) = 0; + } } decl_storage! { - pub trait Store for Module; - // The number of parachains registered at present. - pub Count get(count): b"para:count" => default u32; + trait Store for Module; + // Vector of all parachain IDs. + pub Parachains get(active_parachains): b"para:chains" => default Vec; + // The parachains registered at present. + pub Code get(parachain_code): b"para:code" => map [ Id => Vec ]; + // The heads of the parachains registered at present. these are kept sorted. + pub Heads get(parachain_head): b"para:head" => map [ Id => Vec ]; + + // Did the parachain heads get updated in this block? + DidUpdate: b"para:did" => default bool; } impl Module { - /// Calculate the current block's duty roster. + /// Calculate the current block's duty roster using system's random seed. 
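As a worked example of the assignment performed by `calculate_duty_roster` just below, using the 8 validators and 2 parachains registered in the `duty_roster_works` test at the end of this file (the concrete numbers are only an illustration):

```rust
// Illustrative arithmetic only; it mirrors the calculation in the function below.
let validator_count = 8usize;
let parachain_count = 2usize;
let validators_per_parachain = (validator_count - 1) / parachain_count;

// Validators at indices 0..3 check parachain 0 and indices 3..6 check parachain 1;
// the remaining two (indices 6 and 7) serve the relay chain. The resulting role
// vector is then shuffled deterministically from the session's random seed.
assert_eq!(validators_per_parachain, 3);
assert_eq!(validator_count - parachain_count * validators_per_parachain, 2);
```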
pub fn calculate_duty_roster() -> DutyRoster { - let parachain_count = Self::count(); - let validator_count = >::validator_count(); + let parachains = Self::active_parachains(); + let parachain_count = parachains.len(); + let validator_count = >::validator_count() as usize; let validators_per_parachain = if parachain_count != 0 { (validator_count - 1) / parachain_count } else { 0 }; let mut roles_val = (0..validator_count).map(|i| match i { - i if i < parachain_count * validators_per_parachain => - Chain::Parachain(Id::from(i / validators_per_parachain as u32)), + i if i < parachain_count * validators_per_parachain => { + let idx = i / validators_per_parachain; + Chain::Parachain(parachains[idx].clone()) + } _ => Chain::Relay, }).collect::>(); + let mut roles_gua = roles_val.clone(); - let h = >::random_seed(); - let mut seed = h.to_vec().and(b"validator_role_pairs").blake2_256(); + let random_seed = system::Module::::random_seed(); + let mut seed = random_seed.to_vec().and(b"validator_role_pairs").blake2_256(); // shuffle for i in 0..(validator_count - 1) { @@ -83,16 +109,76 @@ impl Module { guarantor_duty: roles_gua, } } + + /// Register a parachain with given code. + /// Fails if given ID is already used. + pub fn register_parachain(id: Id, code: Vec, initial_head_data: Vec) { + let mut parachains = Self::active_parachains(); + match parachains.binary_search(&id) { + Ok(_) => panic!("Parachain with id {} already exists", id.into_inner()), + Err(idx) => parachains.insert(idx, id), + } + + >::insert(id, code); + >::put(parachains); + >::insert(id, initial_head_data); + } + + /// Deregister a parachain with given id + pub fn deregister_parachain(id: Id) { + let mut parachains = Self::active_parachains(); + match parachains.binary_search(&id) { + Ok(idx) => { parachains.remove(idx); } + Err(_) => {} + } + + >::remove(id); + >::remove(id); + >::put(parachains); + } + + fn set_heads(aux: &::PublicAux, heads: Vec) { + assert!(aux.is_empty()); + assert!(!>::exists(), "Parachain heads must be updated only once in the block"); + assert!( + >::extrinsic_index() == T::SET_POSITION, + "Parachain heads update extrinsic must be at position {} in the block", + T::SET_POSITION + ); + + let active_parachains = Self::active_parachains(); + let mut iter = active_parachains.iter(); + + // perform this check before writing to storage. + for head in &heads { + assert!( + iter.find(|&p| p == &head.parachain_index).is_some(), + "Submitted candidate for unregistered or out-of-order parachain {}", + head.parachain_index.into_inner() + ); + } + + for head in heads { + let id = head.parachain_index.clone(); + >::insert(id, head.head_data.0); + } + + >::put(true); + } } impl Executable for Module { fn execute() { + assert!(::DidUpdate::take(), "Parachain heads must be updated once in the block"); } } +/// Parachains module genesis configuration. #[cfg(any(feature = "std", test))] pub struct GenesisConfig { - pub count: u32, + /// The initial parachains, mapped to code. + pub parachains: Vec<(Id, Vec)>, + /// Phantom data. 
pub phantom: PhantomData, } @@ -100,7 +186,7 @@ pub struct GenesisConfig { impl Default for GenesisConfig { fn default() -> Self { GenesisConfig { - count: 0, + parachains: Vec::new(), phantom: PhantomData, } } @@ -109,12 +195,26 @@ impl Default for GenesisConfig { #[cfg(any(feature = "std", test))] impl runtime_primitives::BuildExternalities for GenesisConfig { - fn build_externalities(self) -> runtime_io::TestExternalities { + fn build_externalities(mut self) -> runtime_io::TestExternalities { + use std::collections::HashMap; use runtime_io::twox_128; use codec::Slicable; - map![ - twox_128(>::key()).to_vec() => self.count.encode() - ] + + self.parachains.sort_unstable_by_key(|&(ref id, _)| id.clone()); + self.parachains.dedup_by_key(|&mut (ref id, _)| id.clone()); + + let only_ids: Vec<_> = self.parachains.iter().map(|&(ref id, _)| id).cloned().collect(); + + let mut map: HashMap<_, _> = map![ + twox_128(>::key()).to_vec() => only_ids.encode() + ]; + + for (id, code) in self.parachains { + let key = twox_128(&>::key_for(&id)).to_vec(); + map.insert(key, code.encode()); + } + + map.into() } } @@ -148,12 +248,15 @@ mod tests { impl session::Trait for Test { type ConvertAccountIdToSessionKey = Identity; } - impl Trait for Test {} + impl Trait for Test { + const SET_POSITION: u32 = 0; + + type PublicAux = ::PublicAux; + } - type System = system::Module; type Parachains = Module; - fn new_test_ext() -> runtime_io::TestExternalities { + fn new_test_ext(parachains: Vec<(Id, Vec)>) -> runtime_io::TestExternalities { let mut t = system::GenesisConfig::::default().build_externalities(); t.extend(consensus::GenesisConfig::{ code: vec![], @@ -164,22 +267,59 @@ mod tests { validators: vec![1, 2, 3, 4, 5, 6, 7, 8], }.build_externalities()); t.extend(GenesisConfig::{ - count: 2, + parachains: parachains, phantom: PhantomData, }.build_externalities()); t } #[test] - fn simple_setup_should_work() { - with_externalities(&mut new_test_ext(), || { - assert_eq!(Parachains::count(), 2); + fn active_parachains_should_work() { + let parachains = vec![ + (5u32.into(), vec![1,2,3]), + (100u32.into(), vec![4,5,6]), + ]; + + with_externalities(&mut new_test_ext(parachains), || { + assert_eq!(Parachains::active_parachains(), vec![5u32.into(), 100u32.into()]); + assert_eq!(Parachains::parachain_code(&5u32.into()), Some(vec![1,2,3])); + assert_eq!(Parachains::parachain_code(&100u32.into()), Some(vec![4,5,6])); }); } #[test] - fn should_work() { - with_externalities(&mut new_test_ext(), || { + fn register_deregister() { + let parachains = vec![ + (5u32.into(), vec![1,2,3]), + (100u32.into(), vec![4,5,6]), + ]; + + with_externalities(&mut new_test_ext(parachains), || { + assert_eq!(Parachains::active_parachains(), vec![5u32.into(), 100u32.into()]); + + assert_eq!(Parachains::parachain_code(&5u32.into()), Some(vec![1,2,3])); + assert_eq!(Parachains::parachain_code(&100u32.into()), Some(vec![4,5,6])); + + Parachains::register_parachain(99u32.into(), vec![7,8,9], vec![1, 1, 1]); + + assert_eq!(Parachains::active_parachains(), vec![5u32.into(), 99u32.into(), 100u32.into()]); + assert_eq!(Parachains::parachain_code(&99u32.into()), Some(vec![7,8,9])); + + Parachains::deregister_parachain(5u32.into()); + + assert_eq!(Parachains::active_parachains(), vec![99u32.into(), 100u32.into()]); + assert_eq!(Parachains::parachain_code(&5u32.into()), None); + }); + } + + #[test] + fn duty_roster_works() { + let parachains = vec![ + (0u32.into(), vec![]), + (1u32.into(), vec![]), + ]; + + with_externalities(&mut new_test_ext(parachains), 
|| { let check_roster = |duty_roster: &DutyRoster| { assert_eq!(duty_roster.validator_duty.len(), 8); assert_eq!(duty_roster.guarantor_duty.len(), 8); @@ -191,16 +331,17 @@ mod tests { assert_eq!(duty_roster.guarantor_duty.iter().filter(|&&j| j == Chain::Relay).count(), 2); }; - System::set_random_seed([0u8; 32].into()); + system::Module::::set_random_seed([0u8; 32].into()); let duty_roster_0 = Parachains::calculate_duty_roster(); check_roster(&duty_roster_0); - System::set_random_seed([1u8; 32].into()); + system::Module::::set_random_seed([1u8; 32].into()); let duty_roster_1 = Parachains::calculate_duty_roster(); check_roster(&duty_roster_1); assert!(duty_roster_0 != duty_roster_1); - System::set_random_seed([2u8; 32].into()); + + system::Module::::set_random_seed([2u8; 32].into()); let duty_roster_2 = Parachains::calculate_duty_roster(); check_roster(&duty_roster_2); assert!(duty_roster_0 != duty_roster_2); diff --git a/polkadot/runtime/wasm/Cargo.lock b/polkadot/runtime/wasm/Cargo.lock index 77658199f1d18..bc00ce5ccd8cd 100644 --- a/polkadot/runtime/wasm/Cargo.lock +++ b/polkadot/runtime/wasm/Cargo.lock @@ -498,9 +498,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "substrate-codec" version = "0.1.0" -dependencies = [ - "substrate-runtime-std 0.1.0", -] [[package]] name = "substrate-keyring" diff --git a/polkadot/runtime/wasm/target/wasm32-unknown-unknown/release/polkadot_runtime.compact.wasm b/polkadot/runtime/wasm/target/wasm32-unknown-unknown/release/polkadot_runtime.compact.wasm index 134df026b5453..35f1cfd18b2c5 100644 Binary files a/polkadot/runtime/wasm/target/wasm32-unknown-unknown/release/polkadot_runtime.compact.wasm and b/polkadot/runtime/wasm/target/wasm32-unknown-unknown/release/polkadot_runtime.compact.wasm differ diff --git a/polkadot/runtime/wasm/target/wasm32-unknown-unknown/release/polkadot_runtime.wasm b/polkadot/runtime/wasm/target/wasm32-unknown-unknown/release/polkadot_runtime.wasm index 6c962c62d8156..7bc43f97d5520 100755 Binary files a/polkadot/runtime/wasm/target/wasm32-unknown-unknown/release/polkadot_runtime.wasm and b/polkadot/runtime/wasm/target/wasm32-unknown-unknown/release/polkadot_runtime.wasm differ diff --git a/polkadot/service/src/lib.rs b/polkadot/service/src/lib.rs index ee24cd0eb59c4..7ed649b672d4c 100644 --- a/polkadot/service/src/lib.rs +++ b/polkadot/service/src/lib.rs @@ -351,7 +351,13 @@ impl Service { // Load the first available key. Code above makes sure it exisis. 
let key = keystore.load(&keystore.contents()?[0], "")?; info!("Using authority key {:?}", key.public()); - Some(consensus::Service::new(client.clone(), network.clone(), transaction_pool.clone(), key)) + Some(consensus::Service::new( + client.clone(), + network.clone(), + transaction_pool.clone(), + ::std::time::Duration::from_millis(4000), // TODO: dynamic + key, + )) } else { None }; diff --git a/polkadot/statement-table/src/generic.rs b/polkadot/statement-table/src/generic.rs index 11665fe114636..4436730b3efce 100644 --- a/polkadot/statement-table/src/generic.rs +++ b/polkadot/statement-table/src/generic.rs @@ -268,6 +268,7 @@ pub struct Table { authority_data: HashMap>, detected_misbehavior: HashMap::Misbehavior>, candidate_votes: HashMap>, + includable_count: HashMap, } impl Default for Table { @@ -276,6 +277,7 @@ impl Default for Table { authority_data: HashMap::new(), detected_misbehavior: HashMap::new(), candidate_votes: HashMap::new(), + includable_count: HashMap::new(), } } } @@ -294,6 +296,11 @@ impl Table { let mut best_candidates = BTreeMap::new(); for candidate_data in self.candidate_votes.values() { let group_id = &candidate_data.group_id; + + if !self.includable_count.contains_key(group_id) { + continue + } + let (validity_t, availability_t) = context.requisite_votes(group_id); if !candidate_data.can_be_included(validity_t, availability_t) { continue } @@ -394,6 +401,11 @@ impl Table { &self.detected_misbehavior } + /// Get the current number of parachains with includable candidates. + pub fn includable_count(&self) -> usize { + self.includable_count.len() + } + /// Fill a statement batch and note messages as seen by the targets. pub fn fill_batch(&mut self, batch: &mut B) where B: StatementBatch< @@ -622,6 +634,9 @@ impl Table { Some(votes) => votes, }; + let (v_threshold, a_threshold) = context.requisite_votes(&votes.group_id); + let was_includable = votes.can_be_included(v_threshold, a_threshold); + // check that this authority actually can vote in this group. if !context.is_member_of(&from, &votes.group_id) { let (sig, valid) = match vote { @@ -686,6 +701,9 @@ impl Table { } } + let is_includable = votes.can_be_included(v_threshold, a_threshold); + update_includable_count(&mut self.includable_count, &votes.group_id, was_includable, is_includable); + (None, Some(votes.summary(digest))) } @@ -701,6 +719,9 @@ impl Table { Some(votes) => votes, }; + let (v_threshold, a_threshold) = context.requisite_votes(&votes.group_id); + let was_includable = votes.can_be_included(v_threshold, a_threshold); + // check that this authority actually can vote in this group. 
if !context.is_availability_guarantor_of(&from, &votes.group_id) { return ( @@ -716,10 +737,29 @@ impl Table { } votes.availability_votes.insert(from, signature); + + let is_includable = votes.can_be_included(v_threshold, a_threshold); + update_includable_count(&mut self.includable_count, &votes.group_id, was_includable, is_includable); + (None, Some(votes.summary(digest))) } } +fn update_includable_count(map: &mut HashMap, group_id: &G, was_includable: bool, is_includable: bool) { + if was_includable && !is_includable { + if let Entry::Occupied(mut entry) = map.entry(group_id.clone()) { + *entry.get_mut() -= 1; + if *entry.get() == 0 { + entry.remove(); + } + } + } + + if !was_includable && is_includable { + *map.entry(group_id.clone()).or_insert(0) += 1; + } +} + #[cfg(test)] mod tests { use super::*; @@ -746,11 +786,7 @@ mod tests { } fn create() -> Table { - Table { - authority_data: HashMap::default(), - detected_misbehavior: HashMap::default(), - candidate_votes: HashMap::default(), - } + Table::default() } #[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] @@ -806,8 +842,16 @@ mod tests { self.authorities.get(authority).map(|v| &v.1 == group).unwrap_or(false) } - fn requisite_votes(&self, _id: &GroupId) -> (usize, usize) { - (6, 34) + fn requisite_votes(&self, id: &GroupId) -> (usize, usize) { + let mut total_validity = 0; + let mut total_availability = 0; + + for &(ref validity, ref availability) in self.authorities.values() { + if validity == id { total_validity += 1 } + if availability == id { total_availability += 1 } + } + + (total_validity / 2 + 1, total_availability / 2 + 1) } } @@ -1067,6 +1111,69 @@ mod tests { assert!(!candidate.can_be_included(validity_threshold, availability_threshold)); } + #[test] + fn includability_counter() { + let context = TestContext { + authorities: { + let mut map = HashMap::new(); + map.insert(AuthorityId(1), (GroupId(2), GroupId(455))); + map.insert(AuthorityId(2), (GroupId(2), GroupId(455))); + map.insert(AuthorityId(3), (GroupId(2), GroupId(455))); + map.insert(AuthorityId(4), (GroupId(455), GroupId(2))); + map + } + }; + + // have 2/3 validity guarantors note validity. + let mut table = create(); + let statement = SignedStatement { + statement: Statement::Candidate(Candidate(2, 100)), + signature: Signature(1), + sender: AuthorityId(1), + }; + let candidate_digest = Digest(100); + + table.import_statement(&context, statement, None); + assert!(!table.detected_misbehavior.contains_key(&AuthorityId(1))); + assert!(!table.candidate_includable(&candidate_digest, &context)); + assert!(table.includable_count.is_empty()); + + let vote = SignedStatement { + statement: Statement::Valid(candidate_digest.clone()), + signature: Signature(2), + sender: AuthorityId(2), + }; + + table.import_statement(&context, vote, None); + assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); + assert!(!table.candidate_includable(&candidate_digest, &context)); + assert!(table.includable_count.is_empty()); + + // have the availability guarantor note validity. + let vote = SignedStatement { + statement: Statement::Available(candidate_digest.clone()), + signature: Signature(4), + sender: AuthorityId(4), + }; + + table.import_statement(&context, vote, None); + assert!(!table.detected_misbehavior.contains_key(&AuthorityId(4))); + assert!(table.candidate_includable(&candidate_digest, &context)); + assert!(table.includable_count.get(&GroupId(2)).is_some()); + + // have the last validity guarantor note invalidity. now it is unincludable. 
+ let vote = SignedStatement { + statement: Statement::Invalid(candidate_digest.clone()), + signature: Signature(3), + sender: AuthorityId(3), + }; + + table.import_statement(&context, vote, None); + assert!(!table.detected_misbehavior.contains_key(&AuthorityId(2))); + assert!(!table.candidate_includable(&candidate_digest, &context)); + assert!(table.includable_count.is_empty()); + } + #[test] fn candidate_import_gives_summary() { let context = TestContext { diff --git a/polkadot/transaction-pool/src/lib.rs b/polkadot/transaction-pool/src/lib.rs index 68166288b08ca..d375c7c3718fb 100644 --- a/polkadot/transaction-pool/src/lib.rs +++ b/polkadot/transaction-pool/src/lib.rs @@ -36,7 +36,8 @@ use std::sync::Arc; use polkadot_api::PolkadotApi; use primitives::{AccountId, Timestamp}; -use runtime::{Block, UncheckedExtrinsic, TimestampCall, Call}; +use primitives::parachain::CandidateReceipt; +use runtime::{Block, UncheckedExtrinsic, TimestampCall, ParachainsCall, Call}; use substrate_runtime_primitives::traits::{Bounded, Checkable}; use transaction_pool::{Pool, Readiness}; use transaction_pool::scoring::{Change, Choice}; @@ -59,17 +60,26 @@ pub struct PolkadotBlock { impl PolkadotBlock { /// Create a new block, checking high-level well-formedness. pub fn from(unchecked: Block) -> ::std::result::Result { - if unchecked.extrinsics.len() < 1 { + if unchecked.extrinsics.len() < 2 { return Err(unchecked); } if unchecked.extrinsics[0].is_signed() { return Err(unchecked); } + if unchecked.extrinsics[1].is_signed() { + return Err(unchecked); + } + match unchecked.extrinsics[0].extrinsic.function { Call::Timestamp(TimestampCall::set(_)) => {}, _ => return Err(unchecked), } + match unchecked.extrinsics[1].extrinsic.function { + Call::Parachains(ParachainsCall::set_heads(_)) => {}, + _ => return Err(unchecked), + } + // any further checks... Ok(PolkadotBlock { block: unchecked, location: None }) } @@ -92,6 +102,19 @@ impl PolkadotBlock { } } } + + /// Retrieve the parachain candidates proposed for this block. + pub fn parachain_heads(&self) -> &[CandidateReceipt] { + if let Call::Parachains(ParachainsCall::set_heads(ref t)) = self.block.extrinsics[1].extrinsic.function { + &t[..] + } else { + if let Some((file, line)) = self.location { + panic!("Invalid block used in `PolkadotBlock::force_from` at {}:{}", file, line); + } else { + panic!("Invalid block made it through the PolkadotBlock verification!?"); + } + } + } } #[macro_export] diff --git a/polkadot/validator/Cargo.toml b/polkadot/validator/Cargo.toml deleted file mode 100644 index 204c356a99098..0000000000000 --- a/polkadot/validator/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "polkadot-validator" -version = "0.1.0" -authors = ["Parity Technologies "] - -[dependencies] -error-chain = "0.11" -serde = "1.0" -substrate-primitives = { path = "../../substrate/primitives" } -substrate-serializer = { path = "../../substrate/serializer" } -polkadot-primitives = { path = "../primitives" } diff --git a/polkadot/validator/src/error.rs b/polkadot/validator/src/error.rs deleted file mode 100644 index 1c8caf739f6d0..0000000000000 --- a/polkadot/validator/src/error.rs +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2017 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -use serializer; - -error_chain! { - foreign_links { - Serialization(serializer::Error); - } - errors { - Timeout { - description("Validation task has timed-out."), - display("Validation timeout."), - } - InvalidCode(details: String) { - description("The code is invalid."), - display("invalid code: '{}'", details), - } - } -} diff --git a/polkadot/validator/src/lib.rs b/polkadot/validator/src/lib.rs deleted file mode 100644 index dc139e77d5b82..0000000000000 --- a/polkadot/validator/src/lib.rs +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2017 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -//! Validator implementation. - -#[warn(missing_docs)] - -extern crate substrate_primitives as primitives; -extern crate substrate_serializer as serializer; -extern crate polkadot_primitives; -extern crate serde; - -#[macro_use] -extern crate error_chain; - -mod error; -mod parachains; -mod validator; - -pub use error::{Error, ErrorKind, Result}; -pub use validator::Validator; diff --git a/polkadot/validator/src/parachains.rs b/polkadot/validator/src/parachains.rs deleted file mode 100644 index 67ba56ed1f84c..0000000000000 --- a/polkadot/validator/src/parachains.rs +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2017 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -use std::fmt; - -use polkadot_primitives::validator; -use serde::de::DeserializeOwned; - -use error::Result; - -/// Parachain code implementation. -pub trait ParachainCode: fmt::Debug { - /// Deserialized message type. - type Message: DeserializeOwned; - /// Balance download. - type Download: DeserializeOwned; - /// Deserialized block data type. - type BlockData: DeserializeOwned; - /// Parachain head data. - type HeadData: DeserializeOwned; - /// Result - type Result: Into; - - /// Given decoded messages and proof validate it and return egress posts. 
- fn check( - &self, - messages: Vec<(u64, Vec)>, - downloads: Vec, - block_data: Self::BlockData, - head_data: Self::HeadData, - ) -> Result; -} - -/// Dummy implementation of the first parachain validation. -#[derive(Debug)] -pub struct ParaChain1; - -impl ParachainCode for ParaChain1 { - type Message = (); - type Download = (); - type BlockData = (); - type HeadData = (); - type Result = validator::ValidationResult; - - fn check( - &self, - _messages: Vec<(u64, Vec)>, - _downloads: Vec, - _block_data: Self::BlockData, - _head_data: Self::HeadData, - ) -> Result - { - unimplemented!() - } -} diff --git a/polkadot/validator/src/validator.rs b/polkadot/validator/src/validator.rs deleted file mode 100644 index 354bbf73dcedb..0000000000000 --- a/polkadot/validator/src/validator.rs +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2017 Parity Technologies (UK) Ltd. -// This file is part of Polkadot. - -// Polkadot is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Polkadot is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Polkadot. If not, see . - -use std::fmt; - -use polkadot_primitives::{validator, parachain}; -use serde::de::DeserializeOwned; -use serializer; - -use error::{ErrorKind, Result}; -use parachains::{ParachainCode, ParaChain1}; - -/// A dummy validator implementation. -#[derive(Debug)] -pub struct Validator { - codes: Vec>, -} - -impl Validator { - /// Create a new validator. - pub fn new() -> Self { - Validator { - codes: vec![ - Box::new(ParaChain1) as Box - ], - } - } -} - -impl Validator { - pub fn validate( - &self, - code: &[u8], - consolidated_ingress: &[(u64, Vec)], - balance_downloads: &[validator::BalanceDownload], - block_data: ¶chain::BlockData, - previous_head_data: ¶chain::HeadData, - ) -> Result { - ensure!(code.len() == 1, ErrorKind::InvalidCode(format!("The code should be a single byte."))); - - match self.codes.get(code[0] as usize) { - Some(code) => code.check(consolidated_ingress, balance_downloads, block_data, previous_head_data), - None => bail!(ErrorKind::InvalidCode(format!("Unknown parachain code."))), - } - } -} - -/// Simplified parachain code verification -trait Code: fmt::Debug { - /// Given parachain candidate block data returns it's validity - /// and possible generated egress posts. - fn check( - &self, - consolidated_ingress: &[(u64, Vec)], - balance_downloads: &[validator::BalanceDownload], - block_data: ¶chain::BlockData, - previous_head_data: ¶chain::HeadData, - ) -> Result; -} - -impl Code for T where - M: DeserializeOwned, - B: DeserializeOwned, - R: Into, - T: ParachainCode, -{ - fn check( - &self, - consolidated_ingress: &[(u64, Vec)], - balance_downloads: &[validator::BalanceDownload], - block_data: ¶chain::BlockData, - previous_head_data: ¶chain::HeadData, - ) -> Result { - let messages = consolidated_ingress.iter() - .map(|&(ref block, ref vec)| Ok((*block, vec.iter() - .map(|msg| serializer::from_slice(&msg.0).map_err(Into::into)) - .collect::>>()? 
- ))) - .collect::>>()?; - let downloads = balance_downloads.iter() - .map(|download| serializer::from_slice(&download.0).map_err(Into::into)) - .collect::>>()?; - let block_data = serializer::from_slice(&block_data.0)?; - let head_data = serializer::from_slice(&previous_head_data.0)?; - - Ok(self.check(messages, downloads, block_data, head_data)?.into()) - } -} diff --git a/substrate/codec/Cargo.toml b/substrate/codec/Cargo.toml index e813ff4bca9a1..a800afa926baa 100644 --- a/substrate/codec/Cargo.toml +++ b/substrate/codec/Cargo.toml @@ -5,8 +5,7 @@ version = "0.1.0" authors = ["Parity Technologies "] [dependencies] -substrate-runtime-std = { path = "../runtime-std", default_features = false } [features] default = ["std"] -std = ["substrate-runtime-std/std"] +std = [] diff --git a/substrate/codec/src/joiner.rs b/substrate/codec/src/joiner.rs index dbe590f2bd03d..f5775082fd854 100644 --- a/substrate/codec/src/joiner.rs +++ b/substrate/codec/src/joiner.rs @@ -16,7 +16,7 @@ //! Trait -use rstd::iter::Extend; +use core::iter::Extend; use super::slicable::Slicable; /// Trait to allow itself to be serialised into a value which can be extended diff --git a/substrate/codec/src/keyedvec.rs b/substrate/codec/src/keyedvec.rs index d57d1dd111972..353c7ec0e97c8 100644 --- a/substrate/codec/src/keyedvec.rs +++ b/substrate/codec/src/keyedvec.rs @@ -17,8 +17,8 @@ //! Serialiser and prepender. use slicable::Slicable; -use rstd::iter::Extend; -use rstd::vec::Vec; +use core::iter::Extend; +use alloc::vec::Vec; /// Trait to allow itselg to be serialised and prepended by a given slice. pub trait KeyedVec { diff --git a/substrate/codec/src/lib.rs b/substrate/codec/src/lib.rs index 7a9f17baeb510..b4c360fb28958 100644 --- a/substrate/codec/src/lib.rs +++ b/substrate/codec/src/lib.rs @@ -14,14 +14,23 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! Implements the serialization and deserialization codec for polkadot runtime -//! values. +//! Implements a serialization and deserialization codec for simple marshalling. #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), feature(alloc))] -#[cfg_attr(not(feature = "std"), macro_use)] -extern crate substrate_runtime_std as rstd; +#[cfg(not(feature = "std"))] +#[macro_use] +extern crate alloc; + +#[cfg(feature = "std")] +extern crate core; + +#[cfg(feature = "std")] +pub mod alloc { + pub use std::boxed; + pub use std::vec; +} mod slicable; mod joiner; diff --git a/substrate/codec/src/slicable.rs b/substrate/codec/src/slicable.rs index 4054b5ef5eccb..0ea2c0e6306ee 100644 --- a/substrate/codec/src/slicable.rs +++ b/substrate/codec/src/slicable.rs @@ -16,8 +16,9 @@ //! Serialisation. -use rstd::prelude::*; -use rstd::{mem, slice}; +use alloc::vec::Vec; +use alloc::boxed::Box; +use core::{mem, slice}; use super::joiner::Joiner; /// Trait that allows reading of data into a slice. @@ -38,7 +39,7 @@ pub trait Input { impl<'a> Input for &'a [u8] { fn read(&mut self, into: &mut [u8]) -> usize { - let len = ::rstd::cmp::min(into.len(), self.len()); + let len = ::core::cmp::min(into.len(), self.len()); into[..len].copy_from_slice(&self[..len]); *self = &self[len..]; len @@ -155,7 +156,7 @@ impl Slicable for Vec { } fn encode(&self) -> Vec { - use rstd::iter::Extend; + use core::iter::Extend; let len = self.len(); assert!(len <= u32::max_value() as usize, "Attempted to serialize vec with too many elements."); @@ -241,7 +242,7 @@ macro_rules! 
tuple_impl { #[allow(non_snake_case)] mod inner_tuple_impl { - use rstd::vec::Vec; + use alloc::vec::Vec; use super::{Input, Slicable}; tuple_impl!(A, B, C, D, E, F, G, H, I, J, K,); diff --git a/substrate/executor/src/wasm_executor.rs b/substrate/executor/src/wasm_executor.rs index 3c01d2fde732f..20633449b5c7d 100644 --- a/substrate/executor/src/wasm_executor.rs +++ b/substrate/executor/src/wasm_executor.rs @@ -433,9 +433,6 @@ impl CodeExecutor for WasmExecutor { method: &str, data: &[u8], ) -> Result> { - // TODO: handle all expects as errors to be returned. - println!("Wasm-Calling {}({})", method, HexDisplay::from(&data)); - let module = Module::from_buffer(code).expect("all modules compiled with rustc are valid wasm code; qed"); // start module instantiation. Don't run 'start' function yet. @@ -481,7 +478,6 @@ impl CodeExecutor for WasmExecutor { let length = (r >> 32) as u32 as usize; memory.get(offset, length) .map_err(|_| ErrorKind::Runtime.into()) - .map(|v| { println!("Returned {}", HexDisplay::from(&v)); v }) } else { Err(ErrorKind::InvalidReturn.into()) } diff --git a/substrate/executor/wasm/Cargo.lock b/substrate/executor/wasm/Cargo.lock index a60f4b2df89e6..f9291ee9c6526 100644 --- a/substrate/executor/wasm/Cargo.lock +++ b/substrate/executor/wasm/Cargo.lock @@ -67,9 +67,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "substrate-codec" version = "0.1.0" -dependencies = [ - "substrate-runtime-std 0.1.0", -] [[package]] name = "substrate-primitives" diff --git a/substrate/executor/wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm b/substrate/executor/wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm index 69dd06c2cb853..f9e5d9423d112 100644 Binary files a/substrate/executor/wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm and b/substrate/executor/wasm/target/wasm32-unknown-unknown/release/runtime_test.compact.wasm differ diff --git a/substrate/executor/wasm/target/wasm32-unknown-unknown/release/runtime_test.wasm b/substrate/executor/wasm/target/wasm32-unknown-unknown/release/runtime_test.wasm index 7979bc12e0a6e..6b9fd2905255a 100755 Binary files a/substrate/executor/wasm/target/wasm32-unknown-unknown/release/runtime_test.wasm and b/substrate/executor/wasm/target/wasm32-unknown-unknown/release/runtime_test.wasm differ diff --git a/substrate/runtime-support/src/lib.rs b/substrate/runtime-support/src/lib.rs index 12894aa8088bc..5af5ae18c5989 100644 --- a/substrate/runtime-support/src/lib.rs +++ b/substrate/runtime-support/src/lib.rs @@ -22,7 +22,8 @@ extern crate serde; #[cfg(feature = "std")] -#[allow(unused_imports)] #[macro_use] // can be removed when fixed: https://github.com/rust-lang/rust/issues/43497 +#[allow(unused_imports)] // can be removed when fixed: https://github.com/rust-lang/rust/issues/43497 +#[macro_use] extern crate serde_derive; #[cfg(feature = "std")] diff --git a/substrate/runtime/consensus/src/lib.rs b/substrate/runtime/consensus/src/lib.rs index 40c879e811a8e..a32c68adbc0c9 100644 --- a/substrate/runtime/consensus/src/lib.rs +++ b/substrate/runtime/consensus/src/lib.rs @@ -38,8 +38,8 @@ use primitives::traits::RefInto; use substrate_primitives::bft::MisbehaviorReport; -pub const AUTHORITY_AT: &'static[u8] = b":auth:"; -pub const AUTHORITY_COUNT: &'static[u8] = b":auth:len"; +pub const AUTHORITY_AT: &'static [u8] = b":auth:"; +pub const AUTHORITY_COUNT: &'static [u8] = b":auth:len"; struct AuthorityStorageVec(rstd::marker::PhantomData); impl StorageVec for 
AuthorityStorageVec { diff --git a/substrate/runtime/system/src/lib.rs b/substrate/runtime/system/src/lib.rs index 864a007726238..55ccd2302d004 100644 --- a/substrate/runtime/system/src/lib.rs +++ b/substrate/runtime/system/src/lib.rs @@ -160,8 +160,8 @@ impl Module { /// Set the random seed to something in particular. Can be used as an alternative to /// `initialise` for tests that don't need to bother with the other environment entries. #[cfg(any(feature = "std", test))] - pub fn set_random_seed(n: T::Hash) { - >::put(n); + pub fn set_random_seed(seed: T::Hash) { + >::put(seed); } /// Increment a particular account's nonce by 1. diff --git a/substrate/runtime/timestamp/src/lib.rs b/substrate/runtime/timestamp/src/lib.rs index ee6bad5c83188..fd03fac92821f 100644 --- a/substrate/runtime/timestamp/src/lib.rs +++ b/substrate/runtime/timestamp/src/lib.rs @@ -37,6 +37,9 @@ use runtime_support::{StorageValue, Parameter}; use runtime_primitives::traits::{HasPublicAux, Executable, MaybeEmpty}; pub trait Trait: HasPublicAux + system::Trait { + // the position of the required timestamp-set extrinsic. + const SET_POSITION: u32; + type Value: Parameter + Default; } @@ -64,7 +67,11 @@ impl Module { fn set(aux: &T::PublicAux, now: T::Value) { assert!(aux.is_empty()); assert!(!::DidUpdate::exists(), "Timestamp must be updated only once in the block"); - assert!(>::extrinsic_index() == 0, "Timestamp must be first extrinsic in the block"); + assert!( + >::extrinsic_index() == T::SET_POSITION, + "Timestamp extrinsic must be at position {} in the block", + T::SET_POSITION + ); ::Now::put(now); ::DidUpdate::put(true); } @@ -119,6 +126,7 @@ mod tests { type Header = Header; } impl Trait for Test { + const SET_POSITION: u32 = 0; type Value = u64; } type Timestamp = Module; diff --git a/substrate/test-runtime/wasm/Cargo.lock b/substrate/test-runtime/wasm/Cargo.lock index d58c810398914..5716b05d5b334 100644 --- a/substrate/test-runtime/wasm/Cargo.lock +++ b/substrate/test-runtime/wasm/Cargo.lock @@ -423,9 +423,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" [[package]] name = "substrate-codec" version = "0.1.0" -dependencies = [ - "substrate-runtime-std 0.1.0", -] [[package]] name = "substrate-primitives" diff --git a/substrate/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm b/substrate/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm index 892ff8e8ec999..d8d0d5eb193bf 100644 Binary files a/substrate/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm and b/substrate/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.compact.wasm differ diff --git a/substrate/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.wasm b/substrate/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.wasm index fac891e0b005d..222585017ca5b 100755 Binary files a/substrate/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.wasm and b/substrate/test-runtime/wasm/target/wasm32-unknown-unknown/release/substrate_test_runtime.wasm differ
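
A note on the `includable_count` bookkeeping added to `Table` above: the counter map is kept in sync on every imported validity or availability vote by comparing the candidate's includability before and after the vote against the thresholds from `requisite_votes`, so `includable_count()` answers "how many parachain groups currently have an includable candidate" in O(1) instead of rescanning `candidate_votes`, and `proposed_candidates` can skip groups absent from the map entirely. Below is a minimal, self-contained sketch of that pattern with simplified stand-in types (`u32` group ids, bare vote counters, and a `majority` helper matching the floor(n/2)+1 thresholds the test context now computes); it illustrates the mechanism only and is not the real `Table` API.

use std::collections::HashMap;
use std::collections::hash_map::Entry;

// Simplified stand-in for the per-candidate vote record: one candidate per
// group id, tracked only by how many validity and availability votes it has.
#[derive(Default)]
struct CandidateVotes {
    validity_votes: usize,
    availability_votes: usize,
}

impl CandidateVotes {
    // Mirrors `can_be_included`: both thresholds have to be met.
    fn can_be_included(&self, v_threshold: usize, a_threshold: usize) -> bool {
        self.validity_votes >= v_threshold && self.availability_votes >= a_threshold
    }
}

enum Vote {
    Validity,
    Availability,
}

// Strict majority of a group with `n` members, as in the updated test context.
fn majority(n: usize) -> usize {
    n / 2 + 1
}

#[derive(Default)]
struct MiniTable {
    candidate_votes: HashMap<u32, CandidateVotes>,
    includable_count: HashMap<u32, usize>,
}

impl MiniTable {
    /// Record a vote for the candidate of `group_id`, keeping the includable
    /// counter in sync via the same was/is-includable comparison as the patch.
    fn import_vote(&mut self, group_id: u32, vote: Vote, v_threshold: usize, a_threshold: usize) {
        let votes = self.candidate_votes.entry(group_id).or_default();
        let was_includable = votes.can_be_included(v_threshold, a_threshold);

        match vote {
            Vote::Validity => votes.validity_votes += 1,
            Vote::Availability => votes.availability_votes += 1,
        }

        let is_includable = votes.can_be_included(v_threshold, a_threshold);

        // Same shape as `update_includable_count`: decrement and drop empty
        // entries when a candidate stops being includable, increment when it
        // becomes includable.
        if was_includable && !is_includable {
            if let Entry::Occupied(mut entry) = self.includable_count.entry(group_id) {
                *entry.get_mut() -= 1;
                if *entry.get() == 0 {
                    entry.remove();
                }
            }
        }
        if !was_includable && is_includable {
            *self.includable_count.entry(group_id).or_insert(0) += 1;
        }
    }

    /// Number of groups (parachains) that currently have an includable candidate.
    fn includable_count(&self) -> usize {
        self.includable_count.len()
    }
}

fn main() {
    let mut table = MiniTable::default();
    // Group 2: three validity guarantors, one availability guarantor.
    let (v_t, a_t) = (majority(3), majority(1));

    table.import_vote(2, Vote::Validity, v_t, a_t);
    table.import_vote(2, Vote::Validity, v_t, a_t);
    assert_eq!(table.includable_count(), 0); // validity majority alone is not enough

    table.import_vote(2, Vote::Availability, v_t, a_t);
    assert_eq!(table.includable_count(), 1); // both thresholds met
}

Because the real counter is keyed by group id and only its `len()` is exposed, a group with several includable candidates is still counted once.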
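
In the same spirit, a sketch of the storage layout produced by the new `GenesisConfig::build_externalities`: the configured `(id, code)` pairs are sorted and deduplicated by id, the bare id list is stored under the parachains key, and each parachain's validation code under its own per-id key. The version below is a self-contained approximation; `storage_key` and `encode_ids` are illustrative stand-ins for the runtime's `twox_128`-hashed keys and `Slicable` encoding, not the actual key or codec format.

use std::collections::HashMap;

// Placeholder key derivation; the real code derives hashed storage keys for the
// parachain list and for each parachain's code entry.
fn storage_key(name: &str, id: Option<u32>) -> Vec<u8> {
    match id {
        Some(id) => format!("{}:{}", name, id).into_bytes(),
        None => name.as_bytes().to_vec(),
    }
}

// Toy encoding of the id list; the runtime uses the `Slicable` codec instead.
fn encode_ids(ids: &[u32]) -> Vec<u8> {
    ids.iter().flat_map(|id| id.to_le_bytes()).collect()
}

/// Build the genesis storage map from (parachain id, validation code) pairs,
/// mirroring the sort/dedup and the two kinds of entries written by the patch.
fn build_storage(mut parachains: Vec<(u32, Vec<u8>)>) -> HashMap<Vec<u8>, Vec<u8>> {
    // Keep at most one entry per id.
    parachains.sort_unstable_by_key(|&(id, _)| id);
    parachains.dedup_by_key(|&mut (id, _)| id);

    let only_ids: Vec<u32> = parachains.iter().map(|&(id, _)| id).collect();

    let mut map = HashMap::new();
    // The active-parachain id list lives under a single well-known key...
    map.insert(storage_key("parachains", None), encode_ids(&only_ids));
    // ...and each parachain's code under a per-id key.
    for (id, code) in parachains {
        map.insert(storage_key("parachains:code", Some(id)), code);
    }
    map
}

fn main() {
    let storage = build_storage(vec![
        (100, vec![4, 5, 6]),
        (5, vec![1, 2, 3]),
        (5, vec![9, 9, 9]), // duplicate id: only one survives sort + dedup
    ]);
    assert_eq!(storage.len(), 3); // the id list plus two code entries
}

Writing the id list and the per-id code under separate keys is what lets the module getters exercised in the tests above, `active_parachains` and `parachain_code`, be answered with independent single-key storage reads.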