This repository was archived by the owner on Nov 15, 2023. It is now read-only.
Merged
Changes from all commits
28 commits
4f03019
update basic_add wasm
rphmeier Jul 18, 2018
82def8c
Merge remote-tracking branch 'upstream/master' into basic-add-collator
rphmeier Jul 18, 2018
6d7a112
wasm feature and collator feature
rphmeier Jul 18, 2018
7a2b86a
move test parachains around a little
rphmeier Jul 20, 2018
78c0226
fix wasm build for basic_add
rphmeier Jul 20, 2018
18dfc62
move basic_add to adder, introduce README
rphmeier Jul 20, 2018
2bdcaf9
minimal basic_add collator
rphmeier Jul 23, 2018
9083847
ensure collator messages are sent in the right order
rphmeier Jul 23, 2018
c157c4c
more logging
rphmeier Jul 23, 2018
6b5f5ff
route consensus statements to all peers
rphmeier Jul 23, 2018
957f69e
minor bugfixes for parachains
rphmeier Jul 23, 2018
d9e1c77
genesis builder accounts for parachain heads
rphmeier Jul 23, 2018
53889d7
Merge remote-tracking branch 'upstream/master' into rh-simple-parachain
rphmeier Jul 24, 2018
d628c3d
fix parachains tests
rphmeier Jul 24, 2018
5653984
targets for txpool
rphmeier Jul 24, 2018
6888a5b
tweak runtime + collator
rphmeier Jul 25, 2018
beded4c
Merge remote-tracking branch 'upstream/master' into rh-simple-parachain
rphmeier Jul 26, 2018
ec68919
fix version in adder-collator
rphmeier Jul 27, 2018
5c633ea
Merge remote-tracking branch 'upstream/master' into rh-simple-parachain
rphmeier Jul 27, 2018
5625a81
consistency for overflowing
rphmeier Jul 27, 2018
b8e9dfc
Merge branch 'master' into rh-simple-parachain
gavofyork Jul 29, 2018
2e7d5c8
Merge remote-tracking branch 'upstream/master' into rh-simple-parachain
rphmeier Jul 30, 2018
55cbde1
Merge branch 'rh-simple-parachain' of github.com:paritytech/polkadot …
rphmeier Jul 30, 2018
1ebdd12
adjust comment
rphmeier Jul 30, 2018
65112b3
fix stable test run
rphmeier Jul 30, 2018
5e50c6a
remove dummy registration test
rphmeier Jul 30, 2018
b393ae9
final grumbles
rphmeier Jul 31, 2018
b1e3250
Merge remote-tracking branch 'upstream/master' into rh-simple-parachain
rphmeier Aug 1, 2018
23 changes: 23 additions & 0 deletions Cargo.lock

(Generated file; diff not rendered.)

6 changes: 5 additions & 1 deletion Cargo.toml
@@ -32,6 +32,9 @@ members = [
"polkadot/transaction-pool",
"polkadot/service",

"polkadot/test-parachains/adder",
"polkadot/test-parachains/adder/collator",

"substrate/bft",
"substrate/cli",
"substrate/client",
@@ -77,6 +80,7 @@ members = [
]
exclude = [
"polkadot/runtime/wasm",
"polkadot/test-parachains/adder/wasm",
"demo/runtime/wasm",
"substrate/executor/wasm",
"substrate/pwasm-alloc",
@@ -92,4 +96,4 @@ is-it-maintained-open-issues = { repository = "paritytech/polkadot" }

[profile.release]
# Substrate runtime requires unwinding.
panic = "unwind"
panic = "unwind"
2 changes: 1 addition & 1 deletion common.sh
@@ -8,7 +8,7 @@ SRCS=(
"substrate/executor/wasm"
"demo/runtime/wasm"
"substrate/test-runtime/wasm"
"polkadot/parachain/test-chains/basic_add"
"polkadot/test-parachains/"
)

# Make pushd/popd silent.
8 changes: 4 additions & 4 deletions demo/runtime/wasm/Cargo.lock

(Generated file; diff not rendered.)

Binary file not shown.
Binary file not shown.
47 changes: 37 additions & 10 deletions polkadot/collator/src/lib.rs
@@ -60,6 +60,7 @@ extern crate polkadot_primitives;
extern crate log;

use std::collections::{BTreeSet, BTreeMap, HashSet};
use std::fmt;
use std::sync::Arc;
use std::time::{Duration, Instant};

@@ -68,12 +69,36 @@ use client::BlockchainEvents;
use polkadot_api::PolkadotApi;
use polkadot_primitives::{AccountId, BlockId, SessionKey};
use polkadot_primitives::parachain::{self, BlockData, DutyRoster, HeadData, ConsolidatedIngress, Message, Id as ParaId};
use polkadot_cli::{ServiceComponents, Service, CustomConfiguration, VersionInfo};
use polkadot_cli::{ServiceComponents, Service, CustomConfiguration};
use polkadot_cli::{Worker, IntoExit};
use tokio::timer::Deadline;

pub use polkadot_cli::VersionInfo;

const COLLATION_TIMEOUT: Duration = Duration::from_secs(30);

/// Error to return when the head data was invalid.
#[derive(Clone, Copy, Debug)]
pub struct InvalidHead;

/// Collation errors.
#[derive(Debug)]
pub enum Error<R> {
/// Error on the relay-chain side of things.
Polkadot(R),
/// Error on the collator side of things.
Collator(InvalidHead),
}

impl<R: fmt::Display> fmt::Display for Error<R> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Error::Polkadot(ref err) => write!(f, "Polkadot node error: {}", err),
Error::Collator(_) => write!(f, "Collator node error: Invalid head data"),
}
}
}

/// Parachain context needed for collation.
///
/// This can be implemented through an externally attached service or a stub.
@@ -84,7 +109,7 @@ pub trait ParachainContext: Clone {
&self,
last_head: HeadData,
ingress: I,
) -> (BlockData, HeadData);
) -> Result<(BlockData, HeadData), InvalidHead>;
}
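The central change to this trait is that `produce_candidate` now returns `Result<(BlockData, HeadData), InvalidHead>`, so a parachain implementation can report malformed head data instead of panicking inside the collator. A minimal, self-contained sketch of what an adder-style implementation might look like; the stand-in types and the 8-byte counter head format are illustrative assumptions, not the real `polkadot_primitives` definitions:

```rust
// Stand-ins for the real polkadot types (illustration only).
#[derive(Clone, Debug)]
struct BlockData(Vec<u8>);
#[derive(Clone, Debug)]
struct HeadData(Vec<u8>);
#[derive(Clone, Copy, Debug)]
struct InvalidHead;

#[derive(Clone)]
struct AdderContext;

impl AdderContext {
    /// Mirrors the shape of `ParachainContext::produce_candidate`:
    /// malformed head data becomes `Err(InvalidHead)` rather than a panic.
    fn produce_candidate<I: IntoIterator<Item = (u32, Vec<u8>)>>(
        &self,
        last_head: HeadData,
        _ingress: I,
    ) -> Result<(BlockData, HeadData), InvalidHead> {
        // Hypothetical head format: an 8-byte little-endian counter.
        if last_head.0.len() != 8 {
            return Err(InvalidHead);
        }
        let mut bytes = [0u8; 8];
        bytes.copy_from_slice(&last_head.0);
        // Wrapping addition, in the spirit of the
        // "consistency for overflowing" commit above.
        let next = u64::from_le_bytes(bytes).wrapping_add(1);
        let encoded = next.to_le_bytes().to_vec();
        Ok((BlockData(encoded.clone()), HeadData(encoded)))
    }
}

fn main() {
    let ctx = AdderContext;
    let genesis = HeadData(0u64.to_le_bytes().to_vec());
    println!("{:?}", ctx.produce_candidate(genesis, std::iter::empty()));
    // Bad head data is rejected instead of crashing the collator.
    assert!(ctx.produce_candidate(HeadData(vec![1, 2, 3]), std::iter::empty()).is_err());
}
```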

/// Relay chain context needed to collate.
@@ -154,18 +179,18 @@ pub fn collate<'a, R, P>(
para_context: P,
key: Arc<ed25519::Pair>,
)
-> impl Future<Item=parachain::Collation, Error=R::Error> + 'a
-> impl Future<Item=parachain::Collation, Error=Error<R::Error>> + 'a
where
R: RelayChainContext + 'a,
R::Error: 'a,
R::FutureEgress: 'a,
P: ParachainContext + 'a,
{
collate_ingress(relay_context).map(move |ingress| {
collate_ingress(relay_context).map_err(Error::Polkadot).and_then(move |ingress| {
let (block_data, head_data) = para_context.produce_candidate(
last_head,
ingress.0.iter().flat_map(|&(id, ref msgs)| msgs.iter().cloned().map(move |msg| (id, msg)))
);
).map_err(Error::Collator)?;

let block_data_hash = block_data.hash();
let signature = key.sign(&block_data_hash.0[..]).into();
@@ -181,10 +206,10 @@ pub fn collate<'a, R, P>(
block_data_hash,
};

parachain::Collation {
Ok(parachain::Collation {
receipt,
block_data,
}
})
})
}
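Note how the rewritten chain threads errors: relay-chain failures are tagged `Error::Polkadot` by `map_err`, and `produce_candidate` failures are tagged `Error::Collator` before `?` propagates them. The same shape restated with plain `Result`s and stand-in functions (a sketch, not the real futures-based pipeline):

```rust
#[derive(Clone, Copy, Debug)]
struct InvalidHead;

#[derive(Debug)]
enum Error<R> {
    /// Relay-chain side failure.
    Polkadot(R),
    /// Collator side failure.
    Collator(InvalidHead),
}

// Stand-ins for `collate_ingress` and `ParachainContext::produce_candidate`.
fn collate_ingress() -> Result<Vec<(u32, Vec<u8>)>, String> {
    Ok(Vec::new())
}

fn produce_candidate(_ingress: Vec<(u32, Vec<u8>)>) -> Result<(Vec<u8>, Vec<u8>), InvalidHead> {
    Ok((vec![0], vec![0]))
}

fn collate() -> Result<(Vec<u8>, Vec<u8>), Error<String>> {
    // Each stage tags its own variant, exactly as in the future chain above.
    let ingress = collate_ingress().map_err(Error::Polkadot)?;
    produce_candidate(ingress).map_err(Error::Collator)
}

fn main() {
    println!("{:?}", collate());
}
```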

@@ -248,7 +273,7 @@ impl<P, E> Worker for CollationNode<P, E> where
($e:expr) => {
match $e {
Ok(x) => x,
Err(e) => return future::Either::A(future::err(e)),
Err(e) => return future::Either::A(future::err(Error::Polkadot(e))),
}
}
}
@@ -323,17 +348,19 @@ fn compute_targets(para_id: ParaId, session_keys: &[SessionKey], roster: DutyRos
///
/// Provide a future which resolves when the node should exit.
/// This function blocks until done.
pub fn run_collator<P, E>(
pub fn run_collator<P, E, I, ArgT>(
parachain_context: P,
para_id: ParaId,
exit: E,
key: Arc<ed25519::Pair>,
args: Vec<::std::ffi::OsString>,
args: I,
version: VersionInfo,
) -> polkadot_cli::error::Result<()> where
P: ParachainContext + Send + 'static,
E: IntoFuture<Item=(),Error=()>,
E::Future: Send + Clone + 'static,
I: IntoIterator<Item=ArgT>,
ArgT: Into<std::ffi::OsString> + Clone,
{
let node_logic = CollationNode { parachain_context, exit: exit.into_future(), para_id, key };
polkadot_cli::run(args, node_logic, version)
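A second, smaller change in this file: `run_collator` now accepts any `I: IntoIterator<Item = ArgT>` with `ArgT: Into<OsString> + Clone` instead of a concrete `Vec<OsString>`, so callers can hand over `std::env::args_os()` or a literal argument list directly. A sketch of the same generics in isolation (`run_with_args` is a stand-in, not the real entry point):

```rust
use std::ffi::OsString;

// Stand-in function with the same argument generics as the new `run_collator`.
fn run_with_args<I, ArgT>(args: I)
where
    I: IntoIterator<Item = ArgT>,
    ArgT: Into<OsString> + Clone,
{
    for arg in args {
        let arg: OsString = arg.into();
        println!("arg: {:?}", arg);
    }
}

fn main() {
    // Both call styles now work without collecting into Vec<OsString> first.
    run_with_args(std::env::args_os());
    run_with_args(vec!["--dev", "--collator"]);
}
```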
5 changes: 5 additions & 0 deletions polkadot/consensus/src/lib.rs
@@ -273,8 +273,13 @@ impl<C, N, P> bft::Environment<Block> for ProposerFactory<C, N, P>
sign_with.public().into(),
)?;

info!("Starting consensus session on top of parent {:?}. Local parachain duty is {:?}",
parent_hash, local_duty.validation);

let active_parachains = self.client.active_parachains(&id)?;

debug!(target: "consensus", "Active parachains: {:?}", active_parachains);

let n_parachains = active_parachains.len();
let table = Arc::new(SharedTable::new(group_info, sign_with.clone(), parent_hash));
let (router, input, output) = self.network.communication_for(
64 changes: 46 additions & 18 deletions polkadot/consensus/src/shared_table/mod.rs
@@ -143,6 +143,7 @@ impl SharedTableInner {
fetch_block_data,
fetch_extrinsic,
evaluate: checking_validity,
ensure_available: checking_availability,
})
}
}
@@ -206,6 +207,7 @@ struct Work<D: Future, E: Future> {
fetch_block_data: future::Fuse<D>,
fetch_extrinsic: Option<future::Fuse<E>>,
evaluate: bool,
ensure_available: bool,
}

/// Primed statement producer.
@@ -235,31 +237,35 @@ impl<D, E, C, Err> Future for PrimedStatementProducer<D, E, C>
});

let hash = work.candidate_receipt.hash();

debug!(target: "consensus", "Making validity statement about candidate {}: is_good? {:?}", hash, is_good);
self.inner.produced_statements.validity = match is_good {
Some(true) => Some(GenericStatement::Valid(hash)),
Some(false) => Some(GenericStatement::Invalid(hash)),
None => None,
};
}
}

if let Some(ref mut fetch_extrinsic) = work.fetch_extrinsic {
if let Async::Ready(extrinsic) = fetch_extrinsic.poll()? {
self.inner.produced_statements.extrinsic = Some(extrinsic);
work.evaluate = false;
}
}

let done = self.inner.produced_statements.block_data.is_some() && {
if work.evaluate {
true
} else if self.inner.produced_statements.extrinsic.is_some() {
if let Async::Ready(Some(extrinsic)) = work.fetch_extrinsic.poll()? {
if work.ensure_available {
let hash = work.candidate_receipt.hash();
debug!(target: "consensus", "Claiming candidate {} available.", hash);

// TODO: actually wait for block data and then ensure availability.
self.inner.produced_statements.extrinsic = Some(extrinsic);
self.inner.produced_statements.availability =
Some(GenericStatement::Available(work.candidate_receipt.hash()));
Some(GenericStatement::Available(hash));

true
} else {
false
work.ensure_available = false;
}
}

let done = match (work.evaluate, work.ensure_available) {
(false, false) => true,
_ => false,
};

if done {
@@ -356,10 +362,25 @@ impl SharedTable {
}

/// Sign and import a local statement.
pub fn sign_and_import(&self, statement: table::Statement) -> SignedStatement {
let proposed_digest = match statement {
GenericStatement::Candidate(ref c) => Some(c.hash()),
_ => None,
///
/// For candidate statements, this may also produce a second signed statement
/// concerning the availability of the candidate data.
pub fn sign_and_import(&self, statement: table::Statement)
-> (SignedStatement, Option<SignedStatement>)
{
let (proposed_digest, availability) = match statement {
GenericStatement::Candidate(ref c) => {
let mut availability = None;
let hash = c.hash();

// TODO: actually store the data in an availability store of some kind.
if self.context.is_availability_guarantor_of(&self.context.local_id(), &c.parachain_index) {
availability = Some(self.context.sign_statement(GenericStatement::Available(hash)));
}

(Some(hash), availability)
}
_ => (None, None),
};

let signed_statement = self.context.sign_statement(statement);
@@ -370,7 +391,13 @@
}

inner.table.import_statement(&*self.context, signed_statement.clone());
signed_statement

// ensure the availability statement is imported after the candidate.
if let Some(a) = availability.clone() {
inner.table.import_statement(&*self.context, a);
}

(signed_statement, availability)
}

/// Execute a closure using a specific candidate.
@@ -543,5 +570,6 @@ mod tests {

assert!(producer.work.fetch_extrinsic.is_some(), "should fetch extrinsic when guaranteeing availability");
assert!(!producer.work.evaluate, "should not evaluate validity");
assert!(producer.work.ensure_available);
}
}
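The comment in the diff states the invariant directly: the availability statement is imported only after the candidate statement it refers to. A toy sketch of the new two-statement return contract (stand-in types; signing and table bookkeeping elided):

```rust
#[derive(Clone, Debug)]
enum Statement {
    Candidate(u64),
    Available(u64),
}

// Analogue of the new `sign_and_import`: a candidate statement from a local
// availability guarantor yields a second `Available` statement for the same hash.
fn sign_and_import(statement: Statement, is_guarantor: bool) -> (Statement, Option<Statement>) {
    let availability = match statement {
        Statement::Candidate(hash) if is_guarantor => Some(Statement::Available(hash)),
        _ => None,
    };
    (statement, availability)
}

fn main() {
    let (primary, availability) = sign_and_import(Statement::Candidate(42), true);
    println!("import first: {:?}", primary);
    if let Some(avail) = availability {
        // Imported second, so the table sees the candidate before the
        // claim that its data is available.
        println!("import next: {:?}", avail);
    }
}
```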
1 change: 1 addition & 0 deletions polkadot/network/src/consensus.rs
@@ -176,6 +176,7 @@ impl<P: LocalPolkadotApi + Send + Sync + 'static> MessageProcessTask<P> {
}
}
ConsensusMessage::ChainSpecific(msg, _) => {
debug!(target: "consensus", "Processing consensus statement for live consensus");
if let Some(Message::Statement(parent_hash, statement)) = Decode::decode(&mut msg.as_slice()) {
if ::polkadot_consensus::check_statement(&statement.statement, &statement.signature, statement.sender, &parent_hash) {
self.table_router.import_statement(statement);