From ab00635d693e627c3044561d9361de35008ad757 Mon Sep 17 00:00:00 2001 From: Eric Miller Date: Thu, 29 Jul 2021 11:10:06 -0400 Subject: [PATCH 1/8] copying rustfmt from root to node-template build. Also, running rustfmt on this. --- .maintain/node-template-release/src/main.rs | 119 ++++++++++++-------- 1 file changed, 73 insertions(+), 46 deletions(-) diff --git a/.maintain/node-template-release/src/main.rs b/.maintain/node-template-release/src/main.rs index bf37797419bc..0e23144525b7 100644 --- a/.maintain/node-template-release/src/main.rs +++ b/.maintain/node-template-release/src/main.rs @@ -1,8 +1,11 @@ use structopt::StructOpt; use std::{ - path::{PathBuf, Path}, collections::HashMap, fs::{File, OpenOptions, self}, io::{Read, Write}, - process::Command + collections::HashMap, + fs::{self, File, OpenOptions}, + io::{Read, Write}, + path::{Path, PathBuf}, + process::Command, }; use glob; @@ -40,11 +43,9 @@ fn find_cargo_tomls(path: PathBuf) -> Vec { let glob = glob::glob(&path).expect("Generates globbing pattern"); let mut result = Vec::new(); - glob.into_iter().for_each(|file| { - match file { - Ok(file) => result.push(file), - Err(e) => println!("{:?}", e), - } + glob.into_iter().for_each(|file| match file { + Ok(file) => result.push(file), + Err(e) => println!("{:?}", e), }); if result.is_empty() { @@ -78,30 +79,44 @@ fn get_git_commit_id(path: &Path) -> String { /// Parse the given `Cargo.toml` into a `HashMap` fn parse_cargo_toml(file: &Path) -> CargoToml { let mut content = String::new(); - File::open(file).expect("Cargo.toml exists").read_to_string(&mut content).expect("Reads file"); + File::open(file) + .expect("Cargo.toml exists") + .read_to_string(&mut content) + .expect("Reads file"); toml::from_str(&content).expect("Cargo.toml is a valid toml file") } /// Replaces all substrate path dependencies with a git dependency. 
-fn replace_path_dependencies_with_git(cargo_toml_path: &Path, commit_id: &str, cargo_toml: &mut CargoToml) { +fn replace_path_dependencies_with_git( + cargo_toml_path: &Path, + commit_id: &str, + cargo_toml: &mut CargoToml, +) { let mut cargo_toml_path = cargo_toml_path.to_path_buf(); // remove `Cargo.toml` cargo_toml_path.pop(); for &table in &["dependencies", "build-dependencies", "dev-dependencies"] { - let mut dependencies: toml::value::Table = match cargo_toml - .remove(table) - .and_then(|v| v.try_into().ok()) { - Some(deps) => deps, - None => continue, - }; + let mut dependencies: toml::value::Table = + match cargo_toml.remove(table).and_then(|v| v.try_into().ok()) { + Some(deps) => deps, + None => continue, + }; let deps_rewritten = dependencies .iter() - .filter_map(|(k, v)| v.clone().try_into::().ok().map(move |v| (k, v))) - .filter(|t| t.1.contains_key("path") && { - // if the path does not exists, we need to add this as git dependency - t.1.get("path").unwrap().as_str().map(|path| !cargo_toml_path.join(path).exists()).unwrap_or(false) + .filter_map(|(k, v)| { + v.clone().try_into::().ok().map(move |v| (k, v)) + }) + .filter(|t| { + t.1.contains_key("path") && { + // if the path does not exists, we need to add this as git dependency + t.1.get("path") + .unwrap() + .as_str() + .map(|path| !cargo_toml_path.join(path).exists()) + .unwrap_or(false) + } }) .map(|(k, mut v)| { // remove `path` and add `git` and `rev` @@ -110,7 +125,8 @@ fn replace_path_dependencies_with_git(cargo_toml_path: &Path, commit_id: &str, c v.insert("rev".into(), commit_id.into()); (k.clone(), v.into()) - }).collect::>(); + }) + .collect::>(); dependencies.extend(deps_rewritten.into_iter()); @@ -135,8 +151,9 @@ fn update_top_level_cargo_toml( cargo_toml.insert("profile".into(), profile.into()); - let members = workspace_members.iter() - .map(|p| + let members = workspace_members + .iter() + .map(|p| { p.strip_prefix(node_template_path) .expect("Workspace member is a child of the node 
template path!") .parent() @@ -145,7 +162,7 @@ fn update_top_level_cargo_toml( .expect("The given path ends with `Cargo.toml` as file name!") .display() .to_string() - ) + }) .collect::>(); let mut members_section = toml::value::Table::new(); @@ -163,24 +180,20 @@ fn write_cargo_toml(path: &Path, cargo_toml: CargoToml) { /// Build and test the generated node-template fn build_and_test(path: &Path, cargo_tomls: &[PathBuf]) { // Build node - assert!( - Command::new("cargo") - .args(&["build", "--all"]) - .current_dir(path) - .status() - .expect("Compiles node") - .success() - ); + assert!(Command::new("cargo") + .args(&["build", "--all"]) + .current_dir(path) + .status() + .expect("Compiles node") + .success()); // Test node - assert!( - Command::new("cargo") - .args(&["test", "--all"]) - .current_dir(path) - .status() - .expect("Tests node") - .success() - ); + assert!(Command::new("cargo") + .args(&["test", "--all"]) + .current_dir(path) + .status() + .expect("Tests node") + .success()); // Remove all `target` directories for toml in cargo_tomls { @@ -189,7 +202,8 @@ fn build_and_test(path: &Path, cargo_tomls: &[PathBuf]) { target_path = target_path.join("target"); if target_path.exists() { - fs::remove_dir_all(&target_path).expect(&format!("Removes `{}`", target_path.display())); + fs::remove_dir_all(&target_path) + .expect(&format!("Removes `{}`", target_path.display())); } } } @@ -219,7 +233,10 @@ fn main() { // Check if top level Cargo.toml exists. If not, create one in the destination if !cargo_tomls.contains(&top_level_cargo_toml_path) { // create the top_level_cargo_toml - OpenOptions::new().create(true).write(true).open(top_level_cargo_toml_path.clone()) + OpenOptions::new() + .create(true) + .write(true) + .open(top_level_cargo_toml_path.clone()) .expect("Create root level `Cargo.toml` failed."); // push into our data structure @@ -233,9 +250,8 @@ fn main() { // Check if this is the top level `Cargo.toml`, as this requires some special treatments. 
if top_level_cargo_toml_path == *t { // All workspace member `Cargo.toml` file paths. - let workspace_members = cargo_tomls.iter() - .filter(|p| **p != top_level_cargo_toml_path) - .collect(); + let workspace_members = + cargo_tomls.iter().filter(|p| **p != top_level_cargo_toml_path).collect(); update_top_level_cargo_toml(&mut cargo_toml, workspace_members, &node_template_path); } @@ -243,10 +259,21 @@ fn main() { write_cargo_toml(&t, cargo_toml); }); + // adding root rustfmt to node template build path + let node_template_rustfmt_toml_path = node_template_path.join("rustfmt.toml"); + let root_rustfmt_toml = + &options.node_template.parent().unwrap().parent().unwrap().join("rustfmt.toml"); + if root_rustfmt_toml.exists() { + fs::copy(&root_rustfmt_toml, &node_template_rustfmt_toml_path) + .expect("Copying rustfmt.toml."); + } + build_and_test(&node_template_path, &cargo_tomls); - let output = GzEncoder::new(File::create(&options.output) - .expect("Creates output file"), Compression::default()); + let output = GzEncoder::new( + File::create(&options.output).expect("Creates output file"), + Compression::default(), + ); let mut tar = tar::Builder::new(output); tar.append_dir_all("substrate-node-template", node_template_path) .expect("Writes substrate-node-template archive"); From d180b1c0908bc3d7e836368f8ef70f876aaf7bcf Mon Sep 17 00:00:00 2001 From: Eric Miller Date: Mon, 2 Aug 2021 11:58:12 -0400 Subject: [PATCH 2/8] Update .maintain/node-template-release/src/main.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Bastian Köcher --- .maintain/node-template-release/src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.maintain/node-template-release/src/main.rs b/.maintain/node-template-release/src/main.rs index 0e23144525b7..7dcb1f0f4d81 100644 --- a/.maintain/node-template-release/src/main.rs +++ b/.maintain/node-template-release/src/main.rs @@ -262,7 +262,7 @@ fn main() { // adding 
root rustfmt to node template build path let node_template_rustfmt_toml_path = node_template_path.join("rustfmt.toml"); let root_rustfmt_toml = - &options.node_template.parent().unwrap().parent().unwrap().join("rustfmt.toml"); + &options.node_template.join("../../rustfmt.toml"); if root_rustfmt_toml.exists() { fs::copy(&root_rustfmt_toml, &node_template_rustfmt_toml_path) .expect("Copying rustfmt.toml."); From c321766afec361e8624201617326cc29da5ee8f6 Mon Sep 17 00:00:00 2001 From: Gavin Wood Date: Thu, 29 Jul 2021 22:02:28 +0200 Subject: [PATCH 3/8] Add some important events to batch & staking to ensure ease of analysis (#9462) * Add some important events to batch & staking to ensure ease of analysis * Update frame/staking/src/pallet/mod.rs Co-authored-by: Zeke Mostov <32168567+emostov@users.noreply.github.com> * Update lib.rs * fix test Co-authored-by: Zeke Mostov <32168567+emostov@users.noreply.github.com> Co-authored-by: Shawn Tabrizi --- frame/contracts/src/exec.rs | 5 +++++ frame/offences/benchmarking/src/lib.rs | 2 +- frame/staking/src/pallet/impls.rs | 11 +++++++---- frame/staking/src/pallet/mod.rs | 12 +++++++----- frame/staking/src/slashing.rs | 2 +- frame/staking/src/tests.rs | 10 +++++----- frame/utility/src/lib.rs | 4 ++++ 7 files changed, 30 insertions(+), 16 deletions(-) diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index a862a98802e4..16c4886746d1 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -2506,6 +2506,11 @@ mod tests { event: MetaEvent::System(frame_system::Event::Remarked(BOB, remark_hash)), topics: vec![], }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::Utility(pallet_utility::Event::ItemCompleted), + topics: vec![], + }, EventRecord { phase: Phase::Initialization, event: MetaEvent::Utility(pallet_utility::Event::BatchInterrupted( diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index d68e29047a7c..35e3c1aec940 100644 --- 
a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -289,7 +289,7 @@ benchmarks! { let slash_amount = slash_fraction * bond_amount; let reward_amount = slash_amount * (1 + n) / 2; let slash = |id| core::iter::once( - ::Event::from(StakingEvent::::Slash(id, BalanceOf::::from(slash_amount))) + ::Event::from(StakingEvent::::Slashed(id, BalanceOf::::from(slash_amount))) ); let chill = |id| core::iter::once( ::Event::from(StakingEvent::::Chilled(id)) diff --git a/frame/staking/src/pallet/impls.rs b/frame/staking/src/pallet/impls.rs index b42ab4551602..accd7a0cf02e 100644 --- a/frame/staking/src/pallet/impls.rs +++ b/frame/staking/src/pallet/impls.rs @@ -154,11 +154,13 @@ impl Pallet { let validator_exposure_part = Perbill::from_rational(exposure.own, exposure.total); let validator_staking_payout = validator_exposure_part * validator_leftover_payout; + Self::deposit_event(Event::::PayoutStarted(era, ledger.stash.clone())); + // We can now make total validator payout: if let Some(imbalance) = Self::make_payout(&ledger.stash, validator_staking_payout + validator_commission_payout) { - Self::deposit_event(Event::::Reward(ledger.stash, imbalance.peek())); + Self::deposit_event(Event::::Rewarded(ledger.stash, imbalance.peek())); } // Track the number of payout ops to nominators. Note: `WeightInfo::payout_stakers_alive_staked` @@ -176,7 +178,8 @@ impl Pallet { if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) { // Note: this logic does not count payouts for `RewardDestination::None`. 
nominator_payout_count += 1; - Self::deposit_event(Event::::Reward(nominator.who.clone(), imbalance.peek())); + let e = Event::::Rewarded(nominator.who.clone(), imbalance.peek()); + Self::deposit_event(e); } } @@ -354,7 +357,7 @@ impl Pallet { let issuance = T::Currency::total_issuance(); let (validator_payout, rest) = T::EraPayout::era_payout(staked, issuance, era_duration); - Self::deposit_event(Event::::EraPayout(active_era.index, validator_payout, rest)); + Self::deposit_event(Event::::EraPaid(active_era.index, validator_payout, rest)); // Set ending era reward. >::insert(&active_era.index, validator_payout); @@ -446,7 +449,7 @@ impl Pallet { return None } - Self::deposit_event(Event::StakingElection); + Self::deposit_event(Event::StakersElected); Some(Self::trigger_new_era(start_session_index, exposures)) } diff --git a/frame/staking/src/pallet/mod.rs b/frame/staking/src/pallet/mod.rs index 4e7f06ebab18..444768dbdccf 100644 --- a/frame/staking/src/pallet/mod.rs +++ b/frame/staking/src/pallet/mod.rs @@ -525,17 +525,17 @@ pub mod pallet { /// The era payout has been set; the first balance is the validator-payout; the second is /// the remainder from the maximum amount of reward. /// \[era_index, validator_payout, remainder\] - EraPayout(EraIndex, BalanceOf, BalanceOf), - /// The staker has been rewarded by this amount. \[stash, amount\] - Reward(T::AccountId, BalanceOf), + EraPaid(EraIndex, BalanceOf, BalanceOf), + /// The nominator has been rewarded by this amount. \[stash, amount\] + Rewarded(T::AccountId, BalanceOf), /// One validator (and its nominators) has been slashed by the given amount. /// \[validator, amount\] - Slash(T::AccountId, BalanceOf), + Slashed(T::AccountId, BalanceOf), /// An old slashing report from a prior era was discarded because it could /// not be processed. \[session_index\] OldSlashingReportDiscarded(SessionIndex), /// A new set of stakers was elected. - StakingElection, + StakersElected, /// An account has bonded this amount. 
\[stash, amount\] /// /// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably, @@ -553,6 +553,8 @@ pub mod pallet { /// An account has stopped participating as either a validator or nominator. /// \[stash\] Chilled(T::AccountId), + /// The stakers' rewards are getting paid. \[era_index, validator_stash\] + PayoutStarted(EraIndex, T::AccountId), } #[pallet::error] diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index 332c9ffc3906..3da79924d0a0 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -584,7 +584,7 @@ pub fn do_slash( >::update_ledger(&controller, &ledger); // trigger the event - >::deposit_event(super::Event::::Slash(stash.clone(), value)); + >::deposit_event(super::Event::::Slashed(stash.clone(), value)); } } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 69ce4e335f4b..3cb7a74e8982 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -257,7 +257,7 @@ fn rewards_should_work() { ); assert_eq!( *mock::staking_events().last().unwrap(), - Event::EraPayout(0, total_payout_0, maximum_payout - total_payout_0) + Event::EraPaid(0, total_payout_0, maximum_payout - total_payout_0) ); mock::make_all_reward_payment(0); @@ -295,7 +295,7 @@ fn rewards_should_work() { ); assert_eq!( *mock::staking_events().last().unwrap(), - Event::EraPayout(1, total_payout_1, maximum_payout - total_payout_1) + Event::EraPaid(1, total_payout_1, maximum_payout - total_payout_1) ); mock::make_all_reward_payment(1); @@ -3942,7 +3942,7 @@ mod election_data_provider { run_to_block(20); assert_eq!(Staking::next_election_prediction(System::block_number()), 45); assert_eq!(staking_events().len(), 1); - assert_eq!(*staking_events().last().unwrap(), Event::StakingElection); + assert_eq!(*staking_events().last().unwrap(), Event::StakersElected); for b in 21..45 { run_to_block(b); @@ -3953,7 +3953,7 @@ mod election_data_provider { run_to_block(45); 
assert_eq!(Staking::next_election_prediction(System::block_number()), 70); assert_eq!(staking_events().len(), 3); - assert_eq!(*staking_events().last().unwrap(), Event::StakingElection); + assert_eq!(*staking_events().last().unwrap(), Event::StakersElected); Staking::force_no_eras(Origin::root()).unwrap(); assert_eq!(Staking::next_election_prediction(System::block_number()), u64::MAX); @@ -3976,7 +3976,7 @@ mod election_data_provider { run_to_block(55); assert_eq!(Staking::next_election_prediction(System::block_number()), 55 + 25); assert_eq!(staking_events().len(), 6); - assert_eq!(*staking_events().last().unwrap(), Event::StakingElection); + assert_eq!(*staking_events().last().unwrap(), Event::StakersElected); // The new era has been planned, forcing is changed from `ForceNew` to `NotForcing`. assert_eq!(ForceEra::::get(), Forcing::NotForcing); }) diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 1133bd869857..81cc3c65238b 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -108,6 +108,8 @@ pub mod pallet { BatchInterrupted(u32, DispatchError), /// Batch of dispatches completed fully with no error. BatchCompleted, + /// A single item within a Batch of dispatches has completed with no error. + ItemCompleted, } #[pallet::call] @@ -173,6 +175,7 @@ pub mod pallet { // Return the actual used weight + base_weight of this call. 
return Ok(Some(base_weight + weight).into()) } + Self::deposit_event(Event::ItemCompleted); } Self::deposit_event(Event::BatchCompleted); let base_weight = T::WeightInfo::batch(calls_len as u32); @@ -289,6 +292,7 @@ pub mod pallet { err.post_info = Some(base_weight + weight).into(); err })?; + Self::deposit_event(Event::ItemCompleted); } Self::deposit_event(Event::BatchCompleted); let base_weight = T::WeightInfo::batch_all(calls_len as u32); From e7a8f7ad726c1e2d35f05b3b8f44dc61f14a708a Mon Sep 17 00:00:00 2001 From: Squirrel Date: Fri, 30 Jul 2021 14:27:17 +0100 Subject: [PATCH 4/8] Move client consensus parts out of primitives and into client/consensus/api (#9319) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * moved client code out of primitives * bump ci * Fixup from merge. * Removed unused deps thanks to review feedback * Removing unneeded deps * updating lock file * note about rustfmt * fixed typo to bump ci * Move lonely CacheKeyId to parent * cargo fmt * updating import style * Update docs/STYLE_GUIDE.md Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> --- Cargo.lock | 28 ++++++++-- bin/node-template/node/src/service.rs | 2 +- bin/node/cli/src/service.rs | 7 ++- bin/node/test-runner-example/Cargo.toml | 1 - bin/node/testing/Cargo.toml | 1 + bin/node/testing/src/bench.rs | 5 +- client/api/src/backend.rs | 1 - client/consensus/aura/Cargo.toml | 1 + client/consensus/aura/src/import_queue.rs | 10 ++-- client/consensus/aura/src/lib.rs | 18 +++---- client/consensus/babe/Cargo.toml | 1 + client/consensus/babe/src/lib.rs | 21 +++++--- client/consensus/babe/src/tests.rs | 6 +-- client/consensus/common/Cargo.toml | 19 ++++++- .../consensus/common/src/block_import.rs | 21 +------- .../consensus/common/src/import_queue.rs | 39 ++++++++------ .../common/src/import_queue/basic_queue.rs | 37 +++++++------ 
.../common/src/import_queue/buffered_link.rs | 16 +++--- client/consensus/common/src/lib.rs | 15 ++++++ .../consensus/common/src/metrics.rs | 4 +- client/consensus/manual-seal/Cargo.toml | 1 + client/consensus/manual-seal/src/consensus.rs | 2 +- .../manual-seal/src/consensus/babe.rs | 6 +-- client/consensus/manual-seal/src/error.rs | 3 +- client/consensus/manual-seal/src/lib.rs | 12 ++--- client/consensus/manual-seal/src/rpc.rs | 2 +- .../consensus/manual-seal/src/seal_block.rs | 8 ++- client/consensus/pow/Cargo.toml | 1 + client/consensus/pow/src/lib.rs | 14 ++--- client/consensus/pow/src/worker.rs | 10 ++-- client/consensus/slots/Cargo.toml | 1 + client/consensus/slots/src/lib.rs | 7 ++- client/db/Cargo.toml | 1 - client/finality-grandpa-warp-sync/Cargo.toml | 1 + client/finality-grandpa/src/import.rs | 10 ++-- client/finality-grandpa/src/lib.rs | 3 +- client/finality-grandpa/src/tests.rs | 9 ++-- client/network/Cargo.toml | 1 + client/network/src/behaviour.rs | 6 +-- client/network/src/chain.rs | 3 +- client/network/src/config.rs | 3 +- client/network/src/gossip/tests.rs | 10 ++-- client/network/src/protocol.rs | 9 ++-- client/network/src/protocol/sync.rs | 12 ++--- client/network/src/service.rs | 6 +-- client/network/src/service/tests.rs | 10 ++-- client/network/test/src/block_import.rs | 13 +++-- client/network/test/src/lib.rs | 36 ++++++------- client/rpc/Cargo.toml | 1 + client/rpc/src/chain/tests.rs | 2 +- client/rpc/src/state/tests.rs | 3 +- client/service/Cargo.toml | 1 + client/service/src/builder.rs | 6 +-- client/service/src/chain_ops/check_block.rs | 2 +- client/service/src/chain_ops/import_blocks.rs | 20 +++---- client/service/src/client/client.rs | 54 ++++++++++--------- client/service/src/lib.rs | 2 +- client/service/test/Cargo.toml | 1 + client/service/test/src/client/mod.rs | 8 +-- docs/STYLE_GUIDE.md | 3 ++ frame/support/src/dispatch.rs | 2 +- primitives/blockchain/src/backend.rs | 2 +- primitives/consensus/common/Cargo.toml | 10 ++-- 
primitives/consensus/common/src/lib.rs | 37 +++++++------ test-utils/client/src/client_ext.rs | 5 +- test-utils/runtime/Cargo.toml | 1 + test-utils/runtime/client/Cargo.toml | 2 +- test-utils/runtime/src/lib.rs | 4 +- test-utils/test-runner/src/lib.rs | 3 +- 69 files changed, 339 insertions(+), 283 deletions(-) rename {primitives => client}/consensus/common/src/block_import.rs (96%) rename {primitives => client}/consensus/common/src/import_queue.rs (90%) rename {primitives => client}/consensus/common/src/import_queue/basic_queue.rs (94%) rename {primitives => client}/consensus/common/src/import_queue/buffered_link.rs (92%) rename {primitives => client}/consensus/common/src/metrics.rs (96%) diff --git a/Cargo.lock b/Cargo.lock index fd6b57918b31..6cfc2d19db81 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4542,6 +4542,7 @@ dependencies = [ "sc-cli", "sc-client-api", "sc-client-db", + "sc-consensus", "sc-executor", "sc-service", "sp-api", @@ -7242,7 +7243,6 @@ dependencies = [ "sc-state-db", "sp-arithmetic", "sp-blockchain", - "sp-consensus", "sp-core", "sp-database", "sp-keyring", @@ -7260,11 +7260,24 @@ name = "sc-consensus" version = "0.10.0-dev" dependencies = [ "async-trait", + "futures 0.3.15", + "futures-timer 3.0.2", + "libp2p", + "log", "parking_lot 0.11.1", "sc-client-api", + "serde", + "sp-api", "sp-blockchain", "sp-consensus", + "sp-core", "sp-runtime", + "sp-state-machine", + "sp-test-primitives", + "sp-utils", + "substrate-prometheus-endpoint", + "thiserror", + "wasm-timer", ] [[package]] @@ -7281,6 +7294,7 @@ dependencies = [ "parking_lot 0.11.1", "sc-block-builder", "sc-client-api", + "sc-consensus", "sc-consensus-slots", "sc-executor", "sc-keystore", @@ -7331,6 +7345,7 @@ dependencies = [ "retain_mut", "sc-block-builder", "sc-client-api", + "sc-consensus", "sc-consensus-epochs", "sc-consensus-slots", "sc-consensus-uncles", @@ -7422,6 +7437,7 @@ dependencies = [ "parking_lot 0.11.1", "sc-basic-authorship", "sc-client-api", + "sc-consensus", 
"sc-consensus-babe", "sc-consensus-epochs", "sc-transaction-pool", @@ -7457,6 +7473,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.11.1", "sc-client-api", + "sc-consensus", "sp-api", "sp-block-builder", "sp-blockchain", @@ -7479,6 +7496,7 @@ dependencies = [ "log", "parity-scale-codec", "sc-client-api", + "sc-consensus", "sc-telemetry", "sp-api", "sp-application-crypto", @@ -7695,6 +7713,7 @@ dependencies = [ "rand 0.8.3", "sc-block-builder", "sc-client-api", + "sc-consensus", "sc-finality-grandpa", "sc-network", "sc-service", @@ -7797,6 +7816,7 @@ dependencies = [ "rand 0.7.3", "sc-block-builder", "sc-client-api", + "sc-consensus", "sc-peerset", "serde", "serde_json", @@ -7954,6 +7974,7 @@ dependencies = [ "serde_json", "sp-api", "sp-blockchain", + "sp-consensus", "sp-core", "sp-io", "sp-keystore", @@ -8048,6 +8069,7 @@ dependencies = [ "sc-chain-spec", "sc-client-api", "sc-client-db", + "sc-consensus", "sc-executor", "sc-finality-grandpa", "sc-informant", @@ -8110,6 +8132,7 @@ dependencies = [ "sc-block-builder", "sc-client-api", "sc-client-db", + "sc-consensus", "sc-executor", "sc-light", "sc-network", @@ -8847,7 +8870,6 @@ dependencies = [ "async-trait", "futures 0.3.15", "futures-timer 3.0.2", - "libp2p", "log", "parity-scale-codec", "parking_lot 0.11.1", @@ -9768,6 +9790,7 @@ dependencies = [ "sp-api", "sp-application-crypto", "sp-block-builder", + "sp-consensus", "sp-consensus-aura", "sp-consensus-babe", "sp-core", @@ -10024,7 +10047,6 @@ dependencies = [ "sc-network", "sc-service", "sp-api", - "sp-consensus", "sp-consensus-babe", "sp-inherents", "sp-keyring", diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index d97f29c00bca..dbdb3074d686 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -32,7 +32,7 @@ pub fn new_partial( FullClient, FullBackend, FullSelectChain, - sp_consensus::DefaultImportQueue, + sc_consensus::DefaultImportQueue, 
sc_transaction_pool::FullPool, ( sc_finality_grandpa::GrandpaBlockImport< diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 47bc5f5b021f..e7181d3caec3 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -46,7 +46,7 @@ pub fn new_partial( FullClient, FullBackend, FullSelectChain, - sp_consensus::DefaultImportQueue, + sc_consensus::DefaultImportQueue, sc_transaction_pool::FullPool, ( impl Fn(node_rpc::DenyUnsafe, sc_rpc::SubscriptionTaskExecutor) -> node_rpc::IoHandler, @@ -595,14 +595,13 @@ mod tests { Address, BalancesCall, Call, UncheckedExtrinsic, }; use sc_client_api::BlockBackend; + use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sc_consensus_babe::{BabeIntermediate, CompatibleDigestItem, INTERMEDIATE_KEY}; use sc_consensus_epochs::descendent_query; use sc_keystore::LocalKeystore; use sc_service_test::TestNetNode; use sc_transaction_pool_api::{ChainEvent, MaintainedTransactionPool}; - use sp_consensus::{ - BlockImport, BlockImportParams, BlockOrigin, Environment, ForkChoiceStrategy, Proposer, - }; + use sp_consensus::{BlockOrigin, Environment, Proposer}; use sp_core::{crypto::Pair as CryptoPair, Public, H256}; use sp_inherents::InherentDataProvider; use sp_keyring::AccountKeyring; diff --git a/bin/node/test-runner-example/Cargo.toml b/bin/node/test-runner-example/Cargo.toml index 3435a34c45c1..ef75731c38a6 100644 --- a/bin/node/test-runner-example/Cargo.toml +++ b/bin/node/test-runner-example/Cargo.toml @@ -31,7 +31,6 @@ sc-informant = { path = "../../../client/informant" } sc-consensus = { path = "../../../client/consensus/common" } sp-runtime = { path = "../../../primitives/runtime" } -sp-consensus = { path = "../../../primitives/consensus/common" } sp-keyring = { path = "../../../primitives/keyring" } sp-timestamp = { path = "../../../primitives/timestamp" } sp-api = { path = "../../../primitives/api" } diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index 
e2a4555e6797..656f9331c5af 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -17,6 +17,7 @@ pallet-balances = { version = "4.0.0-dev", path = "../../../frame/balances" } sc-service = { version = "0.10.0-dev", features = ["test-helpers", "db"], path = "../../../client/service" } sc-client-db = { version = "0.10.0-dev", path = "../../../client/db/", features = ["kvdb-rocksdb", "parity-db"] } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api/" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } codec = { package = "parity-scale-codec", version = "2.0.0" } pallet-contracts = { version = "4.0.0-dev", path = "../../../frame/contracts" } pallet-grandpa = { version = "4.0.0-dev", path = "../../../frame/grandpa" } diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index ceca493874dc..6aaaab04b627 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -45,12 +45,11 @@ use sc_client_api::{ BlockBackend, ExecutionStrategy, }; use sc_client_db::PruningMode; +use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, ImportedAux}; use sc_executor::{NativeExecutor, WasmExecutionMethod}; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; -use sp_consensus::{ - BlockImport, BlockImportParams, BlockOrigin, ForkChoiceStrategy, ImportResult, ImportedAux, -}; +use sp_consensus::BlockOrigin; use sp_core::{blake2_256, ed25519, sr25519, traits::SpawnNamed, ExecutionContext, Pair, Public}; use sp_inherents::InherentData; use sp_runtime::{ diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index 965e0151c3cb..0fcd85120c89 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -42,7 +42,6 @@ use std::{ sync::Arc, }; -pub use sp_consensus::ImportedState; pub use sp_state_machine::Backend as StateBackend; use std::marker::PhantomData; diff --git a/client/consensus/aura/Cargo.toml 
b/client/consensus/aura/Cargo.toml index f5a8aaf9dadb..c23ad5550576 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -20,6 +20,7 @@ sc-block-builder = { version = "0.10.0-dev", path = "../../block-builder" } sc-client-api = { version = "4.0.0-dev", path = "../../api" } codec = { package = "parity-scale-codec", version = "2.0.0" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } derive_more = "0.99.2" futures = "0.3.9" diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index a8036f28f164..96045fde43a9 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -23,6 +23,10 @@ use codec::{Codec, Decode, Encode}; use log::{debug, info, trace}; use prometheus_endpoint::Registry; use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; +use sc_consensus::{ + block_import::{BlockImport, BlockImportParams, ForkChoiceStrategy}, + import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue, Verifier}, +}; use sc_consensus_slots::{check_equivocation, CheckedHeader, InherentDataProviderExt}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_TRACE}; use sp_api::{ApiExt, ProvideRuntimeApi}; @@ -31,11 +35,7 @@ use sp_blockchain::{ well_known_cache_keys::{self, Id as CacheKeyId}, HeaderBackend, ProvideCache, }; -use sp_consensus::{ - import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue, Verifier}, - BlockImport, BlockImportParams, BlockOrigin, CanAuthorWith, Error as ConsensusError, - ForkChoiceStrategy, -}; +use sp_consensus::{BlockOrigin, CanAuthorWith, Error as ConsensusError}; use sp_consensus_aura::{ digests::CompatibleDigestItem, inherents::AuraInherentData, AuraApi, 
ConsensusLog, AURA_ENGINE_ID, diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 8efd39aa612e..d9c089b9561e 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -45,6 +45,7 @@ use log::{debug, trace}; use codec::{Codec, Decode, Encode}; use sc_client_api::{backend::AuxStore, BlockOf, UsageProvider}; +use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}; use sc_consensus_slots::{ BackoffAuthoringBlocksStrategy, InherentDataProviderExt, SlotInfo, StorageChanges, }; @@ -53,8 +54,7 @@ use sp_api::ProvideRuntimeApi; use sp_application_crypto::{AppKey, AppPublic}; use sp_blockchain::{HeaderBackend, ProvideCache, Result as CResult}; use sp_consensus::{ - BlockImport, BlockImportParams, BlockOrigin, CanAuthorWith, Environment, - Error as ConsensusError, ForkChoiceStrategy, Proposer, SelectChain, StateAction, + BlockOrigin, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, }; use sp_consensus_slots::Slot; use sp_core::crypto::{Pair, Public}; @@ -185,7 +185,7 @@ where PF: Environment + Send + Sync + 'static, PF::Proposer: Proposer>, SO: SyncOracle + Send + Sync + Clone, - L: sp_consensus::JustificationSyncLink, + L: sc_consensus::JustificationSyncLink, CIDP: CreateInherentDataProviders + Send, CIDP::InherentDataProviders: InherentDataProviderExt + Send, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, @@ -277,7 +277,7 @@ where I: BlockImport> + Send + Sync + 'static, Error: std::error::Error + Send + From + 'static, SO: SyncOracle + Send + Sync + Clone, - L: sp_consensus::JustificationSyncLink, + L: sc_consensus::JustificationSyncLink, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, { AuraWorker { @@ -324,7 +324,7 @@ where P::Public: AppPublic + Public + Member + Encode + Decode + Hash, P::Signature: TryFrom> + Member + Encode + Decode + Hash + Debug, SO: SyncOracle + Send + Clone, - L: sp_consensus::JustificationSyncLink, + L: 
sc_consensus::JustificationSyncLink, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, Error: std::error::Error + Send + From + 'static, { @@ -395,7 +395,7 @@ where Self::Claim, Self::EpochData, ) -> Result< - sp_consensus::BlockImportParams>, + sc_consensus::BlockImportParams>, sp_consensus::Error, > + Send + 'static, @@ -431,7 +431,7 @@ where import_block.post_digests.push(signature_digest_item); import_block.body = Some(body); import_block.state_action = - StateAction::ApplyChanges(sp_consensus::StorageChanges::Changes(storage_changes)); + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(storage_changes)); import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain); Ok(import_block) @@ -560,14 +560,14 @@ mod tests { use parking_lot::Mutex; use sc_block_builder::BlockBuilderProvider; use sc_client_api::BlockchainEvents; + use sc_consensus::BoxJustificationImport; use sc_consensus_slots::{BackoffAuthoringOnFinalizedHeadLagging, SimpleSlotWorker}; use sc_keystore::LocalKeystore; use sc_network::config::ProtocolConfig; use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::key_types::AURA; use sp_consensus::{ - import_queue::BoxJustificationImport, AlwaysCanAuthor, DisableProofRecording, - NoNetwork as DummyOracle, Proposal, SlotData, + AlwaysCanAuthor, DisableProofRecording, NoNetwork as DummyOracle, Proposal, SlotData, }; use sp_consensus_aura::sr25519::AuthorityPair; use sp_inherents::InherentData; diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index e76e293df5bb..e6538cb57aae 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -16,6 +16,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } sp-consensus-babe = { version = "0.10.0-dev", path = "../../../primitives/consensus/babe" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" 
} sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } sp-application-crypto = { version = "4.0.0-dev", path = "../../../primitives/application-crypto" } sp-keystore = { version = "0.10.0-dev", path = "../../../primitives/keystore" } diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index a8258e2c8352..b09cd6ad86b8 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -85,6 +85,13 @@ use retain_mut::RetainMut; use schnorrkel::SignatureError; use sc_client_api::{backend::AuxStore, BlockchainEvents, ProvideUncles, UsageProvider}; +use sc_consensus::{ + block_import::{ + BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, + StateAction, + }, + import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue, Verifier}, +}; use sc_consensus_epochs::{ descendent_query, Epoch as EpochT, EpochChangesFor, SharedEpochChanges, ViableEpochDescriptor, }; @@ -100,10 +107,8 @@ use sp_blockchain::{ Error as ClientError, HeaderBackend, HeaderMetadata, ProvideCache, Result as ClientResult, }; use sp_consensus::{ - import_queue::{BasicQueue, BoxJustificationImport, CacheKeyId, DefaultImportQueue, Verifier}, - BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, CanAuthorWith, Environment, - Error as ConsensusError, ForkChoiceStrategy, ImportResult, Proposer, SelectChain, SlotData, - StateAction, + BlockOrigin, CacheKeyId, CanAuthorWith, Environment, Error as ConsensusError, Proposer, + SelectChain, SlotData, }; use sp_consensus_babe::inherents::BabeInherentData; use sp_consensus_slots::Slot; @@ -465,7 +470,7 @@ where + Sync + 'static, SO: SyncOracle + Send + Sync + Clone + 'static, - L: sp_consensus::JustificationSyncLink + 'static, + L: sc_consensus::JustificationSyncLink + 'static, CIDP: CreateInherentDataProviders + Send + Sync + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send, BS: BackoffAuthoringBlocksStrategy> + Send + 'static, 
@@ -668,7 +673,7 @@ where E::Proposer: Proposer>, I: BlockImport> + Send + Sync + 'static, SO: SyncOracle + Send + Clone, - L: sp_consensus::JustificationSyncLink, + L: sc_consensus::JustificationSyncLink, BS: BackoffAuthoringBlocksStrategy>, Error: std::error::Error + Send + From + From + 'static, { @@ -774,7 +779,7 @@ where StorageChanges, Self::Claim, Self::EpochData, - ) -> Result, sp_consensus::Error> + ) -> Result, sp_consensus::Error> + Send + 'static, > { @@ -809,7 +814,7 @@ where import_block.post_digests.push(digest_item); import_block.body = Some(body); import_block.state_action = StateAction::ApplyChanges( - sp_consensus::StorageChanges::Changes(storage_changes), + sc_consensus::StorageChanges::Changes(storage_changes), ); import_block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index fa42df356a09..d21911a7fe50 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -29,15 +29,13 @@ use rand::RngCore; use rand_chacha::{rand_core::SeedableRng, ChaChaRng}; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_client_api::{backend::TransactionFor, BlockchainEvents}; +use sc_consensus::{BoxBlockImport, BoxJustificationImport}; use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_keystore::LocalKeystore; use sc_network::config::ProtocolConfig; use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::key_types::BABE; -use sp_consensus::{ - import_queue::{BoxBlockImport, BoxJustificationImport}, - AlwaysCanAuthor, DisableProofRecording, NoNetwork as DummyOracle, Proposal, -}; +use sp_consensus::{AlwaysCanAuthor, DisableProofRecording, NoNetwork as DummyOracle, Proposal}; use sp_consensus_babe::{ inherents::InherentDataProvider, make_transcript, make_transcript_data, AllowedSlots, AuthorityPair, Slot, diff --git a/client/consensus/common/Cargo.toml b/client/consensus/common/Cargo.toml 
index c8d86b06115a..c34e5416f84b 100644 --- a/client/consensus/common/Cargo.toml +++ b/client/consensus/common/Cargo.toml @@ -13,9 +13,24 @@ readme = "README.md" targets = ["x86_64-unknown-linux-gnu"] [dependencies] -async-trait = "0.1" +thiserror = "1.0.21" +libp2p = { version = "0.37.1", default-features = false } +log = "0.4.8" +futures = { version = "0.3.1", features = ["thread-pool"] } +futures-timer = "3.0.1" sc-client-api = { version = "4.0.0-dev", path = "../../api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sp-core = { path = "../../../primitives/core", version = "4.0.0-dev"} +sp-consensus = { path = "../../../primitives/consensus/common", version = "0.10.0-dev"} +sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-machine" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } -sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sp-utils = { version = "4.0.0-dev", path = "../../../primitives/utils" } +sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } parking_lot = "0.11.1" +serde = { version = "1.0", features = ["derive"] } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0" } +wasm-timer = "0.2.5" +async-trait = "0.1.42" + +[dev-dependencies] +sp-test-primitives = { version = "2.0.0", path = "../../../primitives/test-primitives" } diff --git a/primitives/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs similarity index 96% rename from primitives/consensus/common/src/block_import.rs rename to client/consensus/common/src/block_import.rs index c742e24a0cc0..616378fc9b18 100644 --- a/primitives/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -24,7 +24,7 @@ use sp_runtime::{ }; use std::{any::Any, borrow::Cow, collections::HashMap, sync::Arc}; -use 
crate::{import_queue::CacheKeyId, Error}; +use sp_consensus::{BlockOrigin, CacheKeyId, Error}; /// Block import result. #[derive(Debug, PartialEq, Eq)] @@ -92,23 +92,6 @@ impl ImportResult { } } -/// Block data origin. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub enum BlockOrigin { - /// Genesis block built into the client. - Genesis, - /// Block is part of the initial sync with the network. - NetworkInitialSync, - /// Block was broadcasted on the network. - NetworkBroadcast, - /// Block that was received from the network and validated in the consensus process. - ConsensusBroadcast, - /// Block that was collated by this node. - Own, - /// Block was imported from a file. - File, -} - /// Fork choice strategy. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum ForkChoiceStrategy { @@ -354,7 +337,7 @@ impl BlockImport for crate::import_queue::BoxBlockImp where Transaction: Send + 'static, { - type Error = crate::error::Error; + type Error = sp_consensus::error::Error; type Transaction = Transaction; /// Check block preconditions. 
diff --git a/primitives/consensus/common/src/import_queue.rs b/client/consensus/common/src/import_queue.rs similarity index 90% rename from primitives/consensus/common/src/import_queue.rs rename to client/consensus/common/src/import_queue.rs index 6eb8d0a750a2..b1a24e5620d3 100644 --- a/primitives/consensus/common/src/import_queue.rs +++ b/client/consensus/common/src/import_queue.rs @@ -28,6 +28,7 @@ use std::collections::HashMap; +use log::{debug, trace}; use sp_runtime::{ traits::{Block as BlockT, Header as _, NumberFor}, Justifications, @@ -35,13 +36,13 @@ use sp_runtime::{ use crate::{ block_import::{ - BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, ImportResult, ImportedAux, - ImportedState, JustificationImport, StateAction, + BlockCheckParams, BlockImport, BlockImportParams, ImportResult, ImportedAux, ImportedState, + JustificationImport, StateAction, }, - error::Error as ConsensusError, metrics::Metrics, }; pub use basic_queue::BasicQueue; +use sp_consensus::{error::Error as ConsensusError, BlockOrigin, CacheKeyId}; /// A commonly-used Import Queue type. /// @@ -80,7 +81,7 @@ pub struct IncomingBlock { pub origin: Option, /// Allow importing the block skipping state verification if parent state is missing. pub allow_missing_state: bool, - /// Skip block exection and state verification. + /// Skip block execution and state verification. pub skip_execution: bool, /// Re-validate existing block. pub import_existing: bool, @@ -88,9 +89,6 @@ pub struct IncomingBlock { pub state: Option>, } -/// Type of keys in the blockchain cache that consensus module could use for its needs. -pub type CacheKeyId = [u8; 4]; - /// Verify a justification of a block #[async_trait::async_trait] pub trait Verifier: Send + Sync { @@ -137,9 +135,10 @@ pub trait Link: Send { &mut self, _imported: usize, _count: usize, - _results: Vec<(Result>, BlockImportError>, B::Hash)>, + _results: Vec<(BlockImportResult, B::Hash)>, ) { } + /// Justification import result. 
fn justification_imported( &mut self, @@ -149,13 +148,14 @@ pub trait Link: Send { _success: bool, ) { } + /// Request a justification for the given block. fn request_justification(&mut self, _hash: &B::Hash, _number: NumberFor) {} } /// Block import successful result. #[derive(Debug, PartialEq)] -pub enum BlockImportResult { +pub enum BlockImportStatus { /// Imported known block. ImportedKnown(N, Option), /// Imported unknown block. @@ -181,13 +181,15 @@ pub enum BlockImportError { Other(ConsensusError), } +type BlockImportResult = Result>, BlockImportError>; + /// Single block import function. pub async fn import_single_block, Transaction: Send + 'static>( import_handle: &mut impl BlockImport, block_origin: BlockOrigin, block: IncomingBlock, verifier: &mut V, -) -> Result>, BlockImportError> { +) -> BlockImportResult { import_single_block_metered(import_handle, block_origin, block, verifier, None).await } @@ -202,7 +204,7 @@ pub(crate) async fn import_single_block_metered< block: IncomingBlock, verifier: &mut V, metrics: Option, -) -> Result>, BlockImportError> { +) -> BlockImportResult { let peer = block.origin; let (header, justifications) = match (block.header, block.justifications) { @@ -226,16 +228,18 @@ pub(crate) async fn import_single_block_metered< let import_handler = |import| match import { Ok(ImportResult::AlreadyInChain) => { trace!(target: "sync", "Block already in chain {}: {:?}", number, hash); - Ok(BlockImportResult::ImportedKnown(number, peer.clone())) + Ok(BlockImportStatus::ImportedKnown(number, peer.clone())) }, Ok(ImportResult::Imported(aux)) => - Ok(BlockImportResult::ImportedUnknown(number, aux, peer.clone())), + Ok(BlockImportStatus::ImportedUnknown(number, aux, peer.clone())), Ok(ImportResult::MissingState) => { - debug!(target: "sync", "Parent state is missing for {}: {:?}, parent: {:?}", number, hash, parent_hash); + debug!(target: "sync", "Parent state is missing for {}: {:?}, parent: {:?}", + number, hash, parent_hash); 
Err(BlockImportError::MissingState) }, Ok(ImportResult::UnknownParent) => { - debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", number, hash, parent_hash); + debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", + number, hash, parent_hash); Err(BlockImportError::UnknownParent) }, Ok(ImportResult::KnownBad) => { @@ -259,7 +263,7 @@ pub(crate) async fn import_single_block_metered< }) .await, )? { - BlockImportResult::ImportedUnknown { .. } => (), + BlockImportStatus::ImportedUnknown { .. } => (), r => return Ok(r), // Any other successful result means that the block is already imported. } @@ -291,7 +295,8 @@ pub(crate) async fn import_single_block_metered< import_block.indexed_body = block.indexed_body; let mut import_block = import_block.clear_storage_changes_and_mutate(); if let Some(state) = block.state { - import_block.state_action = StateAction::ApplyChanges(crate::StorageChanges::Import(state)); + let changes = crate::block_import::StorageChanges::Import(state); + import_block.state_action = StateAction::ApplyChanges(changes); } else if block.skip_execution { import_block.state_action = StateAction::Skip; } else if block.allow_missing_state { diff --git a/primitives/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs similarity index 94% rename from primitives/consensus/common/src/import_queue/basic_queue.rs rename to client/consensus/common/src/import_queue/basic_queue.rs index 2610a92ad83e..2de5f578a7a6 100644 --- a/primitives/consensus/common/src/import_queue/basic_queue.rs +++ b/client/consensus/common/src/import_queue/basic_queue.rs @@ -14,13 +14,14 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
- use futures::{ prelude::*, task::{Context, Poll}, }; use futures_timer::Delay; +use log::{debug, trace}; use prometheus_endpoint::Registry; +use sp_consensus::BlockOrigin; use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, NumberFor}, Justification, Justifications, @@ -29,10 +30,9 @@ use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnbound use std::{marker::PhantomData, pin::Pin, time::Duration}; use crate::{ - block_import::BlockOrigin, import_queue::{ buffered_link::{self, BufferedLinkReceiver, BufferedLinkSender}, - import_single_block_metered, BlockImportError, BlockImportResult, BoxBlockImport, + import_single_block_metered, BlockImportError, BlockImportStatus, BoxBlockImport, BoxJustificationImport, ImportQueue, IncomingBlock, Link, Origin, Verifier, }, metrics::Metrics, @@ -41,7 +41,7 @@ use crate::{ /// Interface to a basic block import queue that is importing blocks sequentially in a separate /// task, with plugable verification. pub struct BasicQueue { - /// Channel to send justifcation import messages to the background task. + /// Channel to send justification import messages to the background task. justification_sender: TracingUnboundedSender>, /// Channel to send block import messages to the background task. block_import_sender: TracingUnboundedSender>, @@ -156,9 +156,9 @@ mod worker_messages { /// The process of importing blocks. /// -/// This polls the `block_import_receiver` for new blocks to import and than awaits on importing these blocks. -/// After each block is imported, this async function yields once to give other futures the possibility -/// to be run. +/// This polls the `block_import_receiver` for new blocks to import and than awaits on +/// importing these blocks. After each block is imported, this async function yields once +/// to give other futures the possibility to be run. /// /// Returns when `block_import` ended. 
async fn block_import_process( @@ -325,12 +325,13 @@ struct ImportManyBlocksResult { /// The total number of blocks processed. block_count: usize, /// The import results for each block. - results: Vec<(Result>, BlockImportError>, B::Hash)>, + results: Vec<(Result>, BlockImportError>, B::Hash)>, } /// Import several blocks at once, returning import result for each block. /// -/// This will yield after each imported block once, to ensure that other futures can be called as well. +/// This will yield after each imported block once, to ensure that other futures can +/// be called as well. async fn import_many_blocks, Transaction: Send + 'static>( import_handle: &mut BoxBlockImport, blocks_origin: BlockOrigin, @@ -410,11 +411,11 @@ async fn import_many_blocks, Transaction: Send + 'stat } } -/// A future that will always `yield` on the first call of `poll` but schedules the current task for -/// re-execution. +/// A future that will always `yield` on the first call of `poll` but schedules the +/// current task for re-execution. /// -/// This is done by getting the waker and calling `wake_by_ref` followed by returning `Pending`. -/// The next time the `poll` is called, it will return `Ready`. +/// This is done by getting the waker and calling `wake_by_ref` followed by returning +/// `Pending`. The next time the `poll` is called, it will return `Ready`. 
struct Yield(bool); impl Yield { @@ -441,8 +442,10 @@ impl Future for Yield { mod tests { use super::*; use crate::{ + block_import::{ + BlockCheckParams, BlockImport, BlockImportParams, ImportResult, JustificationImport, + }, import_queue::{CacheKeyId, Verifier}, - BlockCheckParams, BlockImport, BlockImportParams, ImportResult, JustificationImport, }; use futures::{executor::block_on, Future}; use sp_test_primitives::{Block, BlockNumber, Extrinsic, Hash, Header}; @@ -463,7 +466,7 @@ mod tests { #[async_trait::async_trait] impl BlockImport for () { - type Error = crate::Error; + type Error = sp_consensus::Error; type Transaction = Extrinsic; async fn check_block( @@ -484,7 +487,7 @@ mod tests { #[async_trait::async_trait] impl JustificationImport for () { - type Error = crate::Error; + type Error = sp_consensus::Error; async fn on_start(&mut self) -> Vec<(Hash, BlockNumber)> { Vec::new() @@ -516,7 +519,7 @@ mod tests { &mut self, _imported: usize, _count: usize, - results: Vec<(Result, BlockImportError>, Hash)>, + results: Vec<(Result, BlockImportError>, Hash)>, ) { if let Some(hash) = results.into_iter().find_map(|(r, h)| r.ok().map(|_| h)) { self.events.push(Event::BlockImported(hash)); diff --git a/primitives/consensus/common/src/import_queue/buffered_link.rs b/client/consensus/common/src/import_queue/buffered_link.rs similarity index 92% rename from primitives/consensus/common/src/import_queue/buffered_link.rs rename to client/consensus/common/src/import_queue/buffered_link.rs index 8d146dfbe461..45aaf706ee1b 100644 --- a/primitives/consensus/common/src/import_queue/buffered_link.rs +++ b/client/consensus/common/src/import_queue/buffered_link.rs @@ -22,8 +22,8 @@ //! # Example //! //! ``` -//! use sp_consensus::import_queue::Link; -//! # use sp_consensus::import_queue::buffered_link::buffered_link; +//! use sc_consensus::import_queue::Link; +//! # use sc_consensus::import_queue::buffered_link::buffered_link; //! # use sp_test_primitives::Block; //! 
# struct DummyLink; impl Link for DummyLink {} //! # let mut my_link = DummyLink; @@ -37,7 +37,7 @@ //! }); //! ``` -use crate::import_queue::{BlockImportError, BlockImportResult, Link, Origin}; +use crate::import_queue::{Link, Origin}; use futures::prelude::*; use sp_runtime::traits::{Block as BlockT, NumberFor}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; @@ -46,6 +46,8 @@ use std::{ task::{Context, Poll}, }; +use super::BlockImportResult; + /// Wraps around an unbounded channel from the `futures` crate. The sender implements `Link` and /// can be used to buffer commands, and the receiver can be used to poll said commands and transfer /// them to another link. @@ -78,11 +80,7 @@ impl Clone for BufferedLinkSender { /// Internal buffered message. enum BlockImportWorkerMsg { - BlocksProcessed( - usize, - usize, - Vec<(Result>, BlockImportError>, B::Hash)>, - ), + BlocksProcessed(usize, usize, Vec<(BlockImportResult, B::Hash)>), JustificationImported(Origin, B::Hash, NumberFor, bool), RequestJustification(B::Hash, NumberFor), } @@ -92,7 +90,7 @@ impl Link for BufferedLinkSender { &mut self, imported: usize, count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, + results: Vec<(BlockImportResult, B::Hash)>, ) { let _ = self .tx diff --git a/client/consensus/common/src/lib.rs b/client/consensus/common/src/lib.rs index 9b4d70576919..640bad237e88 100644 --- a/client/consensus/common/src/lib.rs +++ b/client/consensus/common/src/lib.rs @@ -18,7 +18,22 @@ //! 
Collection of common consensus specific implementations +pub mod block_import; +pub mod import_queue; +pub mod metrics; + +pub use block_import::{ + BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, + ImportedAux, ImportedState, JustificationImport, JustificationSyncLink, StateAction, + StorageChanges, +}; +pub use import_queue::{ + import_single_block, BasicQueue, BlockImportError, BlockImportStatus, BoxBlockImport, + BoxJustificationImport, DefaultImportQueue, ImportQueue, IncomingBlock, Link, Verifier, +}; + mod longest_chain; + pub mod shared_data; pub use longest_chain::LongestChain; diff --git a/primitives/consensus/common/src/metrics.rs b/client/consensus/common/src/metrics.rs similarity index 96% rename from primitives/consensus/common/src/metrics.rs rename to client/consensus/common/src/metrics.rs index c56f68625b6a..e9af41914a6e 100644 --- a/primitives/consensus/common/src/metrics.rs +++ b/client/consensus/common/src/metrics.rs @@ -24,7 +24,7 @@ use prometheus_endpoint::{ use sp_runtime::traits::{Block as BlockT, NumberFor}; -use crate::import_queue::{BlockImportError, BlockImportResult}; +use crate::import_queue::{BlockImportError, BlockImportStatus}; /// Generic Prometheus metrics for common consensus functionality. 
#[derive(Clone)] @@ -71,7 +71,7 @@ impl Metrics { pub fn report_import( &self, - result: &Result>, BlockImportError>, + result: &Result>, BlockImportError>, ) { let label = match result { Ok(_) => "success", diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 8a236b0591b8..a0de596b005b 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -26,6 +26,7 @@ assert_matches = "1.3.0" async-trait = "0.1.50" sc-client-api = { path = "../../api", version = "4.0.0-dev"} +sc-consensus = { version = "0.10.0-dev", path = "../../consensus/common" } sc-consensus-babe = { path = "../../consensus/babe", version = "0.10.0-dev"} sc-consensus-epochs = { path = "../../consensus/epochs", version = "0.10.0-dev"} sp-consensus-babe = { path = "../../../primitives/consensus/babe", version = "0.10.0-dev"} diff --git a/client/consensus/manual-seal/src/consensus.rs b/client/consensus/manual-seal/src/consensus.rs index 1f7ee413b71d..33a4c8616f6d 100644 --- a/client/consensus/manual-seal/src/consensus.rs +++ b/client/consensus/manual-seal/src/consensus.rs @@ -19,7 +19,7 @@ //! Extensions for manual seal to produce blocks valid for any runtime. 
use super::Error; -use sp_consensus::BlockImportParams; +use sc_consensus::BlockImportParams; use sp_inherents::InherentData; use sp_runtime::traits::{Block as BlockT, DigestFor}; diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 3773c7c3cf12..9edcb8fd13a1 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -36,12 +36,10 @@ use std::{ time::SystemTime, }; +use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier}; use sp_api::{ProvideRuntimeApi, TransactionFor}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; -use sp_consensus::{ - import_queue::{CacheKeyId, Verifier}, - BlockImportParams, BlockOrigin, ForkChoiceStrategy, -}; +use sp_consensus::{BlockOrigin, CacheKeyId}; use sp_consensus_babe::{ digests::{NextEpochDescriptor, PreDigest, SecondaryPlainPreDigest}, inherents::BabeInherentData, diff --git a/client/consensus/manual-seal/src/error.rs b/client/consensus/manual-seal/src/error.rs index cd7fc0ee73ce..8585e6a70d64 100644 --- a/client/consensus/manual-seal/src/error.rs +++ b/client/consensus/manual-seal/src/error.rs @@ -20,8 +20,9 @@ //! This is suitable for a testing environment. 
use futures::channel::{mpsc::SendError, oneshot}; +use sc_consensus::ImportResult; use sp_blockchain::Error as BlockchainError; -use sp_consensus::{Error as ConsensusError, ImportResult}; +use sp_consensus::Error as ConsensusError; use sp_inherents::Error as InherentsError; /// Error code for rpc diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 1aacd22aa7bb..7d4dfefe50c6 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -22,12 +22,12 @@ use futures::prelude::*; use prometheus_endpoint::Registry; use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; -use sp_blockchain::HeaderBackend; -use sp_consensus::{ - import_queue::{BasicQueue, BoxBlockImport, CacheKeyId, Verifier}, - BlockImport, BlockImportParams, BlockOrigin, Environment, ForkChoiceStrategy, Proposer, - SelectChain, +use sc_consensus::{ + block_import::{BlockImport, BlockImportParams, ForkChoiceStrategy}, + import_queue::{BasicQueue, BoxBlockImport, Verifier}, }; +use sp_blockchain::HeaderBackend; +use sp_consensus::{BlockOrigin, CacheKeyId, Environment, Proposer, SelectChain}; use sp_inherents::CreateInherentDataProviders; use sp_runtime::{traits::Block as BlockT, ConsensusEngineId, Justifications}; use std::{marker::PhantomData, sync::Arc}; @@ -257,9 +257,9 @@ mod tests { use super::*; use sc_basic_authorship::ProposerFactory; use sc_client_api::BlockBackend; + use sc_consensus::ImportedAux; use sc_transaction_pool::{BasicPool, Options, RevalidationType}; use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionSource}; - use sp_consensus::ImportedAux; use sp_runtime::generic::BlockId; use substrate_test_runtime_client::{ AccountKeyring::*, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, diff --git a/client/consensus/manual-seal/src/rpc.rs b/client/consensus/manual-seal/src/rpc.rs index 0f686bc26e7d..699505b00c3c 100644 --- 
a/client/consensus/manual-seal/src/rpc.rs +++ b/client/consensus/manual-seal/src/rpc.rs @@ -25,8 +25,8 @@ use futures::{ }; use jsonrpc_core::Error; use jsonrpc_derive::rpc; +use sc_consensus::ImportedAux; use serde::{Deserialize, Serialize}; -use sp_consensus::ImportedAux; use sp_runtime::EncodedJustification; /// Future's type for jsonrpc diff --git a/client/consensus/manual-seal/src/seal_block.rs b/client/consensus/manual-seal/src/seal_block.rs index be97e0ccc360..502705b41162 100644 --- a/client/consensus/manual-seal/src/seal_block.rs +++ b/client/consensus/manual-seal/src/seal_block.rs @@ -20,13 +20,11 @@ use crate::{rpc, ConsensusDataProvider, CreatedBlock, Error}; use futures::prelude::*; +use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction}; use sc_transaction_pool_api::TransactionPool; use sp_api::{ProvideRuntimeApi, TransactionFor}; use sp_blockchain::HeaderBackend; -use sp_consensus::{ - self, BlockImport, BlockImportParams, BlockOrigin, Environment, ForkChoiceStrategy, - ImportResult, Proposer, SelectChain, StateAction, -}; +use sp_consensus::{self, BlockOrigin, Environment, Proposer, SelectChain}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_runtime::{ generic::BlockId, @@ -147,7 +145,7 @@ pub async fn seal_block( params.body = Some(body); params.finalized = finalize; params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - params.state_action = StateAction::ApplyChanges(sp_consensus::StorageChanges::Changes( + params.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes( proposal.storage_changes, )); diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index 77ed9ba04ce9..368005fafb13 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -23,6 +23,7 @@ sp-block-builder = { version = "4.0.0-dev", path = "../../../primitives/block-bu sp-inherents = { version = "4.0.0-dev", path = 
"../../../primitives/inherents" } sp-consensus-pow = { version = "0.10.0-dev", path = "../../../primitives/consensus/pow" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } log = "0.4.8" futures = { version = "0.3.1", features = ["compat"] } futures-timer = "3.0.1" diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 7e5b5a59c917..85a37e73535a 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -43,19 +43,23 @@ mod worker; pub use crate::worker::{MiningBuild, MiningMetadata, MiningWorker}; +use crate::worker::UntilImportedOrTimeout; use codec::{Decode, Encode}; use futures::{Future, StreamExt}; use log::*; use parking_lot::Mutex; use prometheus_endpoint::Registry; use sc_client_api::{self, backend::AuxStore, BlockOf, BlockchainEvents}; +use sc_consensus::{ + BasicQueue, BlockCheckParams, BlockImport, BlockImportParams, BoxBlockImport, + BoxJustificationImport, ForkChoiceStrategy, ImportResult, Verifier, +}; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend, ProvideCache}; use sp_consensus::{ - import_queue::{BasicQueue, BoxBlockImport, BoxJustificationImport, Verifier}, - BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, CanAuthorWith, Environment, - Error as ConsensusError, ForkChoiceStrategy, ImportResult, Proposer, SelectChain, SyncOracle, + BlockOrigin, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, + SyncOracle, }; use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; @@ -69,8 +73,6 @@ use std::{ time::Duration, }; -use crate::worker::UntilImportedOrTimeout; - #[derive(derive_more::Display, Debug)] pub enum Error { #[display(fmt = "Header uses 
the wrong engine {:?}", _0)] @@ -540,7 +542,7 @@ where E::Error: std::fmt::Debug, E::Proposer: Proposer>, SO: SyncOracle + Clone + Send + Sync + 'static, - L: sp_consensus::JustificationSyncLink, + L: sc_consensus::JustificationSyncLink, CIDP: CreateInherentDataProviders, CAW: CanAuthorWith + Clone + Send + 'static, { diff --git a/client/consensus/pow/src/worker.rs b/client/consensus/pow/src/worker.rs index 572ed364c8f8..c0ca16ccad3a 100644 --- a/client/consensus/pow/src/worker.rs +++ b/client/consensus/pow/src/worker.rs @@ -23,10 +23,8 @@ use futures::{ use futures_timer::Delay; use log::*; use sc_client_api::ImportNotifications; -use sp_consensus::{ - import_queue::BoxBlockImport, BlockImportParams, BlockOrigin, Proposal, StateAction, - StorageChanges, -}; +use sc_consensus::{BlockImportParams, BoxBlockImport, StateAction, StorageChanges}; +use sp_consensus::{BlockOrigin, Proposal}; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, Header as HeaderT}, @@ -67,7 +65,7 @@ pub struct MiningWorker< Block: BlockT, Algorithm: PowAlgorithm, C: sp_api::ProvideRuntimeApi, - L: sp_consensus::JustificationSyncLink, + L: sc_consensus::JustificationSyncLink, Proof, > { pub(crate) build: Option>, @@ -82,7 +80,7 @@ where C: sp_api::ProvideRuntimeApi, Algorithm: PowAlgorithm, Algorithm::Difficulty: 'static + Send, - L: sp_consensus::JustificationSyncLink, + L: sc_consensus::JustificationSyncLink, sp_api::TransactionFor: Send + 'static, { /// Get the current best hash. 
`None` if the worker has just started or the client is doing diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 22697e94d358..4e027ccab772 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -21,6 +21,7 @@ sp-trie = { version = "4.0.0-dev", path = "../../../primitives/trie" } sp-application-crypto = { version = "4.0.0-dev", path = "../../../primitives/application-crypto" } sp-arithmetic = { version = "4.0.0-dev", path = "../../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sp-consensus-slots = { version = "0.10.0-dev", path = "../../../primitives/consensus/slots" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } sp-state-machine = { version = "0.10.0-dev", path = "../../../primitives/state-machine" } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 1a4f29ff8cb0..1aa8d984d3fa 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -36,12 +36,11 @@ use codec::{Decode, Encode}; use futures::{future::Either, Future, TryFutureExt}; use futures_timer::Delay; use log::{debug, error, info, warn}; +use sc_consensus::{BlockImport, JustificationSyncLink}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_WARN}; use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_arithmetic::traits::BaseArithmetic; -use sp_consensus::{ - BlockImport, CanAuthorWith, JustificationSyncLink, Proposer, SelectChain, SlotData, SyncOracle, -}; +use sp_consensus::{CanAuthorWith, Proposer, SelectChain, SlotData, SyncOracle}; use sp_consensus_slots::Slot; use sp_inherents::CreateInherentDataProviders; use sp_runtime::{ @@ -160,7 +159,7 @@ pub trait SimpleSlotWorker { Self::Claim, Self::EpochData, ) -> Result< - sp_consensus::BlockImportParams< + 
sc_consensus::BlockImportParams< B, >::Transaction, >, diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index 5873883a11ee..856770c31f3e 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -32,7 +32,6 @@ sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-mach sc-executor = { version = "0.10.0-dev", path = "../executor" } sc-state-db = { version = "0.10.0-dev", path = "../state-db" } sp-trie = { version = "4.0.0-dev", path = "../../primitives/trie" } -sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-database = { version = "4.0.0-dev", path = "../../primitives/database" } parity-db = { version = "0.2.4", optional = true } diff --git a/client/finality-grandpa-warp-sync/Cargo.toml b/client/finality-grandpa-warp-sync/Cargo.toml index 43a7cc0565cd..62fe59608333 100644 --- a/client/finality-grandpa-warp-sync/Cargo.toml +++ b/client/finality-grandpa-warp-sync/Cargo.toml @@ -32,5 +32,6 @@ finality-grandpa = { version = "0.14.1" } rand = "0.8" sc-block-builder = { version = "0.10.0-dev", path = "../block-builder" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } sp-keyring = { version = "4.0.0-dev", path = "../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 18e5e2c89d06..84e6fa9e1fba 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -22,14 +22,14 @@ use log::debug; use parity_scale_codec::Encode; use sc_client_api::{backend::Backend, utils::is_descendent_of}; -use sc_consensus::shared_data::{SharedDataLocked, SharedDataLockedUpgradable}; +use sc_consensus::{ + 
shared_data::{SharedDataLocked, SharedDataLockedUpgradable}, + BlockCheckParams, BlockImport, BlockImportParams, ImportResult, JustificationImport, +}; use sc_telemetry::TelemetryHandle; use sp_api::TransactionFor; use sp_blockchain::{well_known_cache_keys, BlockStatus}; -use sp_consensus::{ - BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, Error as ConsensusError, - ImportResult, JustificationImport, SelectChain, -}; +use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain}; use sp_finality_grandpa::{ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID}; use sp_runtime::{ generic::{BlockId, OpaqueDigestItemId}, diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 1e34202ef8f9..8f8ce25b60a5 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -66,11 +66,12 @@ use sc_client_api::{ BlockchainEvents, CallExecutor, ExecutionStrategy, ExecutorProvider, Finalizer, LockImportRun, TransactionFor, }; +use sc_consensus::BlockImport; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sp_api::ProvideRuntimeApi; use sp_application_crypto::AppKey; use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; -use sp_consensus::{BlockImport, SelectChain}; +use sp_consensus::SelectChain; use sp_core::crypto::Public; use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 526451696b8b..bf9faec70753 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -24,6 +24,10 @@ use environment::HasVoted; use futures::executor::block_on; use futures_timer::Delay; use parking_lot::{Mutex, RwLock}; +use sc_consensus::{ + BlockImport, BlockImportParams, BoxJustificationImport, ForkChoiceStrategy, ImportResult, + ImportedAux, +}; use sc_network::config::{ProtocolConfig, Role}; use 
sc_network_test::{ Block, BlockImportAdapter, FullPeerConfig, Hash, PassThroughVerifier, Peer, PeersClient, @@ -31,10 +35,7 @@ use sc_network_test::{ }; use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_blockchain::Result; -use sp_consensus::{ - import_queue::BoxJustificationImport, BlockImport, BlockImportParams, BlockOrigin, - ForkChoiceStrategy, ImportResult, ImportedAux, -}; +use sp_consensus::BlockOrigin; use sp_core::H256; use sp_finality_grandpa::{ AuthorityList, EquivocationProof, GrandpaApi, OpaqueKeyOwnershipProof, GRANDPA_ENGINE_ID, diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 7ca98150f9dd..9c6b580fb9c6 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -53,6 +53,7 @@ smallvec = "1.5.0" sp-arithmetic = { version = "4.0.0-dev", path = "../../primitives/arithmetic" } sp-blockchain = { version = "4.0.0-dev", path = "../../primitives/blockchain" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } sp-utils = { version = "4.0.0-dev", path = "../../primitives/utils" } diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 37dfc0cf99c2..73d5ec357b2c 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -36,10 +36,8 @@ use libp2p::{ }; use log::debug; use prost::Message; -use sp_consensus::{ - import_queue::{IncomingBlock, Origin}, - BlockOrigin, -}; +use sc_consensus::import_queue::{IncomingBlock, Origin}; +use sp_consensus::BlockOrigin; use sp_runtime::{ traits::{Block as BlockT, NumberFor}, Justifications, diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index 599e9d796c11..7c131dd75370 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -19,7 +19,8 @@ //! 
Blockchain access trait use sc_client_api::{BlockBackend, ProofProvider}; -pub use sc_client_api::{ImportedState, StorageData, StorageKey}; +pub use sc_client_api::{StorageData, StorageKey}; +pub use sc_consensus::ImportedState; use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; use sp_runtime::traits::{Block as BlockT, BlockIdTo}; diff --git a/client/network/src/config.rs b/client/network/src/config.rs index cddc52352485..2581a08d4246 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -44,7 +44,8 @@ use libp2p::{ multiaddr, wasm_ext, Multiaddr, PeerId, }; use prometheus_endpoint::Registry; -use sp_consensus::{block_validation::BlockAnnounceValidator, import_queue::ImportQueue}; +use sc_consensus::ImportQueue; +use sp_consensus::block_validation::BlockAnnounceValidator; use sp_runtime::traits::Block as BlockT; use std::{ borrow::Cow, diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs index bdef28f9bebe..f4f96b863d62 100644 --- a/client/network/src/gossip/tests.rs +++ b/client/network/src/gossip/tests.rs @@ -50,7 +50,7 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) struct PassThroughVerifier(bool); #[async_trait::async_trait] - impl sp_consensus::import_queue::Verifier for PassThroughVerifier { + impl sc_consensus::Verifier for PassThroughVerifier { async fn verify( &mut self, origin: sp_consensus::BlockOrigin, @@ -59,7 +59,7 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) body: Option>, ) -> Result< ( - sp_consensus::BlockImportParams, + sc_consensus::BlockImportParams, Option)>>, ), String, @@ -79,16 +79,16 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) )] }); - let mut import = sp_consensus::BlockImportParams::new(origin, header); + let mut import = sc_consensus::BlockImportParams::new(origin, header); import.body = body; import.finalized = self.0; import.justifications = justifications; - import.fork_choice = 
Some(sp_consensus::ForkChoiceStrategy::LongestChain); + import.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); Ok((import, maybe_keys)) } } - let import_queue = Box::new(sp_consensus::import_queue::BasicQueue::new( + let import_queue = Box::new(sc_consensus::BasicQueue::new( PassThroughVerifier(false), Box::new(client.clone()), None, diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 0838657fae53..2af33cd1c5a1 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -48,12 +48,9 @@ use message::{ use notifications::{Notifications, NotificationsOut}; use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64}; use prost::Message as _; +use sc_consensus::import_queue::{BlockImportError, BlockImportStatus, IncomingBlock, Origin}; use sp_arithmetic::traits::SaturatedConversion; -use sp_consensus::{ - block_validation::BlockAnnounceValidator, - import_queue::{BlockImportError, BlockImportResult, IncomingBlock, Origin}, - BlockOrigin, -}; +use sp_consensus::{block_validation::BlockAnnounceValidator, BlockOrigin}; use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, Zero}, @@ -1048,7 +1045,7 @@ impl Protocol { &mut self, imported: usize, count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, + results: Vec<(Result>, BlockImportError>, B::Hash)>, ) { let results = self.sync.on_blocks_processed(imported, count, results); for result in results { diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 3e49a90e9387..8918d7adde09 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -39,11 +39,11 @@ use extra_requests::ExtraRequests; use futures::{stream::FuturesUnordered, task::Poll, Future, FutureExt, StreamExt}; use libp2p::PeerId; use log::{debug, error, info, trace, warn}; +use sc_consensus::{BlockImportError, 
BlockImportStatus, IncomingBlock}; use sp_arithmetic::traits::Saturating; use sp_blockchain::{Error as ClientError, HeaderMetadata}; use sp_consensus::{ block_validation::{BlockAnnounceValidator, Validation}, - import_queue::{BlockImportError, BlockImportResult, IncomingBlock}, BlockOrigin, BlockStatus, }; use sp_runtime::{ @@ -1240,7 +1240,7 @@ impl ChainSync { &'a mut self, imported: usize, count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, + results: Vec<(Result>, BlockImportError>, B::Hash)>, ) -> impl Iterator), BadPeer>> + 'a { trace!(target: "sync", "Imported {} of {}", imported, count); @@ -1260,12 +1260,12 @@ impl ChainSync { } match result { - Ok(BlockImportResult::ImportedKnown(number, who)) => { + Ok(BlockImportStatus::ImportedKnown(number, who)) => { if let Some(peer) = who.and_then(|p| self.peers.get_mut(&p)) { peer.update_common_number(number); } }, - Ok(BlockImportResult::ImportedUnknown(number, aux, who)) => { + Ok(BlockImportStatus::ImportedUnknown(number, aux, who)) => { if aux.clear_justification_requests { trace!( target: "sync", @@ -2454,7 +2454,7 @@ mod test { /// /// The node is connected to multiple peers. Both of these peers are having a best block (1) that /// is below our best block (3). Now peer 2 announces a fork of block 3 that we will - /// request from peer 2. After imporitng the fork, peer 2 and then peer 1 will announce block 4. + /// request from peer 2. After importing the fork, peer 2 and then peer 1 will announce block 4. /// But as peer 1 in our view is still at block 1, we will request block 2 (which we already have) /// from it. In the meanwhile peer 2 sends us block 4 and 3 and we send another request for block /// 2 to peer 2. Peer 1 answers with block 2 and then peer 2. 
This will need to succeed, as we @@ -2777,7 +2777,7 @@ mod test { .rev() .map(|b| { ( - Ok(BlockImportResult::ImportedUnknown( + Ok(BlockImportStatus::ImportedUnknown( b.header().number().clone(), Default::default(), Some(peer_id1.clone()), diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 89685849f5bf..83cf2d675823 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -68,8 +68,8 @@ use libp2p::{ use log::{debug, error, info, trace, warn}; use metrics::{Histogram, HistogramVec, MetricSources, Metrics}; use parking_lot::Mutex; +use sc_consensus::{BlockImportError, BlockImportStatus, ImportQueue, Link}; use sc_peerset::PeersetHandle; -use sp_consensus::import_queue::{BlockImportError, BlockImportResult, ImportQueue, Link}; use sp_runtime::traits::{Block as BlockT, NumberFor}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{ @@ -1265,7 +1265,7 @@ impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle for &'a Netwo } } -impl sp_consensus::JustificationSyncLink for NetworkService { +impl sc_consensus::JustificationSyncLink for NetworkService { fn request_justification(&self, hash: &B::Hash, number: NumberFor) { NetworkService::request_justification(self, hash, number); } @@ -2104,7 +2104,7 @@ impl<'a, B: BlockT> Link for NetworkLink<'a, B> { &mut self, imported: usize, count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, + results: Vec<(Result>, BlockImportError>, B::Hash)>, ) { self.protocol .behaviour_mut() diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index 7acfeadcae13..a149b09a22dd 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -47,7 +47,7 @@ fn build_test_full_node( struct PassThroughVerifier(bool); #[async_trait::async_trait] - impl sp_consensus::import_queue::Verifier for PassThroughVerifier { + impl sc_consensus::Verifier for 
PassThroughVerifier { async fn verify( &mut self, origin: sp_consensus::BlockOrigin, @@ -56,7 +56,7 @@ fn build_test_full_node( body: Option>, ) -> Result< ( - sp_consensus::BlockImportParams, + sc_consensus::BlockImportParams, Option)>>, ), String, @@ -75,16 +75,16 @@ fn build_test_full_node( vec![(sp_blockchain::well_known_cache_keys::AUTHORITIES, blob.to_vec())] }); - let mut import = sp_consensus::BlockImportParams::new(origin, header); + let mut import = sc_consensus::BlockImportParams::new(origin, header); import.body = body; import.finalized = self.0; import.justifications = justifications; - import.fork_choice = Some(sp_consensus::ForkChoiceStrategy::LongestChain); + import.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); Ok((import, maybe_keys)) } } - let import_queue = Box::new(sp_consensus::import_queue::BasicQueue::new( + let import_queue = Box::new(sc_consensus::BasicQueue::new( PassThroughVerifier(false), Box::new(client.clone()), None, diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index 4593e06250d3..7b5804e0edb7 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -21,12 +21,11 @@ use super::*; use futures::executor::block_on; use sc_block_builder::BlockBuilderProvider; -use sp_consensus::{ - import_queue::{ - import_single_block, BasicQueue, BlockImportError, BlockImportResult, IncomingBlock, - }, - ImportedAux, +use sc_consensus::{ + import_single_block, BasicQueue, BlockImportError, BlockImportStatus, ImportedAux, + IncomingBlock, }; +use sp_consensus::BlockOrigin; use sp_runtime::generic::BlockId; use substrate_test_runtime_client::{ self, @@ -76,7 +75,7 @@ fn import_single_good_block_works() { block, &mut PassThroughVerifier::new(true), )) { - Ok(BlockImportResult::ImportedUnknown(ref num, ref aux, ref org)) + Ok(BlockImportStatus::ImportedUnknown(ref num, ref aux, ref org)) if *num == number && *aux == expected_aux && *org == 
Some(peer_id) => {}, r @ _ => panic!("{:?}", r), } @@ -91,7 +90,7 @@ fn import_single_good_known_block_is_ignored() { block, &mut PassThroughVerifier::new(true), )) { - Ok(BlockImportResult::ImportedKnown(ref n, _)) if *n == number => {}, + Ok(BlockImportStatus::ImportedKnown(ref n, _)) if *n == number => {}, _ => panic!(), } } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 0bdaa0d14e4f..553353d77ac3 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -40,7 +40,10 @@ use sc_client_api::{ BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification, FinalityNotifications, ImportNotifications, }; -use sc_consensus::LongestChain; +use sc_consensus::{ + BasicQueue, BlockCheckParams, BlockImport, BlockImportParams, BoxJustificationImport, + ForkChoiceStrategy, ImportResult, JustificationImport, LongestChain, Verifier, +}; pub use sc_network::config::EmptyTransactionPool; use sc_network::{ block_request_handler::{self, BlockRequestHandler}, @@ -58,11 +61,8 @@ use sp_blockchain::{ HeaderBackend, Info as BlockchainInfo, Result as ClientResult, }; use sp_consensus::{ - block_import::{BlockImport, ImportResult}, block_validation::{BlockAnnounceValidator, DefaultBlockAnnounceValidator}, - import_queue::{BasicQueue, BoxJustificationImport, Verifier}, - BlockCheckParams, BlockImportParams, BlockOrigin, Error as ConsensusError, ForkChoiceStrategy, - JustificationImport, + BlockOrigin, Error as ConsensusError, }; use sp_core::H256; use sp_runtime::{ @@ -152,7 +152,7 @@ pub enum PeersClient { impl PeersClient { pub fn as_full(&self) -> Option> { match *self { - PeersClient::Full(ref client, ref _backend) => Some(client.clone()), + PeersClient::Full(ref client, _) => Some(client.clone()), _ => None, } } @@ -163,15 +163,15 @@ impl PeersClient { pub fn get_aux(&self, key: &[u8]) -> ClientResult>> { match *self { - PeersClient::Full(ref client, ref _backend) => client.get_aux(key), - 
PeersClient::Light(ref client, ref _backend) => client.get_aux(key), + PeersClient::Full(ref client, _) => client.get_aux(key), + PeersClient::Light(ref client, _) => client.get_aux(key), } } pub fn info(&self) -> BlockchainInfo { match *self { - PeersClient::Full(ref client, ref _backend) => client.chain_info(), - PeersClient::Light(ref client, ref _backend) => client.chain_info(), + PeersClient::Full(ref client, _) => client.chain_info(), + PeersClient::Light(ref client, _) => client.chain_info(), } } @@ -180,8 +180,8 @@ impl PeersClient { block: &BlockId, ) -> ClientResult::Header>> { match *self { - PeersClient::Full(ref client, ref _backend) => client.header(block), - PeersClient::Light(ref client, ref _backend) => client.header(block), + PeersClient::Full(ref client, _) => client.header(block), + PeersClient::Light(ref client, _) => client.header(block), } } @@ -200,22 +200,22 @@ impl PeersClient { pub fn justifications(&self, block: &BlockId) -> ClientResult> { match *self { - PeersClient::Full(ref client, ref _backend) => client.justifications(block), - PeersClient::Light(ref client, ref _backend) => client.justifications(block), + PeersClient::Full(ref client, _) => client.justifications(block), + PeersClient::Light(ref client, _) => client.justifications(block), } } pub fn finality_notification_stream(&self) -> FinalityNotifications { match *self { - PeersClient::Full(ref client, ref _backend) => client.finality_notification_stream(), - PeersClient::Light(ref client, ref _backend) => client.finality_notification_stream(), + PeersClient::Full(ref client, _) => client.finality_notification_stream(), + PeersClient::Light(ref client, _) => client.finality_notification_stream(), } } pub fn import_notification_stream(&self) -> ImportNotifications { match *self { - PeersClient::Full(ref client, ref _backend) => client.import_notification_stream(), - PeersClient::Light(ref client, ref _backend) => client.import_notification_stream(), + PeersClient::Full(ref 
client, _) => client.import_notification_stream(), + PeersClient::Light(ref client, _) => client.import_notification_stream(), } } diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index 67e78c8de8de..04eb8b8b3f78 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -53,6 +53,7 @@ substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/ru tokio = "0.1.22" sc-transaction-pool = { version = "4.0.0-dev", path = "../transaction-pool" } sc-cli = { version = "0.10.0-dev", path = "../cli" } +sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } [features] test-helpers = ["lazy_static"] diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index 9bd08a1796ad..bf682a57a341 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -24,11 +24,11 @@ use futures::{ executor, }; use sc_block_builder::BlockBuilderProvider; +use sp_consensus::BlockOrigin; use sp_rpc::list::ListOrValue; use substrate_test_runtime_client::{ prelude::*, runtime::{Block, Header, H256}, - sp_consensus::BlockOrigin, }; #[test] diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index dd99360bafba..3990d6ea8ad3 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -25,11 +25,12 @@ use futures::{compat::Future01CompatExt, executor}; use futures01::stream::Stream; use sc_block_builder::BlockBuilderProvider; use sc_rpc_api::DenyUnsafe; +use sp_consensus::BlockOrigin; use sp_core::{hash::H256, storage::ChildInfo, ChangesTrieConfiguration}; use sp_io::hashing::blake2_256; use sp_runtime::generic::BlockId; use std::sync::Arc; -use substrate_test_runtime_client::{prelude::*, runtime, sp_consensus::BlockOrigin}; +use substrate_test_runtime_client::{prelude::*, runtime}; const STORAGE_KEY: &[u8] = b"child"; diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 65393647f3ea..17aa41536388 100644 --- a/client/service/Cargo.toml 
+++ b/client/service/Cargo.toml @@ -54,6 +54,7 @@ sp-session = { version = "4.0.0-dev", path = "../../primitives/session" } sp-state-machine = { version = "0.10.0-dev", path = "../../primitives/state-machine" } sp-application-crypto = { version = "4.0.0-dev", path = "../../primitives/application-crypto" } sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../client/consensus/common" } sp-inherents = { version = "4.0.0-dev", path = "../../primitives/inherents" } sp-storage = { version = "4.0.0-dev", path = "../../primitives/storage" } sc-network = { version = "0.10.0-dev", path = "../network" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 2885fb6deb54..1f54850059fb 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -36,6 +36,7 @@ use sc_client_api::{ ForkBlocks, StorageProvider, UsageProvider, }; use sc_client_db::{Backend, DatabaseSettings}; +use sc_consensus::import_queue::ImportQueue; use sc_executor::{NativeExecutionDispatch, NativeExecutor, RuntimeInfo}; use sc_keystore::LocalKeystore; use sc_network::{ @@ -49,9 +50,8 @@ use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUB use sc_transaction_pool_api::MaintainedTransactionPool; use sp_api::{CallApiAt, ProvideRuntimeApi}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; -use sp_consensus::{ - block_validation::{BlockAnnounceValidator, Chain, DefaultBlockAnnounceValidator}, - import_queue::ImportQueue, +use sp_consensus::block_validation::{ + BlockAnnounceValidator, Chain, DefaultBlockAnnounceValidator, }; use sp_core::traits::{CodeExecutor, SpawnNamed}; use sp_keystore::{CryptoStore, SyncCryptoStore, SyncCryptoStorePtr}; diff --git a/client/service/src/chain_ops/check_block.rs b/client/service/src/chain_ops/check_block.rs index ab924a3f7d9d..4728e014540e 100644 --- a/client/service/src/chain_ops/check_block.rs +++ 
b/client/service/src/chain_ops/check_block.rs @@ -20,7 +20,7 @@ use crate::error::Error; use codec::Encode; use futures::{future, prelude::*}; use sc_client_api::{BlockBackend, UsageProvider}; -use sp_consensus::import_queue::ImportQueue; +use sc_consensus::import_queue::ImportQueue; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use crate::chain_ops::import_blocks; diff --git a/client/service/src/chain_ops/import_blocks.rs b/client/service/src/chain_ops/import_blocks.rs index ecf028ffeb3f..396e5b80f280 100644 --- a/client/service/src/chain_ops/import_blocks.rs +++ b/client/service/src/chain_ops/import_blocks.rs @@ -19,21 +19,21 @@ use crate::{error, error::Error}; use codec::{Decode, IoReader as CodecIoReader}; use futures::{future, prelude::*}; +use futures_timer::Delay; use log::{info, warn}; use sc_chain_spec::ChainSpec; -use sp_consensus::{ - import_queue::{BlockImportError, BlockImportResult, ImportQueue, IncomingBlock, Link}, - BlockOrigin, +use sc_client_api::UsageProvider; +use sc_consensus::import_queue::{ + BlockImportError, BlockImportStatus, ImportQueue, IncomingBlock, Link, }; +use serde_json::{de::IoRead as JsonIoRead, Deserializer, StreamDeserializer}; +use sp_consensus::BlockOrigin; use sp_runtime::{ generic::SignedBlock, - traits::{Block as BlockT, Header, MaybeSerializeDeserialize, NumberFor, Zero}, + traits::{ + Block as BlockT, CheckedDiv, Header, MaybeSerializeDeserialize, NumberFor, Saturating, Zero, + }, }; - -use futures_timer::Delay; -use sc_client_api::UsageProvider; -use serde_json::{de::IoRead as JsonIoRead, Deserializer, StreamDeserializer}; -use sp_runtime::traits::{CheckedDiv, Saturating}; use std::{ convert::{TryFrom, TryInto}, io::{Read, Seek}, @@ -316,7 +316,7 @@ where &mut self, imported: usize, _num_expected_blocks: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, + results: Vec<(Result>, BlockImportError>, B::Hash)>, ) { self.imported_blocks += imported as u64; diff --git 
a/client/service/src/client/client.rs b/client/service/src/client/client.rs index a0d294908c5f..553584b15c02 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -45,6 +45,9 @@ use sc_client_api::{ notifications::{StorageEventStream, StorageNotifications}, CallExecutor, ExecutorProvider, KeyIterator, ProofProvider, UsageProvider, }; +use sc_consensus::{ + BlockCheckParams, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction, +}; use sc_executor::RuntimeVersion; use sc_light::fetcher::ChangesProof; use sc_telemetry::{telemetry, TelemetryHandle, SUBSTRATE_INFO}; @@ -56,10 +59,8 @@ use sp_blockchain::{ self as blockchain, well_known_cache_keys::Id as CacheKeyId, Backend as ChainBackend, Cache, CachedHeaderMetadata, Error, HeaderBackend as ChainHeaderBackend, HeaderMetadata, ProvideCache, }; -use sp_consensus::{ - BlockCheckParams, BlockImportParams, BlockOrigin, BlockStatus, Error as ConsensusError, - ForkChoiceStrategy, ImportResult, StateAction, -}; +use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError}; + use sp_core::{ convert_hash, storage::{well_known_keys, ChildInfo, PrefixedStorageKey, StorageData, StorageKey}, @@ -120,17 +121,18 @@ where _phantom: PhantomData, } -// used in importing a block, where additional changes are made after the runtime -// executed. +/// Used in importing a block, where additional changes are made after the runtime +/// executed. enum PrePostHeader { - // they are the same: no post-runtime digest items. + /// they are the same: no post-runtime digest items. Same(H), - // different headers (pre, post). + /// different headers (pre, post). Different(H, H), } impl PrePostHeader { - // get a reference to the "post-header" -- the header as it should be after all changes are applied. + /// get a reference to the "post-header" -- the header as it should be + /// after all changes are applied. 
fn post(&self) -> &H { match *self { PrePostHeader::Same(ref h) => h, @@ -138,7 +140,8 @@ impl PrePostHeader { } } - // convert to the "post-header" -- the header as it should be after all changes are applied. + /// convert to the "post-header" -- the header as it should be after + /// all changes are applied. fn into_post(self) -> H { match self { PrePostHeader::Same(h) => h, @@ -149,7 +152,7 @@ impl PrePostHeader { enum PrepareStorageChangesResult, Block: BlockT> { Discard(ImportResult), - Import(Option>>), + Import(Option>>), } /// Create an instance of in-memory client. @@ -577,7 +580,8 @@ where Ok(StorageProof::merge(proofs)) } - /// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT). + /// Generates CHT-based proof for roots of changes tries at given blocks + /// (that are part of single CHT). fn changes_trie_roots_proof_at_cht( &self, cht_size: NumberFor, @@ -603,11 +607,12 @@ where Ok(proof) } - /// Returns changes trie storage and all configurations that have been active in the range [first; last]. + /// Returns changes trie storage and all configurations that have been active + /// in the range [first; last]. /// /// Configurations are returned in descending order (and obviously never overlap). - /// If fail_if_disabled is false, returns maximal consequent configurations ranges, starting from last and - /// stopping on either first, or when CT have been disabled. + /// If fail_if_disabled is false, returns maximal consequent configurations ranges, + /// starting from last and stopping on either first, or when CT have been disabled. /// If fail_if_disabled is true, fails when there's a subrange where CT have been disabled /// inside first..last blocks range. 
fn require_changes_trie( @@ -656,7 +661,7 @@ where import_block: BlockImportParams>, new_cache: HashMap>, storage_changes: Option< - sp_consensus::StorageChanges>, + sc_consensus::StorageChanges>, >, ) -> sp_blockchain::Result where @@ -749,7 +754,7 @@ where body: Option>, indexed_body: Option>>, storage_changes: Option< - sp_consensus::StorageChanges>, + sc_consensus::StorageChanges>, >, new_cache: HashMap>, finalized: bool, @@ -793,7 +798,7 @@ where let storage_changes = match storage_changes { Some(storage_changes) => { let storage_changes = match storage_changes { - sp_consensus::StorageChanges::Changes(storage_changes) => { + sc_consensus::StorageChanges::Changes(storage_changes) => { self.backend .begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; let (main_sc, child_sc, offchain_sc, tx, _, changes_trie_tx, tx_index) = @@ -813,7 +818,7 @@ where Some((main_sc, child_sc)) }, - sp_consensus::StorageChanges::Import(changes) => { + sc_consensus::StorageChanges::Import(changes) => { let storage = sp_storage::Storage { top: changes.state.into_iter().collect(), children_default: Default::default(), @@ -889,7 +894,8 @@ where operation.op.insert_aux(aux)?; - // we only notify when we are already synced to the tip of the chain or if this import triggers a re-org + // we only notify when we are already synced to the tip of the chain + // or if this import triggers a re-org if make_notifications || tree_route.is_some() { if finalized { operation.notify_finalized.push(hash); @@ -933,7 +939,7 @@ where (_, StateAction::Skip) => (false, None), ( BlockStatus::InChainPruned, - StateAction::ApplyChanges(sp_consensus::StorageChanges::Changes(_)), + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_)), ) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), (BlockStatus::InChainPruned, StateAction::Execute) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), @@ -975,7 +981,7 @@ where { return 
Err(Error::InvalidStateRoot) } - Some(sp_consensus::StorageChanges::Changes(gen_storage_changes)) + Some(sc_consensus::StorageChanges::Changes(gen_storage_changes)) }, // No block body, no storage changes (true, None, None) => None, @@ -1852,7 +1858,7 @@ where /// objects. Otherwise, importing blocks directly into the client would be bypassing /// important verification work. #[async_trait::async_trait] -impl sp_consensus::BlockImport for &Client +impl sc_consensus::BlockImport for &Client where B: backend::Backend, E: CallExecutor + Send + Sync, @@ -1960,7 +1966,7 @@ where } #[async_trait::async_trait] -impl sp_consensus::BlockImport for Client +impl sc_consensus::BlockImport for Client where B: backend::Backend, E: CallExecutor + Send + Sync, diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 5d7c490db6ab..a6cefcd5db62 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -67,6 +67,7 @@ pub use sc_chain_spec::{ Properties, RuntimeGenesis, }; use sc_client_api::{blockchain::HeaderBackend, BlockchainEvents}; +pub use sc_consensus::ImportQueue; pub use sc_executor::NativeExecutionDispatch; #[doc(hidden)] pub use sc_network::config::{OnDemand, TransactionImport, TransactionImportFuture}; @@ -74,7 +75,6 @@ pub use sc_rpc::Metadata as RpcMetadata; pub use sc_tracing::TracingReceiver; pub use sc_transaction_pool::Options as TransactionPoolOptions; pub use sc_transaction_pool_api::{error::IntoPoolError, InPoolTransaction, TransactionPool}; -pub use sp_consensus::import_queue::ImportQueue; #[doc(hidden)] pub use std::{ops::Deref, result::Result, sync::Arc}; pub use task_manager::{SpawnTaskHandle, TaskManager}; diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index e7e627f919c1..d0081b324911 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -31,6 +31,7 @@ futures = { version = "0.3.1", features = ["compat"] } sc-service = { version = "0.10.0-dev", features = 
["test-helpers"], path = "../../service" } sc-network = { version = "0.10.0-dev", path = "../../network" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sp-runtime = { version = "4.0.0-dev", path = "../../../primitives/runtime" } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } sc-transaction-pool-api = { version = "4.0.0-dev", path = "../../../client/transaction-pool/api" } diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index d6a506ab63d7..dd0a33b7e858 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -24,13 +24,13 @@ use sc_client_api::{in_mem, BlockBackend, BlockchainEvents, StorageProvider}; use sc_client_db::{ Backend, DatabaseSettings, DatabaseSettingsSrc, KeepBlocks, PruningMode, TransactionStorageMode, }; +use sc_consensus::{ + BlockCheckParams, BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, +}; use sc_executor::native_executor_instance; use sc_service::client::{self, new_in_mem, Client, LocalCallExecutor}; use sp_api::ProvideRuntimeApi; -use sp_consensus::{ - BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, BlockStatus, - Error as ConsensusError, ForkChoiceStrategy, ImportResult, SelectChain, -}; +use sp_consensus::{BlockOrigin, BlockStatus, Error as ConsensusError, SelectChain}; use sp_core::{blake2_256, testing::TaskExecutor, ChangesTrieConfiguration, H256}; use sp_runtime::{ generic::BlockId, diff --git a/docs/STYLE_GUIDE.md b/docs/STYLE_GUIDE.md index e6f217f2b485..ea070cdbc59f 100644 --- a/docs/STYLE_GUIDE.md +++ b/docs/STYLE_GUIDE.md @@ -2,6 +2,9 @@ title: Style Guide for Rust in Substrate --- +Where possible these styles are enforced by settings in `rustfmt.toml` so if you run `cargo fmt` +then you will adhere to most of these style guidelines automatically. 
+ # Formatting - Indent using tabs. diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index da9d6adff6f3..4ee5154a6b0f 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -39,7 +39,7 @@ pub use frame_metadata::{ }; pub use sp_runtime::{traits::Dispatchable, DispatchError}; -/// The return typ of a `Dispatchable` in frame. When returned explicitly from +/// The return type of a `Dispatchable` in frame. When returned explicitly from /// a dispatchable function it allows overriding the default `PostDispatchInfo` /// returned from a dispatch. pub type DispatchResultWithPostInfo = diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 642e7c5b9528..fb0ef5b4d7a7 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -293,7 +293,7 @@ pub enum BlockStatus { /// A list of all well known keys in the blockchain cache. pub mod well_known_cache_keys { /// The type representing cache keys. - pub type Id = sp_consensus::import_queue::CacheKeyId; + pub type Id = sp_consensus::CacheKeyId; /// A list of authorities. 
pub const AUTHORITIES: Id = *b"auth"; diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index 5a9d1814bd63..ab4f5a24f5c5 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -13,15 +13,14 @@ readme = "README.md" [package.metadata.docs.rs] targets = ["x86_64-unknown-linux-gnu"] - [dependencies] -thiserror = "1.0.21" -libp2p = { version = "0.37.1", default-features = false } +async-trait = "0.1.42" +codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } +futures = { version = "0.3.1", features = ["thread-pool"] } log = "0.4.8" sp-core = { path= "../../core", version = "4.0.0-dev"} sp-inherents = { version = "4.0.0-dev", path = "../../inherents" } sp-state-machine = { version = "0.10.0-dev", path = "../../state-machine" } -futures = { version = "0.3.1", features = ["thread-pool"] } futures-timer = "3.0.1" sp-std = { version = "4.0.0-dev", path = "../../std" } sp-version = { version = "4.0.0-dev", path = "../../version" } @@ -29,12 +28,11 @@ sp-runtime = { version = "4.0.0-dev", path = "../../runtime" } sp-utils = { version = "4.0.0-dev", path = "../../utils" } sp-trie = { version = "4.0.0-dev", path = "../../trie" } sp-api = { version = "4.0.0-dev", path = "../../api" } -codec = { package = "parity-scale-codec", version = "2.0.0", features = ["derive"] } parking_lot = "0.11.1" serde = { version = "1.0", features = ["derive"] } prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../../utils/prometheus", version = "0.9.0"} wasm-timer = "0.2.5" -async-trait = "0.1.50" +thiserror = "1.0.21" [dev-dependencies] futures = "0.3.9" diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index eb524422a6e2..f6c1e028b945 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -21,14 +21,6 @@ //! change. 
Implementors of traits should not rely on the interfaces to remain //! the same. -// This provides "unused" building blocks to other crates -#![allow(dead_code)] -// our error-chain could potentially blow up otherwise -#![recursion_limit = "128"] - -#[macro_use] -extern crate log; - use std::{sync::Arc, time::Duration}; use futures::prelude::*; @@ -38,25 +30,19 @@ use sp_runtime::{ }; use sp_state_machine::StorageProof; -pub mod block_import; pub mod block_validation; pub mod error; pub mod evaluation; -pub mod import_queue; -mod metrics; mod select_chain; pub use self::error::Error; -pub use block_import::{ - BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, ForkChoiceStrategy, - ImportResult, ImportedAux, ImportedState, JustificationImport, JustificationSyncLink, - StateAction, StorageChanges, -}; -pub use import_queue::DefaultImportQueue; pub use select_chain::SelectChain; pub use sp_inherents::InherentData; pub use sp_state_machine::Backend as StateBackend; +/// Type of keys in the blockchain cache that consensus module could use for its needs. +pub type CacheKeyId = [u8; 4]; + /// Block status. #[derive(Debug, PartialEq, Eq)] pub enum BlockStatus { @@ -72,6 +58,23 @@ pub enum BlockStatus { Unknown, } +/// Block data origin. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum BlockOrigin { + /// Genesis block built into the client. + Genesis, + /// Block is part of the initial sync with the network. + NetworkInitialSync, + /// Block was broadcasted on the network. + NetworkBroadcast, + /// Block that was received from the network and validated in the consensus process. + ConsensusBroadcast, + /// Block that was collated by this node. + Own, + /// Block was imported from a file. + File, +} + /// Environment for a Consensus instance. /// /// Creates proposer instance. 
diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index ef778ca96805..bf1c9898972c 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -19,10 +19,9 @@ use codec::alloc::collections::hash_map::HashMap; use sc_client_api::{backend::Finalizer, client::BlockBackend}; +use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sc_service::client::Client; -use sp_consensus::{ - BlockImport, BlockImportParams, BlockOrigin, Error as ConsensusError, ForkChoiceStrategy, -}; +use sp_consensus::{BlockOrigin, Error as ConsensusError}; use sp_runtime::{generic::BlockId, traits::Block as BlockT, Justification, Justifications}; /// Extension trait for a test client. diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 2a4be6787dd7..cc57f12ea31a 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -52,6 +52,7 @@ serde = { version = "1.0.126", optional = true, features = ["derive"] } [dev-dependencies] sc-block-builder = { version = "0.10.0-dev", path = "../../client/block-builder" } sc-executor = { version = "0.10.0-dev", path = "../../client/executor" } +sp-consensus = { version = "0.10.0-dev", path = "../../primitives/consensus/common" } substrate-test-runtime-client = { version = "2.0.0", path = "./client" } futures = "0.3.9" diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index 24e9f8af2944..9f1dc32a64ff 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -14,6 +14,7 @@ targets = ["x86_64-unknown-linux-gnu"] [dependencies] sc-light = { version = "4.0.0-dev", path = "../../../client/light" } sp-consensus = { version = "0.10.0-dev", path = "../../../primitives/consensus/common" } +sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-block-builder = { version = "0.10.0-dev", path = "../../../client/block-builder" 
} substrate-test-client = { version = "2.0.0", path = "../../client" } sp-core = { version = "4.0.0-dev", path = "../../../primitives/core" } @@ -23,6 +24,5 @@ sp-api = { version = "4.0.0-dev", path = "../../../primitives/api" } sp-blockchain = { version = "4.0.0-dev", path = "../../../primitives/blockchain" } codec = { package = "parity-scale-codec", version = "2.0.0" } sc-client-api = { version = "4.0.0-dev", path = "../../../client/api" } -sc-consensus = { version = "0.10.0-dev", path = "../../../client/consensus/common" } sc-service = { version = "0.10.0-dev", default-features = false, path = "../../../client/service" } futures = "0.3.9" diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 8da8f5c5db4e..bdf45ceae88b 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -1244,12 +1244,12 @@ mod tests { use codec::Encode; use sc_block_builder::BlockBuilderProvider; use sp_api::ProvideRuntimeApi; + use sp_consensus::BlockOrigin; use sp_core::storage::well_known_keys::HEAP_PAGES; use sp_runtime::generic::BlockId; use sp_state_machine::ExecutionStrategy; use substrate_test_runtime_client::{ - prelude::*, runtime::TestAPI, sp_consensus::BlockOrigin, DefaultTestClientBuilderExt, - TestClientBuilder, + prelude::*, runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, }; #[test] diff --git a/test-utils/test-runner/src/lib.rs b/test-utils/test-runner/src/lib.rs index c73ead9eb59a..9f0a8d5d6cb6 100644 --- a/test-utils/test-runner/src/lib.rs +++ b/test-utils/test-runner/src/lib.rs @@ -227,10 +227,11 @@ //! } //! 
``` +use sc_consensus::BlockImport; use sc_executor::NativeExecutionDispatch; use sc_service::TFullClient; use sp_api::{ConstructRuntimeApi, TransactionFor}; -use sp_consensus::{BlockImport, SelectChain}; +use sp_consensus::SelectChain; use sp_inherents::InherentDataProvider; use sp_runtime::traits::{Block as BlockT, SignedExtension}; From d108b65257c552d5ff827bba6019a47cf2a7f6dd Mon Sep 17 00:00:00 2001 From: Shawn Tabrizi Date: Sun, 1 Aug 2021 20:13:58 +0200 Subject: [PATCH 5/8] Refactor Benchmarks for Less Wasm Memory Usage (#9373) * extract repeat out of benchmark * remove r * unused * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_staking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/staking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * use linked map to keep order * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Delete 
pallet_balances.rs * Delete out * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_staking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/staking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * steps and repeat to tuple (current_*, total_*) * idea for list command * fmt * use benchmark list in cli * handle steps in cli * move log update to cli * fmt * remove old todo * line width * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs * benchmark metadata function * don't need this warm up * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs * fix warnings * fix node-template * fix * fmt * line width * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_staking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/staking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * improve docs * improve cli * fix format * fix bug? * Revert "fix bug?" This reverts commit 8051bf1bf9bae862ff28dfff386e7045cd3f045e. 
* skip frame-metadata * extract repeat out of benchmark * remove r * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_staking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/staking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * use linked map to keep order * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs * Delete pallet_balances.rs * Delete out * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_staking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/staking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * steps and repeat to tuple (current_*, total_*) * idea for list command * fmt * use benchmark list in cli * handle steps in cli * move log update to cli * remove old todo * 
line width * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs * benchmark metadata function * don't need this warm up * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_balances --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/balances/src/weights.rs --template=./.maintain/frame-weight-template.hbs * fix warnings * fix node-template * fix * fmt * line width * cargo run --quiet --release --features=runtime-benchmarks --manifest-path=bin/node/cli/Cargo.toml -- benchmark --chain=dev --steps=50 --repeat=20 --pallet=pallet_staking --extrinsic=* --execution=wasm --wasm-execution=compiled --heap-pages=4096 --output=./frame/staking/src/weights.rs --template=./.maintain/frame-weight-template.hbs * improve docs * improve cli * fix format * fix bug? * Revert "fix bug?" This reverts commit 8051bf1bf9bae862ff28dfff386e7045cd3f045e. 
* skip frame-metadata * Update .gitlab-ci.yml * fix import * Update .gitlab-ci.yml Co-authored-by: Parity Benchmarking Bot --- .gitlab-ci.yml | 7 +- Cargo.lock | 2 + bin/node-template/runtime/src/lib.rs | 30 +- bin/node/runtime/src/lib.rs | 63 ++- frame/balances/src/weights.rs | 40 +- frame/benchmarking/src/lib.rs | 373 +++++++------- frame/benchmarking/src/utils.rs | 43 +- frame/staking/src/weights.rs | 524 +++++++++++++++----- utils/frame/benchmarking-cli/Cargo.toml | 2 + utils/frame/benchmarking-cli/src/command.rs | 376 +++++++++----- utils/frame/benchmarking-cli/src/lib.rs | 20 +- utils/frame/benchmarking-cli/src/writer.rs | 2 +- 12 files changed, 1001 insertions(+), 481 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 2cef2d8badcc..f954ac23cba2 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -273,7 +273,7 @@ node-bench-regression-guard: CI_IMAGE: "paritytech/node-bench-regression-guard:latest" before_script: [""] script: - - 'node-bench-regression-guard --reference artifacts/benches/master-* + - 'node-bench-regression-guard --reference artifacts/benches/master-* --compare-with artifacts/benches/$CI_COMMIT_REF_NAME-$CI_COMMIT_SHORT_SHA' cargo-check-subkey: @@ -343,6 +343,7 @@ unleash-check: - mkdir -p target/unleash - export CARGO_TARGET_DIR=target/unleash - cargo unleash check ${CARGO_UNLEASH_PKG_DEF} + allow_failure: true test-frame-examples-compile-to-wasm: # into one job @@ -578,7 +579,7 @@ build-rust-doc: - buildah push --format=v2s2 "$IMAGE_NAME:latest" after_script: - buildah logout "$IMAGE_NAME" - # pass artifacts to the trigger-simnet job + # pass artifacts to the trigger-simnet job - echo "IMAGE_NAME=${IMAGE_NAME}" | tee -a ./artifacts/$PRODUCT/build.env - IMAGE_TAG="$(cat ./artifacts/$PRODUCT/VERSION)" - echo "IMAGE_TAG=${IMAGE_TAG}" | tee -a ./artifacts/$PRODUCT/build.env @@ -713,7 +714,7 @@ trigger-simnet: - if: $CI_COMMIT_REF_NAME == "master" needs: - job: publish-docker-substrate - # `build.env` brings here `$IMAGE_NAME` and 
`$IMAGE_TAG` (`$VERSION` here, + # `build.env` brings here `$IMAGE_NAME` and `$IMAGE_TAG` (`$VERSION` here, # i.e. `2643-0.8.29-5f689e0a-6b24dc54`). variables: TRGR_PROJECT: ${CI_PROJECT_NAME} diff --git a/Cargo.lock b/Cargo.lock index 6cfc2d19db81..3d22e0c0b6bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1800,6 +1800,8 @@ dependencies = [ "frame-benchmarking", "frame-support", "handlebars", + "linked-hash-map", + "log", "parity-scale-codec", "sc-cli", "sc-client-db", diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index f9eaa96153eb..63da72102df3 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -446,14 +446,30 @@ impl_runtime_apis! { #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + use frame_system_benchmarking::Pallet as SystemBench; + + let mut list = Vec::::new(); + + list_benchmark!(list, extra, frame_system, SystemBench::); + list_benchmark!(list, extra, pallet_balances, Balances); + list_benchmark!(list, extra, pallet_timestamp, Timestamp); + list_benchmark!(list, extra, pallet_template, TemplateModule); + + let storage_info = AllPalletsWithSystem::storage_info(); + + return (list, storage_info) + } + fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result< - (Vec, Vec), - sp_runtime::RuntimeString, - > { + ) -> Result, sp_runtime::RuntimeString> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; - use frame_support::traits::StorageInfoTrait; use frame_system_benchmarking::Pallet as SystemBench; impl frame_system_benchmarking::Config for Runtime {} @@ -471,8 +487,6 @@ impl_runtime_apis! 
{ hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef780d41e5e16056765bc8461851072c9d7").to_vec().into(), ]; - let storage_info = AllPalletsWithSystem::storage_info(); - let mut batches = Vec::::new(); let params = (&config, &whitelist); @@ -482,7 +496,7 @@ impl_runtime_apis! { add_benchmark!(params, batches, pallet_template, TemplateModule); if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } - Ok((batches, storage_info)) + Ok(batches) } } } diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 90bd11d484b2..181f5fd42376 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1534,14 +1534,63 @@ impl_runtime_apis! { #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { + fn benchmark_metadata(extra: bool) -> ( + Vec, + Vec, + ) { + use frame_benchmarking::{list_benchmark, Benchmarking, BenchmarkList}; + use frame_support::traits::StorageInfoTrait; + + // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency + // issues. To get around that, we separated the Session benchmarks into its own crate, + // which is why we need these two lines below. 
+ use pallet_session_benchmarking::Pallet as SessionBench; + use pallet_offences_benchmarking::Pallet as OffencesBench; + use frame_system_benchmarking::Pallet as SystemBench; + + let mut list = Vec::::new(); + + list_benchmark!(list, extra, pallet_assets, Assets); + list_benchmark!(list, extra, pallet_babe, Babe); + list_benchmark!(list, extra, pallet_balances, Balances); + list_benchmark!(list, extra, pallet_bounties, Bounties); + list_benchmark!(list, extra, pallet_collective, Council); + list_benchmark!(list, extra, pallet_contracts, Contracts); + list_benchmark!(list, extra, pallet_democracy, Democracy); + list_benchmark!(list, extra, pallet_election_provider_multi_phase, ElectionProviderMultiPhase); + list_benchmark!(list, extra, pallet_elections_phragmen, Elections); + list_benchmark!(list, extra, pallet_gilt, Gilt); + list_benchmark!(list, extra, pallet_grandpa, Grandpa); + list_benchmark!(list, extra, pallet_identity, Identity); + list_benchmark!(list, extra, pallet_im_online, ImOnline); + list_benchmark!(list, extra, pallet_indices, Indices); + list_benchmark!(list, extra, pallet_lottery, Lottery); + list_benchmark!(list, extra, pallet_membership, TechnicalMembership); + list_benchmark!(list, extra, pallet_mmr, Mmr); + list_benchmark!(list, extra, pallet_multisig, Multisig); + list_benchmark!(list, extra, pallet_offences, OffencesBench::); + list_benchmark!(list, extra, pallet_proxy, Proxy); + list_benchmark!(list, extra, pallet_scheduler, Scheduler); + list_benchmark!(list, extra, pallet_session, SessionBench::); + list_benchmark!(list, extra, pallet_staking, Staking); + list_benchmark!(list, extra, frame_system, SystemBench::); + list_benchmark!(list, extra, pallet_timestamp, Timestamp); + list_benchmark!(list, extra, pallet_tips, Tips); + list_benchmark!(list, extra, pallet_transaction_storage, TransactionStorage); + list_benchmark!(list, extra, pallet_treasury, Treasury); + list_benchmark!(list, extra, pallet_uniques, Uniques); + list_benchmark!(list, 
extra, pallet_utility, Utility); + list_benchmark!(list, extra, pallet_vesting, Vesting); + + let storage_info = AllPalletsWithSystem::storage_info(); + + return (list, storage_info) + } + fn dispatch_benchmark( config: frame_benchmarking::BenchmarkConfig - ) -> Result< - (Vec, Vec), - sp_runtime::RuntimeString, - > { + ) -> Result, sp_runtime::RuntimeString> { use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark, TrackedStorageKey}; - use frame_support::traits::StorageInfoTrait; // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency // issues. To get around that, we separated the Session benchmarks into its own crate, @@ -1569,8 +1618,6 @@ impl_runtime_apis! { hex_literal::hex!("26aa394eea5630e07c48ae0c9558cef7b99d880ec681799c0cf30e8886371da95ecffd7b6c0f78751baa9d281e0bfa3a6d6f646c70792f74727372790000000000000000000000000000000000000000").to_vec().into(), ]; - let storage_info = AllPalletsWithSystem::storage_info(); - let mut batches = Vec::::new(); let params = (&config, &whitelist); @@ -1607,7 +1654,7 @@ impl_runtime_apis! { add_benchmark!(params, batches, pallet_vesting, Vesting); if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } - Ok((batches, storage_info)) + Ok(batches) } } } diff --git a/frame/balances/src/weights.rs b/frame/balances/src/weights.rs index d1e86ce45e4b..df609b74840d 100644 --- a/frame/balances/src/weights.rs +++ b/frame/balances/src/weights.rs @@ -17,8 +17,8 @@ //! Autogenerated weights for pallet_balances //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-07-30, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! 
EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -56,33 +56,39 @@ pub trait WeightInfo { /// Weights for pallet_balances using the Substrate node and recommended hardware. pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (73_268_000 as Weight) + (78_358_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - (54_881_000 as Weight) + (59_001_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: System Account (r:1 w:1) fn set_balance_creating() -> Weight { - (29_853_000 as Weight) + (32_698_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: System Account (r:1 w:1) fn set_balance_killing() -> Weight { - (36_007_000 as Weight) + (38_746_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: System Account (r:2 w:2) fn force_transfer() -> Weight { - (72_541_000 as Weight) + (77_622_000 as Weight) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } + // Storage: System Account (r:1 w:1) fn transfer_all() -> Weight { - (67_360_000 as Weight) + (72_020_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } @@ -90,33 +96,39 @@ impl WeightInfo for SubstrateWeight { // For backwards compatibility and tests impl WeightInfo for () { + // Storage: System Account (r:1 w:1) fn transfer() -> Weight { - (73_268_000 as Weight) + (78_358_000 as Weight) 
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } + // Storage: System Account (r:1 w:1) fn transfer_keep_alive() -> Weight { - (54_881_000 as Weight) + (59_001_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } + // Storage: System Account (r:1 w:1) fn set_balance_creating() -> Weight { - (29_853_000 as Weight) + (32_698_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } + // Storage: System Account (r:1 w:1) fn set_balance_killing() -> Weight { - (36_007_000 as Weight) + (38_746_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } + // Storage: System Account (r:2 w:2) fn force_transfer() -> Weight { - (72_541_000 as Weight) + (77_622_000 as Weight) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } + // Storage: System Account (r:1 w:1) fn transfer_all() -> Weight { - (67_360_000 as Weight) + (72_020_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index a0aa78f722f7..7149ddc82f59 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -711,8 +711,8 @@ macro_rules! impl_benchmark { extrinsic: &[u8], lowest_range_values: &[u32], highest_range_values: &[u32], - steps: &[u32], - repeat: u32, + steps: (u32, u32), + _repeat: (u32, u32), whitelist: &[$crate::TrackedStorageKey], verify: bool, ) -> Result<$crate::Vec<$crate::BenchmarkResults>, &'static str> { @@ -724,9 +724,6 @@ macro_rules! 
impl_benchmark { _ => return Err("Could not find extrinsic."), }; let mut results: $crate::Vec<$crate::BenchmarkResults> = $crate::Vec::new(); - if repeat == 0 { - return Ok(results); - } // Add whitelist to DB including whitelisted caller let mut whitelist = whitelist.to_vec(); @@ -737,141 +734,110 @@ macro_rules! impl_benchmark { whitelist.push(whitelisted_caller_key.into()); $crate::benchmarking::set_whitelist(whitelist); - // Warm up the DB - $crate::benchmarking::commit_db(); - $crate::benchmarking::wipe_db(); - let components = < SelectedBenchmark as $crate::BenchmarkingSetup >::components(&selected_benchmark); - let mut progress = $crate::benchmarking::current_time(); - // Default number of steps for a component. - let mut prev_steps = 10; - - let mut repeat_benchmark = | - repeat: u32, + let do_benchmark = | c: &[($crate::BenchmarkParameter, u32)], results: &mut $crate::Vec<$crate::BenchmarkResults>, verify: bool, - step: u32, - num_steps: u32, | -> Result<(), &'static str> { - // Run the benchmark `repeat` times. - for r in 0..repeat { - // Set up the externalities environment for the setup we want to - // benchmark. - let closure_to_benchmark = < - SelectedBenchmark as $crate::BenchmarkingSetup - >::instance(&selected_benchmark, c, verify)?; - - // Set the block number to at least 1 so events are deposited. - if $crate::Zero::is_zero(&frame_system::Pallet::::block_number()) { - frame_system::Pallet::::set_block_number(1u32.into()); - } + // Set up the externalities environment for the setup we want to + // benchmark. + let closure_to_benchmark = < + SelectedBenchmark as $crate::BenchmarkingSetup + >::instance(&selected_benchmark, c, verify)?; - // Commit the externalities to the database, flushing the DB cache. - // This will enable worst case scenario for reading from the database. - $crate::benchmarking::commit_db(); + // Set the block number to at least 1 so events are deposited. 
+ if $crate::Zero::is_zero(&frame_system::Pallet::::block_number()) { + frame_system::Pallet::::set_block_number(1u32.into()); + } - // Reset the read/write counter so we don't count operations in the setup process. - $crate::benchmarking::reset_read_write_count(); + // Commit the externalities to the database, flushing the DB cache. + // This will enable worst case scenario for reading from the database. + $crate::benchmarking::commit_db(); - if verify { - closure_to_benchmark()?; - } else { - // Time the extrinsic logic. - $crate::log::trace!( - target: "benchmark", - "Start Benchmark: {:?}", c - ); - - let start_pov = $crate::benchmarking::proof_size(); - let start_extrinsic = $crate::benchmarking::current_time(); - - closure_to_benchmark()?; - - let finish_extrinsic = $crate::benchmarking::current_time(); - let end_pov = $crate::benchmarking::proof_size(); - - // Calculate the diff caused by the benchmark. - let elapsed_extrinsic = finish_extrinsic.saturating_sub(start_extrinsic); - let diff_pov = match (start_pov, end_pov) { - (Some(start), Some(end)) => end.saturating_sub(start), - _ => Default::default(), - }; - - // Commit the changes to get proper write count - $crate::benchmarking::commit_db(); - $crate::log::trace!( - target: "benchmark", - "End Benchmark: {} ns", elapsed_extrinsic - ); - let read_write_count = $crate::benchmarking::read_write_count(); - $crate::log::trace!( - target: "benchmark", - "Read/Write Count {:?}", read_write_count - ); - - let time = $crate::benchmarking::current_time(); - if time.saturating_sub(progress) > 5000000000 { - progress = $crate::benchmarking::current_time(); - $crate::log::info!( - target: "benchmark", - "Benchmarking {} {}/{}, run {}/{}", - extrinsic, - step, - num_steps, - r, - repeat, - ); - } - - // Time the storage root recalculation. 
- let start_storage_root = $crate::benchmarking::current_time(); - $crate::storage_root(); - let finish_storage_root = $crate::benchmarking::current_time(); - let elapsed_storage_root = finish_storage_root - start_storage_root; - - // TODO: Fix memory allocation issue then re-enable - // let read_and_written_keys = $crate::benchmarking::get_read_and_written_keys(); - let read_and_written_keys = Default::default(); - - results.push($crate::BenchmarkResults { - components: c.to_vec(), - extrinsic_time: elapsed_extrinsic, - storage_root_time: elapsed_storage_root, - reads: read_write_count.0, - repeat_reads: read_write_count.1, - writes: read_write_count.2, - repeat_writes: read_write_count.3, - proof_size: diff_pov, - keys: read_and_written_keys, - }); - } + // Reset the read/write counter so we don't count operations in the setup process. + $crate::benchmarking::reset_read_write_count(); - // Wipe the DB back to the genesis state. - $crate::benchmarking::wipe_db(); + if verify { + closure_to_benchmark()?; + } else { + // Time the extrinsic logic. + $crate::log::trace!( + target: "benchmark", + "Start Benchmark: {:?}", c + ); + + let start_pov = $crate::benchmarking::proof_size(); + let start_extrinsic = $crate::benchmarking::current_time(); + + closure_to_benchmark()?; + + let finish_extrinsic = $crate::benchmarking::current_time(); + let end_pov = $crate::benchmarking::proof_size(); + + // Calculate the diff caused by the benchmark. 
+ let elapsed_extrinsic = finish_extrinsic.saturating_sub(start_extrinsic); + let diff_pov = match (start_pov, end_pov) { + (Some(start), Some(end)) => end.saturating_sub(start), + _ => Default::default(), + }; + + // Commit the changes to get proper write count + $crate::benchmarking::commit_db(); + $crate::log::trace!( + target: "benchmark", + "End Benchmark: {} ns", elapsed_extrinsic + ); + let read_write_count = $crate::benchmarking::read_write_count(); + $crate::log::trace!( + target: "benchmark", + "Read/Write Count {:?}", read_write_count + ); + + // Time the storage root recalculation. + let start_storage_root = $crate::benchmarking::current_time(); + $crate::storage_root(); + let finish_storage_root = $crate::benchmarking::current_time(); + let elapsed_storage_root = finish_storage_root - start_storage_root; + + let read_and_written_keys = $crate::benchmarking::get_read_and_written_keys(); + + results.push($crate::BenchmarkResults { + components: c.to_vec(), + extrinsic_time: elapsed_extrinsic, + storage_root_time: elapsed_storage_root, + reads: read_write_count.0, + repeat_reads: read_write_count.1, + writes: read_write_count.2, + repeat_writes: read_write_count.3, + proof_size: diff_pov, + keys: read_and_written_keys, + }); } + // Wipe the DB back to the genesis state. + $crate::benchmarking::wipe_db(); + Ok(()) }; + let (current_step, total_steps) = steps; + if components.is_empty() { - if verify { - // If `--verify` is used, run the benchmark once to verify it would complete. - repeat_benchmark(1, Default::default(), &mut $crate::Vec::new(), true, 1, 1)?; + // The CLI could ask to do more steps than is sensible, so we skip those. + if current_step == 0 { + if verify { + // If `--verify` is used, run the benchmark once to verify it would complete. 
+ do_benchmark(Default::default(), &mut $crate::Vec::new(), true)?; + } + do_benchmark(Default::default(), &mut results, false)?; } - repeat_benchmark(repeat, Default::default(), &mut results, false, 1, 1)?; } else { // Select the component we will be benchmarking. Each component will be benchmarked. for (idx, (name, low, high)) in components.iter().enumerate() { - // Get the number of steps for this component. - let steps = steps.get(idx).cloned().unwrap_or(prev_steps); - prev_steps = steps; - - // Skip this loop if steps is zero - if steps == 0 { continue } let lowest = lowest_range_values.get(idx).cloned().unwrap_or(*low); let highest = highest_range_values.get(idx).cloned().unwrap_or(*high); @@ -879,31 +845,34 @@ macro_rules! impl_benchmark { let diff = highest - lowest; // Create up to `STEPS` steps for that component between high and low. - let step_size = (diff / steps).max(1); + let step_size = (diff / total_steps).max(1); let num_of_steps = diff / step_size + 1; - for s in 0..num_of_steps { - // This is the value we will be testing for component `name` - let component_value = lowest + step_size * s; + // The CLI could ask to do more steps than is sensible, so we just skip those. + if current_step >= num_of_steps { + continue; + } - // Select the max value for all the other components. - let c: $crate::Vec<($crate::BenchmarkParameter, u32)> = components.iter() - .enumerate() - .map(|(idx, (n, _, h))| - if n == name { - (*n, component_value) - } else { - (*n, *highest_range_values.get(idx).unwrap_or(h)) - } - ) - .collect(); + // This is the value we will be testing for component `name` + let component_value = lowest + step_size * current_step; + + // Select the max value for all the other components. 
+ let c: $crate::Vec<($crate::BenchmarkParameter, u32)> = components.iter() + .enumerate() + .map(|(idx, (n, _, h))| + if n == name { + (*n, component_value) + } else { + (*n, *highest_range_values.get(idx).unwrap_or(h)) + } + ) + .collect(); - if verify { - // If `--verify` is used, run the benchmark once to verify it would complete. - repeat_benchmark(1, &c, &mut $crate::Vec::new(), true, s, num_of_steps)?; - } - repeat_benchmark(repeat, &c, &mut results, false, s, num_of_steps)?; + if verify { + // If `--verify` is used, run the benchmark once to verify it would complete. + do_benchmark(&c, &mut $crate::Vec::new(), true)?; } + do_benchmark(&c, &mut results, false)?; } } return Ok(results); @@ -1253,8 +1222,8 @@ pub fn show_benchmark_debug_info( benchmark: &[u8], lowest_range_values: &sp_std::prelude::Vec, highest_range_values: &sp_std::prelude::Vec, - steps: &sp_std::prelude::Vec, - repeat: &u32, + steps: &(u32, u32), + repeat: &(u32, u32), verify: &bool, error_message: &str, ) -> sp_runtime::RuntimeString { @@ -1273,8 +1242,8 @@ pub fn show_benchmark_debug_info( .expect("it's all just strings ran through the wasm interface. qed"), lowest_range_values, highest_range_values, - steps, - repeat, + steps.1, + repeat.1, verify, error_message, ) @@ -1359,62 +1328,70 @@ macro_rules! add_benchmark { verify, extra, } = config; - if &pallet[..] == &name_string[..] || &pallet[..] == &b"*"[..] { - if &pallet[..] == &b"*"[..] || &benchmark[..] == &b"*"[..] 
{ - for benchmark in $( $location )*::benchmarks(*extra).into_iter() { - $batches.push($crate::BenchmarkBatch { - pallet: name_string.to_vec(), - instance: instance_string.to_vec(), - benchmark: benchmark.to_vec(), - results: $( $location )*::run_benchmark( - benchmark, - &lowest_range_values[..], - &highest_range_values[..], - &steps[..], - *repeat, - whitelist, - *verify, - ).map_err(|e| { - $crate::show_benchmark_debug_info( - instance_string, - benchmark, - lowest_range_values, - highest_range_values, - steps, - repeat, - verify, - e, - ) - })?, - }); - } - } else { - $batches.push($crate::BenchmarkBatch { - pallet: name_string.to_vec(), - instance: instance_string.to_vec(), - benchmark: benchmark.clone(), - results: $( $location )*::run_benchmark( - &benchmark[..], - &lowest_range_values[..], - &highest_range_values[..], - &steps[..], - *repeat, - whitelist, - *verify, - ).map_err(|e| { - $crate::show_benchmark_debug_info( - instance_string, - benchmark, - lowest_range_values, - highest_range_values, - steps, - repeat, - verify, - e, - ) - })?, - }); - } + if &pallet[..] == &name_string[..] { + $batches.push($crate::BenchmarkBatch { + pallet: name_string.to_vec(), + instance: instance_string.to_vec(), + benchmark: benchmark.clone(), + results: $( $location )*::run_benchmark( + &benchmark[..], + &lowest_range_values[..], + &highest_range_values[..], + *steps, + *repeat, + whitelist, + *verify, + ).map_err(|e| { + $crate::show_benchmark_debug_info( + instance_string, + benchmark, + lowest_range_values, + highest_range_values, + steps, + repeat, + verify, + e, + ) + })? + }); } ) } + +/// This macro allows users to easily generate a list of benchmarks for the pallets configured +/// in the runtime. 
+/// +/// To use this macro, first create a an object to store the list: +/// +/// ```ignore +/// let mut list = Vec::::new(); +/// ``` +/// +/// Then pass this `list` to the macro, along with the `extra` boolean, the pallet crate, and +/// pallet struct: +/// +/// ```ignore +/// list_benchmark!(list, extra, pallet_balances, Balances); +/// list_benchmark!(list, extra, pallet_session, SessionBench::); +/// list_benchmark!(list, extra, frame_system, SystemBench::); +/// ``` +/// +/// This should match what exists with the `add_benchmark!` macro. + +#[macro_export] +macro_rules! list_benchmark { + ( $list:ident, $extra:ident, $name:path, $( $location:tt )* ) => ( + let pallet_string = stringify!($name).as_bytes(); + let instance_string = stringify!( $( $location )* ).as_bytes(); + let benchmarks = $( $location )*::benchmarks($extra) + .iter() + .map(|b| b.to_vec()) + .collect::>(); + let pallet_benchmarks = BenchmarkList { + pallet: pallet_string.to_vec(), + instance: instance_string.to_vec(), + benchmarks: benchmarks.to_vec(), + }; + $list.push(pallet_benchmarks) + ) +} diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 33d479a0b54a..82c6e44796fa 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -103,22 +103,41 @@ pub struct BenchmarkConfig { pub lowest_range_values: Vec, /// An optional manual override to the highest values used in the `steps` range. pub highest_range_values: Vec, - /// The number of samples to take across the range of values for components. - pub steps: Vec, - /// The number of times to repeat a benchmark. - pub repeat: u32, + /// The number of samples to take across the range of values for components. (current_step, + /// total_steps) + pub steps: (u32, u32), + /// The number times to repeat each benchmark to increase accuracy of results. 
(current_repeat, + /// total_repeat) + pub repeat: (u32, u32), /// Enable an extra benchmark iteration which runs the verification logic for a benchmark. pub verify: bool, - /// Enable benchmarking of "extra" extrinsics, i.e. those that are not directly used in a pallet. + /// Enable benchmarking of "extra" extrinsics, i.e. those that are not directly used in a + /// pallet. pub extra: bool, } +/// A list of benchmarks available for a particular pallet and instance. +/// +/// All `Vec` must be valid utf8 strings. +#[derive(Encode, Decode, Default, Clone, PartialEq, Debug)] +pub struct BenchmarkList { + pub pallet: Vec, + pub instance: Vec, + pub benchmarks: Vec>, +} + sp_api::decl_runtime_apis! { /// Runtime api for benchmarking a FRAME runtime. pub trait Benchmark { + /// Get the benchmark metadata available for this runtime. + /// + /// Parameters + /// - `extra`: Also list benchmarks marked "extra" which would otherwise not be + /// needed for weight calculation. + fn benchmark_metadata(extra: bool) -> (Vec, Vec); + /// Dispatch the given benchmark. - fn dispatch_benchmark(config: BenchmarkConfig) - -> Result<(Vec, Vec), sp_runtime::RuntimeString>; + fn dispatch_benchmark(config: BenchmarkConfig) -> Result, sp_runtime::RuntimeString>; } } @@ -216,16 +235,18 @@ pub trait Benchmarking { /// Parameters /// - `name`: The name of extrinsic function or benchmark you want to benchmark encoded as /// bytes. - /// - `steps`: The number of sample points you want to take across the range of parameters. /// - `lowest_range_values`: The lowest number for each range of parameters. /// - `highest_range_values`: The highest number for each range of parameters. - /// - `repeat`: The number of times you want to repeat a benchmark. + /// - `steps`: The number of sample points you want to take across the range of parameters. + /// (current_step, total_steps) + /// - `repeat`: The total number times to repeat each benchmark to increase accuracy of results. 
+ /// (current_repeat, total_repeats) fn run_benchmark( name: &[u8], lowest_range_values: &[u32], highest_range_values: &[u32], - steps: &[u32], - repeat: u32, + steps: (u32, u32), + repeat: (u32, u32), whitelist: &[TrackedStorageKey], verify: bool, ) -> Result, &'static str>; diff --git a/frame/staking/src/weights.rs b/frame/staking/src/weights.rs index cba4e68b5f61..fb4ed160d832 100644 --- a/frame/staking/src/weights.rs +++ b/frame/staking/src/weights.rs @@ -17,8 +17,8 @@ //! Autogenerated weights for pallet_staking //! -//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 3.0.0 -//! DATE: 2021-06-19, STEPS: `[50, ]`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` +//! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION 4.0.0-dev +//! DATE: 2021-07-31, STEPS: `50`, REPEAT: 20, LOW RANGE: `[]`, HIGH RANGE: `[]` //! EXECUTION: Some(Wasm), WASM-EXECUTION: Compiled, CHAIN: Some("dev"), DB CACHE: 128 // Executed Command: @@ -78,376 +78,664 @@ pub trait WeightInfo { /// Weights for pallet_staking using the Substrate node and recommended hardware. 
pub struct SubstrateWeight(PhantomData); impl WeightInfo for SubstrateWeight { + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - (72_617_000 as Weight) + (77_492_000 as Weight) .saturating_add(T::DbWeight::get().reads(5 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) } + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) fn bond_extra() -> Weight { - (55_590_000 as Weight) + (59_476_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Staking Validators (r:1 w:0) fn unbond() -> Weight { - (59_730_000 as Weight) + (63_655_000 as Weight) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: System Account (r:1 w:1) fn withdraw_unbonded_update(s: u32, ) -> Weight { - (52_279_000 as Weight) + (54_534_000 as Weight) // Standard Error: 0 - .saturating_add((68_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((24_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(4 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking 
Validators (r:1 w:0) + // Storage: Staking Payee (r:0 w:1) + // Storage: Staking SlashingSpans (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Staking SpanSlash (r:0 w:2) fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (86_629_000 as Weight) + (89_850_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_379_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_396_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(8 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } + // Storage: Staking MaxValidatorsCount (r:1 w:0) + // Storage: Staking MinValidatorBond (r:1 w:0) + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Validators (r:1 w:1) + // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - (32_393_000 as Weight) + (36_726_000 as Weight) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) fn kick(k: u32, ) -> Weight { - (36_986_000 as Weight) - // Standard Error: 13_000 - .saturating_add((16_574_000 as Weight).saturating_mul(k as Weight)) + (19_497_000 as Weight) + // Standard Error: 15_000 + .saturating_add((17_057_000 as Weight).saturating_mul(k as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(k as Weight))) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking MinNominatorBond (r:1 w:0) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking Validators (r:2 w:0) + // Storage: Staking 
MaxNominatorsCount (r:1 w:0) fn nominate(n: u32, ) -> Weight { - (43_228_000 as Weight) - // Standard Error: 21_000 - .saturating_add((5_119_000 as Weight).saturating_mul(n as Weight)) + (45_146_000 as Weight) + // Standard Error: 13_000 + .saturating_add((5_527_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) fn chill() -> Weight { - (17_800_000 as Weight) + (18_986_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) } + // Storage: Staking Payee (r:0 w:1) + // Storage: Staking Ledger (r:1 w:0) fn set_payee() -> Weight { - (12_612_000 as Weight) + (13_348_000 as Weight) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - (27_503_000 as Weight) + (28_148_000 as Weight) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } + // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - (2_119_000 as Weight) + (2_909_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { - (2_320_000 as Weight) + (3_163_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - (2_269_000 as Weight) + (3_141_000 as Weight) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - (2_334_000 as Weight) + (3_220_000 as Weight) 
.saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: Staking Invulnerables (r:0 w:1) fn set_invulnerables(v: u32, ) -> Weight { - (2_354_000 as Weight) + (3_569_000 as Weight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((58_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Payee (r:0 w:1) + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking SlashingSpans (r:1 w:0) + // Storage: Staking Ledger (r:0 w:1) + // Storage: Staking SpanSlash (r:0 w:2) fn force_unstake(s: u32, ) -> Weight { - (61_556_000 as Weight) + (65_753_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_377_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_420_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(6 as Weight)) .saturating_add(T::DbWeight::get().writes(6 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } + // Storage: Staking UnappliedSlashes (r:1 w:1) fn cancel_deferred_slash(s: u32, ) -> Weight { - (3_367_105_000 as Weight) - // Standard Error: 222_000 - .saturating_add((19_817_000 as Weight).saturating_mul(s as Weight)) + (3_056_514_000 as Weight) + // Standard Error: 218_000 + .saturating_add((21_159_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().writes(1 as Weight)) } + // Storage: Staking Bonded (r:2 w:0) + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:0) + // Storage: Staking ErasValidatorReward (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: System Account (r:2 w:2) + // 
Storage: Staking Payee (r:2 w:0) + // Storage: Staking ErasStakersClipped (r:1 w:0) + // Storage: Staking ErasValidatorPrefs (r:1 w:0) fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (47_229_000 as Weight) - // Standard Error: 53_000 - .saturating_add((48_365_000 as Weight).saturating_mul(n as Weight)) + (121_794_000 as Weight) + // Standard Error: 19_000 + .saturating_add((49_467_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(2 as Weight)) .saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } + // Storage: Staking ErasValidatorPrefs (r:1 w:0) + // Storage: Staking Ledger (r:2 w:2) + // Storage: Staking ErasValidatorReward (r:1 w:0) + // Storage: Balances Locks (r:2 w:2) + // Storage: Staking ErasRewardPoints (r:1 w:0) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: System Account (r:2 w:2) + // Storage: Staking ErasStakersClipped (r:1 w:0) + // Storage: Staking Bonded (r:2 w:0) + // Storage: Staking Payee (r:2 w:0) fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (156_788_000 as Weight) - // Standard Error: 20_000 - .saturating_add((61_280_000 as Weight).saturating_mul(n as Weight)) + (147_049_000 as Weight) + // Standard Error: 30_000 + .saturating_add((64_428_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(11 as Weight)) .saturating_add(T::DbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(3 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Ledger (r:1 w:1) + // Storage: System Account (r:1 w:1) fn rebond(l: u32, ) -> Weight { - (47_815_000 as Weight) + (52_184_000 as Weight) // 
Standard Error: 1_000 - .saturating_add((65_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((35_000 as Weight).saturating_mul(l as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().writes(3 as Weight)) } + // Storage: Staking ErasStakersClipped (r:0 w:2) + // Storage: Staking ErasValidatorReward (r:0 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking ErasValidatorPrefs (r:0 w:2) + // Storage: Staking ErasTotalStake (r:0 w:1) + // Storage: Staking ErasStakers (r:0 w:2) + // Storage: Staking ErasRewardPoints (r:0 w:1) + // Storage: Staking HistoryDepth (r:1 w:1) + // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 74_000 - .saturating_add((34_945_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 57_000 + .saturating_add((30_689_000 as Weight).saturating_mul(e as Weight)) .saturating_add(T::DbWeight::get().reads(2 as Weight)) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } + // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking SlashingSpans (r:1 w:1) + // Storage: Staking Validators (r:1 w:1) + // Storage: Staking SpanSlash (r:0 w:1) + // Storage: Staking CounterForValidators (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Staking Payee (r:0 w:1) + // Storage: Staking Ledger (r:0 w:1) + // Storage: Staking Bonded (r:1 w:1) + // Storage: Balances Locks (r:1 w:1) fn reap_stash(s: u32, ) -> Weight { - (73_483_000 as Weight) - // Standard Error: 0 - .saturating_add((2_384_000 as Weight).saturating_mul(s as Weight)) + (75_836_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_423_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(8 as Weight)) 
.saturating_add(T::DbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } + // Storage: System BlockWeight (r:1 w:1) + // Storage: Staking ErasStakers (r:0 w:1) + // Storage: Staking ErasStakersClipped (r:0 w:1) + // Storage: Staking Nominators (r:101 w:0) + // Storage: Staking SlashingSpans (r:1 w:0) + // Storage: Staking ErasStartSessionIndex (r:0 w:1) + // Storage: Staking Ledger (r:101 w:0) + // Storage: Staking Validators (r:2 w:0) + // Storage: Staking Bonded (r:101 w:0) + // Storage: Staking CounterForNominators (r:1 w:0) + // Storage: Staking ValidatorCount (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: Staking ErasValidatorPrefs (r:0 w:1) + // Storage: Staking CounterForValidators (r:1 w:0) + // Storage: Staking CurrentEra (r:1 w:1) + // Storage: Staking MinimumValidatorCount (r:1 w:0) + // Storage: Staking ErasTotalStake (r:0 w:1) fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 846_000 - .saturating_add((305_234_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 42_000 - .saturating_add((48_280_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_492_000 + .saturating_add((299_860_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 99_000 + .saturating_add((47_937_000 as Weight).saturating_mul(n as Weight)) .saturating_add(T::DbWeight::get().reads(10 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().writes(4 as Weight)) .saturating_add(T::DbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } + // Storage: Staking Validators (r:501 w:0) + // Storage: Staking Bonded (r:1500 w:0) + // Storage: Staking Nominators (r:1001 w:0) + // Storage: Staking Ledger (r:1500 w:0) + // Storage: Staking SlashingSpans (r:21 w:0) fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as 
Weight) - // Standard Error: 99_000 - .saturating_add((25_735_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 99_000 - .saturating_add((28_122_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 3_388_000 - .saturating_add((21_500_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 101_000 + .saturating_add((27_304_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 101_000 + .saturating_add((29_893_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 3_441_000 + .saturating_add((91_111_000 as Weight).saturating_mul(s as Weight)) .saturating_add(T::DbWeight::get().reads(3 as Weight)) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(T::DbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) } + // Storage: Staking Validators (r:501 w:0) fn get_npos_targets(v: u32, ) -> Weight { (0 as Weight) - // Standard Error: 30_000 - .saturating_add((11_065_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 32_000 + .saturating_add((11_692_000 as Weight).saturating_mul(v as Weight)) .saturating_add(T::DbWeight::get().reads(1 as Weight)) .saturating_add(T::DbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } + // Storage: Staking MaxNominatorsCount (r:0 w:1) + // Storage: Staking MaxValidatorsCount (r:0 w:1) + // Storage: Staking MinValidatorBond (r:0 w:1) + // Storage: Staking ChillThreshold (r:0 w:1) + // Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_limits() -> Weight { - (5_028_000 as Weight) - .saturating_add(T::DbWeight::get().writes(4 as Weight)) - } + (7_325_000 as Weight) + .saturating_add(T::DbWeight::get().writes(5 as Weight)) + } + // Storage: Staking MinValidatorBond (r:1 w:0) + // Storage: Staking ChillThreshold (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Validators (r:1 w:1) + // 
Storage: Staking CounterForValidators (r:1 w:1) + // Storage: Staking MaxValidatorsCount (r:1 w:0) + // Storage: Staking Ledger (r:1 w:0) fn chill_other() -> Weight { - (35_758_000 as Weight) - .saturating_add(T::DbWeight::get().reads(5 as Weight)) + (62_683_000 as Weight) + .saturating_add(T::DbWeight::get().reads(7 as Weight)) .saturating_add(T::DbWeight::get().writes(2 as Weight)) } } // For backwards compatibility and tests impl WeightInfo for () { + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Payee (r:0 w:1) fn bond() -> Weight { - (72_617_000 as Weight) + (77_492_000 as Weight) .saturating_add(RocksDbWeight::get().reads(5 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) } + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Bonded (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) fn bond_extra() -> Weight { - (55_590_000 as Weight) + (59_476_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Staking Validators (r:1 w:0) fn unbond() -> Weight { - (59_730_000 as Weight) + (63_655_000 as Weight) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: System Account (r:1 w:1) fn withdraw_unbonded_update(s: u32, ) -> Weight { - (52_279_000 as Weight) + (54_534_000 as Weight) // Standard Error: 0 - .saturating_add((68_000 as Weight).saturating_mul(s as Weight)) + 
.saturating_add((24_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(4 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking Payee (r:0 w:1) + // Storage: Staking SlashingSpans (r:1 w:0) + // Storage: System Account (r:1 w:1) + // Storage: Staking SpanSlash (r:0 w:2) fn withdraw_unbonded_kill(s: u32, ) -> Weight { - (86_629_000 as Weight) + (89_850_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_379_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_396_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(8 as Weight)) .saturating_add(RocksDbWeight::get().writes(6 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } + // Storage: Staking MaxValidatorsCount (r:1 w:0) + // Storage: Staking MinValidatorBond (r:1 w:0) + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Validators (r:1 w:1) + // Storage: Staking CounterForValidators (r:1 w:1) fn validate() -> Weight { - (32_393_000 as Weight) + (36_726_000 as Weight) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) fn kick(k: u32, ) -> Weight { - (36_986_000 as Weight) - // Standard Error: 13_000 - .saturating_add((16_574_000 as Weight).saturating_mul(k as Weight)) + (19_497_000 as Weight) + // Standard Error: 15_000 + .saturating_add((17_057_000 as Weight).saturating_mul(k as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as 
Weight).saturating_mul(k as Weight))) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(k as Weight))) } + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking MinNominatorBond (r:1 w:0) + // Storage: Staking CounterForNominators (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking Nominators (r:1 w:1) + // Storage: Staking Validators (r:2 w:0) + // Storage: Staking MaxNominatorsCount (r:1 w:0) fn nominate(n: u32, ) -> Weight { - (43_228_000 as Weight) - // Standard Error: 21_000 - .saturating_add((5_119_000 as Weight).saturating_mul(n as Weight)) + (45_146_000 as Weight) + // Standard Error: 13_000 + .saturating_add((5_527_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking Ledger (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) fn chill() -> Weight { - (17_800_000 as Weight) + (18_986_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) } + // Storage: Staking Payee (r:0 w:1) + // Storage: Staking Ledger (r:1 w:0) fn set_payee() -> Weight { - (12_612_000 as Weight) + (13_348_000 as Weight) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking Ledger (r:2 w:2) fn set_controller() -> Weight { - (27_503_000 as Weight) + (28_148_000 as Weight) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } + // Storage: Staking ValidatorCount (r:0 w:1) fn set_validator_count() -> Weight { - (2_119_000 as Weight) + (2_909_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } + // Storage: Staking ForceEra (r:0 w:1) fn force_no_eras() -> Weight { 
- (2_320_000 as Weight) + (3_163_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } + // Storage: Staking ForceEra (r:0 w:1) fn force_new_era() -> Weight { - (2_269_000 as Weight) + (3_141_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } + // Storage: Staking ForceEra (r:0 w:1) fn force_new_era_always() -> Weight { - (2_334_000 as Weight) + (3_220_000 as Weight) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } + // Storage: Staking Invulnerables (r:0 w:1) fn set_invulnerables(v: u32, ) -> Weight { - (2_354_000 as Weight) + (3_569_000 as Weight) // Standard Error: 0 - .saturating_add((5_000 as Weight).saturating_mul(v as Weight)) + .saturating_add((58_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } + // Storage: Balances Locks (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Payee (r:0 w:1) + // Storage: Staking Bonded (r:1 w:1) + // Storage: Staking Validators (r:1 w:0) + // Storage: Staking SlashingSpans (r:1 w:0) + // Storage: Staking Ledger (r:0 w:1) + // Storage: Staking SpanSlash (r:0 w:2) fn force_unstake(s: u32, ) -> Weight { - (61_556_000 as Weight) + (65_753_000 as Weight) // Standard Error: 1_000 - .saturating_add((2_377_000 as Weight).saturating_mul(s as Weight)) + .saturating_add((2_420_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(6 as Weight)) .saturating_add(RocksDbWeight::get().writes(6 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } + // Storage: Staking UnappliedSlashes (r:1 w:1) fn cancel_deferred_slash(s: u32, ) -> Weight { - (3_367_105_000 as Weight) - // Standard Error: 222_000 - .saturating_add((19_817_000 as Weight).saturating_mul(s as Weight)) + (3_056_514_000 as Weight) + // Standard Error: 218_000 + .saturating_add((21_159_000 as Weight).saturating_mul(s as Weight)) 
.saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().writes(1 as Weight)) } + // Storage: Staking Bonded (r:2 w:0) + // Storage: Staking Ledger (r:1 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking ErasRewardPoints (r:1 w:0) + // Storage: Staking ErasValidatorReward (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: System Account (r:2 w:2) + // Storage: Staking Payee (r:2 w:0) + // Storage: Staking ErasStakersClipped (r:1 w:0) + // Storage: Staking ErasValidatorPrefs (r:1 w:0) fn payout_stakers_dead_controller(n: u32, ) -> Weight { - (47_229_000 as Weight) - // Standard Error: 53_000 - .saturating_add((48_365_000 as Weight).saturating_mul(n as Weight)) + (121_794_000 as Weight) + // Standard Error: 19_000 + .saturating_add((49_467_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(n as Weight))) } + // Storage: Staking ErasValidatorPrefs (r:1 w:0) + // Storage: Staking Ledger (r:2 w:2) + // Storage: Staking ErasValidatorReward (r:1 w:0) + // Storage: Balances Locks (r:2 w:2) + // Storage: Staking ErasRewardPoints (r:1 w:0) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: System Account (r:2 w:2) + // Storage: Staking ErasStakersClipped (r:1 w:0) + // Storage: Staking Bonded (r:2 w:0) + // Storage: Staking Payee (r:2 w:0) fn payout_stakers_alive_staked(n: u32, ) -> Weight { - (156_788_000 as Weight) - // Standard Error: 20_000 - .saturating_add((61_280_000 as Weight).saturating_mul(n as Weight)) + (147_049_000 as Weight) + // Standard Error: 30_000 + .saturating_add((64_428_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(11 as 
Weight)) .saturating_add(RocksDbWeight::get().reads((5 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(n as Weight))) } + // Storage: Balances Locks (r:1 w:1) + // Storage: Staking Ledger (r:1 w:1) + // Storage: System Account (r:1 w:1) fn rebond(l: u32, ) -> Weight { - (47_815_000 as Weight) + (52_184_000 as Weight) // Standard Error: 1_000 - .saturating_add((65_000 as Weight).saturating_mul(l as Weight)) + .saturating_add((35_000 as Weight).saturating_mul(l as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().writes(3 as Weight)) } + // Storage: Staking ErasStakersClipped (r:0 w:2) + // Storage: Staking ErasValidatorReward (r:0 w:1) + // Storage: Staking CurrentEra (r:1 w:0) + // Storage: Staking ErasValidatorPrefs (r:0 w:2) + // Storage: Staking ErasTotalStake (r:0 w:1) + // Storage: Staking ErasStakers (r:0 w:2) + // Storage: Staking ErasRewardPoints (r:0 w:1) + // Storage: Staking HistoryDepth (r:1 w:1) + // Storage: Staking ErasStartSessionIndex (r:0 w:1) fn set_history_depth(e: u32, ) -> Weight { (0 as Weight) - // Standard Error: 74_000 - .saturating_add((34_945_000 as Weight).saturating_mul(e as Weight)) + // Standard Error: 57_000 + .saturating_add((30_689_000 as Weight).saturating_mul(e as Weight)) .saturating_add(RocksDbWeight::get().reads(2 as Weight)) .saturating_add(RocksDbWeight::get().writes(4 as Weight)) .saturating_add(RocksDbWeight::get().writes((7 as Weight).saturating_mul(e as Weight))) } + // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking SlashingSpans (r:1 w:1) + // Storage: Staking Validators (r:1 w:1) + // Storage: Staking SpanSlash (r:0 w:1) + // Storage: Staking CounterForValidators (r:1 w:1) + // Storage: System Account (r:1 w:1) + // Storage: Staking Payee (r:0 w:1) + // Storage: Staking Ledger (r:0 w:1) + // Storage: Staking Bonded (r:1 w:1) + // 
Storage: Balances Locks (r:1 w:1) fn reap_stash(s: u32, ) -> Weight { - (73_483_000 as Weight) - // Standard Error: 0 - .saturating_add((2_384_000 as Weight).saturating_mul(s as Weight)) + (75_836_000 as Weight) + // Standard Error: 2_000 + .saturating_add((2_423_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().writes(8 as Weight)) .saturating_add(RocksDbWeight::get().writes((1 as Weight).saturating_mul(s as Weight))) } + // Storage: System BlockWeight (r:1 w:1) + // Storage: Staking ErasStakers (r:0 w:1) + // Storage: Staking ErasStakersClipped (r:0 w:1) + // Storage: Staking Nominators (r:101 w:0) + // Storage: Staking SlashingSpans (r:1 w:0) + // Storage: Staking ErasStartSessionIndex (r:0 w:1) + // Storage: Staking Ledger (r:101 w:0) + // Storage: Staking Validators (r:2 w:0) + // Storage: Staking Bonded (r:101 w:0) + // Storage: Staking CounterForNominators (r:1 w:0) + // Storage: Staking ValidatorCount (r:1 w:0) + // Storage: Staking HistoryDepth (r:1 w:0) + // Storage: Staking ErasValidatorPrefs (r:0 w:1) + // Storage: Staking CounterForValidators (r:1 w:0) + // Storage: Staking CurrentEra (r:1 w:1) + // Storage: Staking MinimumValidatorCount (r:1 w:0) + // Storage: Staking ErasTotalStake (r:0 w:1) fn new_era(v: u32, n: u32, ) -> Weight { (0 as Weight) - // Standard Error: 846_000 - .saturating_add((305_234_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 42_000 - .saturating_add((48_280_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 1_492_000 + .saturating_add((299_860_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 99_000 + .saturating_add((47_937_000 as Weight).saturating_mul(n as Weight)) .saturating_add(RocksDbWeight::get().reads(10 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) 
.saturating_add(RocksDbWeight::get().writes(4 as Weight)) .saturating_add(RocksDbWeight::get().writes((3 as Weight).saturating_mul(v as Weight))) } + // Storage: Staking Validators (r:501 w:0) + // Storage: Staking Bonded (r:1500 w:0) + // Storage: Staking Nominators (r:1001 w:0) + // Storage: Staking Ledger (r:1500 w:0) + // Storage: Staking SlashingSpans (r:21 w:0) fn get_npos_voters(v: u32, n: u32, s: u32, ) -> Weight { (0 as Weight) - // Standard Error: 99_000 - .saturating_add((25_735_000 as Weight).saturating_mul(v as Weight)) - // Standard Error: 99_000 - .saturating_add((28_122_000 as Weight).saturating_mul(n as Weight)) - // Standard Error: 3_388_000 - .saturating_add((21_500_000 as Weight).saturating_mul(s as Weight)) + // Standard Error: 101_000 + .saturating_add((27_304_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 101_000 + .saturating_add((29_893_000 as Weight).saturating_mul(n as Weight)) + // Standard Error: 3_441_000 + .saturating_add((91_111_000 as Weight).saturating_mul(s as Weight)) .saturating_add(RocksDbWeight::get().reads(3 as Weight)) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(v as Weight))) .saturating_add(RocksDbWeight::get().reads((3 as Weight).saturating_mul(n as Weight))) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(s as Weight))) } + // Storage: Staking Validators (r:501 w:0) fn get_npos_targets(v: u32, ) -> Weight { (0 as Weight) - // Standard Error: 30_000 - .saturating_add((11_065_000 as Weight).saturating_mul(v as Weight)) + // Standard Error: 32_000 + .saturating_add((11_692_000 as Weight).saturating_mul(v as Weight)) .saturating_add(RocksDbWeight::get().reads(1 as Weight)) .saturating_add(RocksDbWeight::get().reads((1 as Weight).saturating_mul(v as Weight))) } + // Storage: Staking MaxNominatorsCount (r:0 w:1) + // Storage: Staking MaxValidatorsCount (r:0 w:1) + // Storage: Staking MinValidatorBond (r:0 w:1) + // Storage: Staking ChillThreshold (r:0 w:1) + 
// Storage: Staking MinNominatorBond (r:0 w:1) fn set_staking_limits() -> Weight { - (5_028_000 as Weight) - .saturating_add(RocksDbWeight::get().writes(4 as Weight)) - } + (7_325_000 as Weight) + .saturating_add(RocksDbWeight::get().writes(5 as Weight)) + } + // Storage: Staking MinValidatorBond (r:1 w:0) + // Storage: Staking ChillThreshold (r:1 w:0) + // Storage: Staking Nominators (r:1 w:0) + // Storage: Staking Validators (r:1 w:1) + // Storage: Staking CounterForValidators (r:1 w:1) + // Storage: Staking MaxValidatorsCount (r:1 w:0) + // Storage: Staking Ledger (r:1 w:0) fn chill_other() -> Weight { - (35_758_000 as Weight) - .saturating_add(RocksDbWeight::get().reads(5 as Weight)) + (62_683_000 as Weight) + .saturating_add(RocksDbWeight::get().reads(7 as Weight)) .saturating_add(RocksDbWeight::get().writes(2 as Weight)) } } diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 9bae97101977..93616b590f61 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -30,6 +30,8 @@ chrono = "0.4" serde = "1.0.126" handlebars = "3.5.0" Inflector = "0.11.4" +linked-hash-map = "0.5.4" +log = "0.4.8" [features] default = ["db"] diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index 2ef9f3914a5d..925cfd07d03e 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -17,8 +17,11 @@ use crate::BenchmarkCmd; use codec::{Decode, Encode}; -use frame_benchmarking::{Analysis, BenchmarkBatch, BenchmarkSelector}; +use frame_benchmarking::{ + Analysis, BenchmarkBatch, BenchmarkList, BenchmarkResults, BenchmarkSelector, +}; use frame_support::traits::StorageInfo; +use linked_hash_map::LinkedHashMap; use sc_cli::{CliConfiguration, ExecutionStrategy, Result, SharedParams}; use sc_client_db::BenchmarkingState; use sc_executor::NativeExecutor; @@ -31,7 +34,43 @@ use 
sp_externalities::Extensions; use sp_keystore::{testing::KeyStore, KeystoreExt, SyncCryptoStorePtr}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use sp_state_machine::StateMachine; -use std::{fmt::Debug, sync::Arc}; +use std::{fmt::Debug, sync::Arc, time}; + +// This takes multiple benchmark batches and combines all the results where the pallet, instance, +// and benchmark are the same. +fn combine_batches(batches: Vec) -> Vec { + if batches.is_empty() { + return batches + } + + let mut all_benchmarks = LinkedHashMap::<_, Vec>::new(); + + batches + .into_iter() + .for_each(|BenchmarkBatch { pallet, instance, benchmark, results }| { + // We use this key to uniquely identify a benchmark among batches. + let key = (pallet, instance, benchmark); + + match all_benchmarks.get_mut(&key) { + // We already have this benchmark, so we extend the results. + Some(x) => x.extend(results), + // New benchmark, so we add a new entry with the initial results. + None => { + all_benchmarks.insert(key, results); + }, + } + }); + + all_benchmarks + .into_iter() + .map(|((pallet, instance, benchmark), results)| BenchmarkBatch { + pallet, + instance, + benchmark, + results, + }) + .collect::>() +} impl BenchmarkCmd { /// Runs the command and benchmarks the chain. @@ -63,6 +102,10 @@ impl BenchmarkCmd { let spec = config.chain_spec; let wasm_method = self.wasm_method.into(); let strategy = self.execution.unwrap_or(ExecutionStrategy::Native); + let pallet = self.pallet.clone().unwrap_or_else(|| String::new()); + let pallet = pallet.as_bytes(); + let extrinsic = self.extrinsic.clone().unwrap_or_else(|| String::new()); + let extrinsic = extrinsic.as_bytes(); let genesis_storage = spec.build_storage()?; let mut changes = Default::default(); @@ -74,137 +117,204 @@ impl BenchmarkCmd { 2, // The runtime instances cache size. 
); - let mut extensions = Extensions::default(); - extensions.register(KeystoreExt(Arc::new(KeyStore::new()) as SyncCryptoStorePtr)); - let (offchain, _) = TestOffchainExt::new(); - let (pool, _) = TestTransactionPoolExt::new(); - extensions.register(OffchainWorkerExt::new(offchain.clone())); - extensions.register(OffchainDbExt::new(offchain)); - extensions.register(TransactionPoolExt::new(pool)); + let extensions = || -> Extensions { + let mut extensions = Extensions::default(); + extensions.register(KeystoreExt(Arc::new(KeyStore::new()) as SyncCryptoStorePtr)); + let (offchain, _) = TestOffchainExt::new(); + let (pool, _) = TestTransactionPoolExt::new(); + extensions.register(OffchainWorkerExt::new(offchain.clone())); + extensions.register(OffchainDbExt::new(offchain)); + extensions.register(TransactionPoolExt::new(pool)); + return extensions + }; + // Get Benchmark List let result = StateMachine::<_, _, NumberFor, _>::new( &state, None, &mut changes, &executor, - "Benchmark_dispatch_benchmark", - &( - &self.pallet, - &self.extrinsic, - self.lowest_range_values.clone(), - self.highest_range_values.clone(), - self.steps.clone(), - self.repeat, - !self.no_verify, - self.extra, - ) - .encode(), - extensions, + "Benchmark_benchmark_metadata", + &(self.extra).encode(), + extensions(), &sp_state_machine::backend::BackendRuntimeCode::new(&state).runtime_code()?, sp_core::testing::TaskExecutor::new(), ) .execute(strategy.into()) - .map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?; - - let results = , Vec), - String, - > as Decode>::decode(&mut &result[..]) - .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))?; - - match results { - Ok((batches, storage_info)) => { - if let Some(output_path) = &self.output { - crate::writer::write_results(&batches, &storage_info, output_path, self)?; - } + .map_err(|e| format!("Error getting benchmark list: {:?}", e))?; - for batch in batches.into_iter() { - // Print benchmark metadata - println!( - 
"Pallet: {:?}, Extrinsic: {:?}, Lowest values: {:?}, Highest values: {:?}, Steps: {:?}, Repeat: {:?}", - String::from_utf8(batch.pallet).expect("Encoded from String; qed"), - String::from_utf8(batch.benchmark).expect("Encoded from String; qed"), - self.lowest_range_values, - self.highest_range_values, - self.steps, - self.repeat, - ); + let (list, storage_info) = + <(Vec, Vec) as Decode>::decode(&mut &result[..]) + .map_err(|e| format!("Failed to decode benchmark metadata: {:?}", e))?; + + if self.list { + list_benchmark(pallet, extrinsic, list); + return Ok(()) + } - // Skip raw data + analysis if there are no results - if batch.results.is_empty() { - continue + // Use the benchmark list and the user input to determine the set of benchmarks to run. + let mut benchmarks_to_run = Vec::new(); + for item in list { + if pallet == &item.pallet[..] || pallet == &b"*"[..] { + if &pallet[..] == &b"*"[..] || &extrinsic[..] == &b"*"[..] { + for benchmark in item.benchmarks { + benchmarks_to_run.push((item.pallet.clone(), benchmark)); } + } else { + benchmarks_to_run.push((pallet.to_vec(), extrinsic.to_vec())); + } + } + } - if self.raw_data { - // Print the table header - batch.results[0] - .components - .iter() - .for_each(|param| print!("{:?},", param.0)); - - print!("extrinsic_time_ns,storage_root_time_ns,reads,repeat_reads,writes,repeat_writes,proof_size_bytes\n"); - // Print the values - batch.results.iter().for_each(|result| { - let parameters = &result.components; - parameters.iter().for_each(|param| print!("{:?},", param.1)); - // Print extrinsic time and storage root time - print!( - "{:?},{:?},{:?},{:?},{:?},{:?},{:?}\n", - result.extrinsic_time, - result.storage_root_time, - result.reads, - result.repeat_reads, - result.writes, - result.repeat_writes, - result.proof_size, - ); - }); + // Run the benchmarks + let mut batches = Vec::new(); + let mut timer = time::SystemTime::now(); + for (pallet, extrinsic) in benchmarks_to_run { + for s in 0..self.steps { + for r 
in 0..self.repeat { + // This should run only a single instance of a benchmark for `pallet` and + // `extrinsic`. All loops happen above. + let result = StateMachine::<_, _, NumberFor, _>::new( + &state, + None, + &mut changes, + &executor, + "Benchmark_dispatch_benchmark", + &( + &pallet.clone(), + &extrinsic.clone(), + self.lowest_range_values.clone(), + self.highest_range_values.clone(), + (s, self.steps), + (r, self.repeat), + !self.no_verify, + self.extra, + ) + .encode(), + extensions(), + &sp_state_machine::backend::BackendRuntimeCode::new(&state) + .runtime_code()?, + sp_core::testing::TaskExecutor::new(), + ) + .execute(strategy.into()) + .map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?; - println!(); - } + let batch = + , String> as Decode>::decode( + &mut &result[..], + ) + .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))??; - // Conduct analysis. - if !self.no_median_slopes { - println!("Median Slopes Analysis\n========"); - if let Some(analysis) = Analysis::median_slopes( - &batch.results, - BenchmarkSelector::ExtrinsicTime, - ) { - println!("-- Extrinsic Time --\n{}", analysis); - } - if let Some(analysis) = - Analysis::median_slopes(&batch.results, BenchmarkSelector::Reads) - { - println!("Reads = {:?}", analysis); - } - if let Some(analysis) = - Analysis::median_slopes(&batch.results, BenchmarkSelector::Writes) - { - println!("Writes = {:?}", analysis); - } - } - if !self.no_min_squares { - println!("Min Squares Analysis\n========"); - if let Some(analysis) = Analysis::min_squares_iqr( - &batch.results, - BenchmarkSelector::ExtrinsicTime, - ) { - println!("-- Extrinsic Time --\n{}", analysis); - } - if let Some(analysis) = - Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Reads) - { - println!("Reads = {:?}", analysis); - } - if let Some(analysis) = - Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Writes) - { - println!("Writes = {:?}", analysis); + batches.extend(batch); + + // Show 
progress information + if let Some(elapsed) = timer.elapsed().ok() { + if elapsed >= time::Duration::from_secs(5) { + timer = time::SystemTime::now(); + log::info!( + "Running Benchmark:\t{}\t{}\t{}/{}\t{}/{}", + String::from_utf8(pallet.clone()) + .expect("Encoded from String; qed"), + String::from_utf8(extrinsic.clone()) + .expect("Encoded from String; qed"), + s, + self.steps, + r, + self.repeat, + ); } } } - }, - Err(error) => eprintln!("Error: {}", error), + } + } + + // Combine all of the benchmark results, so that benchmarks of the same pallet/function + // are together. + let batches = combine_batches(batches); + + if let Some(output_path) = &self.output { + crate::writer::write_results(&batches, &storage_info, output_path, self)?; + } + + for batch in batches.into_iter() { + // Print benchmark metadata + println!( + "Pallet: {:?}, Extrinsic: {:?}, Lowest values: {:?}, Highest values: {:?}, Steps: {:?}, Repeat: {:?}", + String::from_utf8(batch.pallet).expect("Encoded from String; qed"), + String::from_utf8(batch.benchmark).expect("Encoded from String; qed"), + self.lowest_range_values, + self.highest_range_values, + self.steps, + self.repeat, + ); + + // Skip raw data + analysis if there are no results + if batch.results.is_empty() { + continue + } + + if self.raw_data { + // Print the table header + batch.results[0].components.iter().for_each(|param| print!("{:?},", param.0)); + + print!("extrinsic_time_ns,storage_root_time_ns,reads,repeat_reads,writes,repeat_writes,proof_size_bytes\n"); + // Print the values + batch.results.iter().for_each(|result| { + let parameters = &result.components; + parameters.iter().for_each(|param| print!("{:?},", param.1)); + // Print extrinsic time and storage root time + print!( + "{:?},{:?},{:?},{:?},{:?},{:?},{:?}\n", + result.extrinsic_time, + result.storage_root_time, + result.reads, + result.repeat_reads, + result.writes, + result.repeat_writes, + result.proof_size, + ); + }); + + println!(); + } + + // Conduct analysis. 
+ if !self.no_median_slopes { + println!("Median Slopes Analysis\n========"); + if let Some(analysis) = + Analysis::median_slopes(&batch.results, BenchmarkSelector::ExtrinsicTime) + { + println!("-- Extrinsic Time --\n{}", analysis); + } + if let Some(analysis) = + Analysis::median_slopes(&batch.results, BenchmarkSelector::Reads) + { + println!("Reads = {:?}", analysis); + } + if let Some(analysis) = + Analysis::median_slopes(&batch.results, BenchmarkSelector::Writes) + { + println!("Writes = {:?}", analysis); + } + } + if !self.no_min_squares { + println!("Min Squares Analysis\n========"); + if let Some(analysis) = + Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::ExtrinsicTime) + { + println!("-- Extrinsic Time --\n{}", analysis); + } + if let Some(analysis) = + Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Reads) + { + println!("Reads = {:?}", analysis); + } + if let Some(analysis) = + Analysis::min_squares_iqr(&batch.results, BenchmarkSelector::Writes) + { + println!("Writes = {:?}", analysis); + } + } } Ok(()) @@ -223,3 +333,41 @@ impl CliConfiguration for BenchmarkCmd { }) } } + +/// List the benchmarks available in the runtime, in a CSV friendly format. +/// +/// If `pallet_input` and `extrinsic_input` is empty, we list everything. +/// +/// If `pallet_input` is present, we only list the benchmarks for that pallet. +/// +/// If `extrinsic_input` is `*`, we will hide the individual benchmarks for each pallet, and just +/// show a single line for each available pallet. 
+fn list_benchmark(pallet_input: &[u8], extrinsic_input: &[u8], list: Vec) { + let filtered_list = list + .into_iter() + .filter(|item| pallet_input.is_empty() || pallet_input == &item.pallet) + .collect::>(); + + if filtered_list.is_empty() { + println!("Pallet not found."); + return + } + + println!("pallet, benchmark"); + for item in filtered_list { + let pallet_string = + String::from_utf8(item.pallet.clone()).expect("Encoded from String; qed"); + + if extrinsic_input == &b"*"[..] { + println!("{}, *", pallet_string) + } else { + for benchmark in item.benchmarks { + println!( + "{}, {}", + pallet_string, + String::from_utf8(benchmark).expect("Encoded from String; qed"), + ); + } + } + } +} diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index 0642ddabc137..41629a866f72 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -31,16 +31,16 @@ fn parse_pallet_name(pallet: &str) -> String { #[derive(Debug, structopt::StructOpt)] pub struct BenchmarkCmd { /// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`). - #[structopt(short, long, parse(from_str = parse_pallet_name))] - pub pallet: String, + #[structopt(short, long, parse(from_str = parse_pallet_name), required_unless = "list")] + pub pallet: Option, /// Select an extrinsic inside the pallet to benchmark, or `*` for all. - #[structopt(short, long)] - pub extrinsic: String, + #[structopt(short, long, required_unless = "list")] + pub extrinsic: Option, /// Select how many samples we should take across the variable components. - #[structopt(short, long, use_delimiter = true)] - pub steps: Vec, + #[structopt(short, long, default_value = "1")] + pub steps: u32, /// Indicates lowest values for each of the component ranges. #[structopt(long = "low", use_delimiter = true)] @@ -129,4 +129,12 @@ pub struct BenchmarkCmd { /// Limit the memory the database cache can use. 
#[structopt(long = "db-cache", value_name = "MiB", default_value = "128")] pub database_cache_size: u32, + + /// List the benchmarks available. + /// + /// * If nothing else is specified, all pallets and benchmarks will be listed. + /// * If the `pallet` argument is passed, then we will only list benchmarks for that pallet. + /// * If the `extrinsic` argument is set to `*`, we will hide the individual benchmarks. + #[structopt(long)] + pub list: bool, } diff --git a/utils/frame/benchmarking-cli/src/writer.rs b/utils/frame/benchmarking-cli/src/writer.rs index 16c93081ac6e..d80a17e1b2db 100644 --- a/utils/frame/benchmarking-cli/src/writer.rs +++ b/utils/frame/benchmarking-cli/src/writer.rs @@ -71,7 +71,7 @@ struct BenchmarkData { // This forwards some specific metadata from the `BenchmarkCmd` #[derive(Serialize, Default, Debug, Clone)] struct CmdData { - steps: Vec, + steps: u32, repeat: u32, lowest_range_values: Vec, highest_range_values: Vec, From df0b9045a3067984e388d23304bcf967597a0e9f Mon Sep 17 00:00:00 2001 From: Arkadiy Paronyan Date: Mon, 2 Aug 2021 10:27:18 +0200 Subject: [PATCH 6/8] Warp sync part I (#9227) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Started warp sync * BABE & GRANDPA recovery * Warp sync protocol * Sync warp proofs first * Added basic documentation * Apply suggestions from code review Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Style changes * Apply suggestions from code review Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * fmt * Apply suggestions from code review Co-authored-by: André Silva <123550+andresilva@users.noreply.github.com> * Fixed chage trie pruning wrt missing blocks * Restore parent finalization * fmt * fmt * Revert pwasm-utils bump * Change error type & check API version * Apply suggestions from code review Co-authored-by: Bastian Köcher * Build fix * Fixed target block check * Formatting Co-authored-by: André Silva 
<123550+andresilva@users.noreply.github.com> Co-authored-by: Bastian Köcher --- Cargo.lock | 64 ++--- Cargo.toml | 1 - bin/node-template/node/src/service.rs | 11 + bin/node-template/runtime/src/lib.rs | 4 + bin/node/cli/Cargo.toml | 2 - bin/node/cli/src/service.rs | 21 +- bin/node/runtime/src/lib.rs | 4 + client/cli/src/arg_enums.rs | 3 + client/consensus/aura/src/import_queue.rs | 36 ++- client/consensus/babe/src/lib.rs | 143 ++++++++--- client/consensus/babe/src/tests.rs | 10 +- client/consensus/common/src/block_import.rs | 7 + client/consensus/common/src/import_queue.rs | 56 +++-- .../common/src/import_queue/basic_queue.rs | 7 +- client/consensus/epochs/src/lib.rs | 27 ++- .../manual-seal/src/consensus/babe.rs | 15 +- client/consensus/manual-seal/src/lib.rs | 19 +- client/consensus/pow/src/lib.rs | 27 +-- client/db/src/cache/list_cache.rs | 1 + client/db/src/changes_tries_storage.rs | 13 +- client/db/src/lib.rs | 35 +-- client/finality-grandpa/Cargo.toml | 2 +- client/finality-grandpa/src/environment.rs | 2 +- client/finality-grandpa/src/import.rs | 103 +++++++- client/finality-grandpa/src/lib.rs | 7 +- client/finality-grandpa/src/observer.rs | 3 +- client/finality-grandpa/src/tests.rs | 4 + .../src/warp_proof.rs} | 176 ++++++++++---- client/informant/src/display.rs | 46 ++-- client/informant/src/lib.rs | 4 +- client/network/Cargo.toml | 1 + client/network/README.md | 63 +++++ client/network/src/behaviour.rs | 35 ++- client/network/src/config.rs | 7 + client/network/src/gossip/tests.rs | 17 +- client/network/src/lib.rs | 6 +- client/network/src/protocol.rs | 58 ++++- client/network/src/protocol/sync.rs | 227 ++++++++++++++++-- client/network/src/protocol/sync/warp.rs | 181 ++++++++++++++ client/network/src/service.rs | 9 + client/network/src/service/tests.rs | 18 +- .../src/warp_request_handler.rs} | 137 +++++------ client/network/test/src/lib.rs | 51 ++-- client/rpc/src/state/tests.rs | 20 +- client/service/src/builder.rs | 21 +- 
client/service/src/client/client.rs | 32 ++- client/service/test/src/client/mod.rs | 9 + primitives/finality-grandpa/src/lib.rs | 5 +- test-utils/runtime/src/lib.rs | 4 + test-utils/test-runner/Cargo.toml | 1 + test-utils/test-runner/src/client.rs | 5 +- 51 files changed, 1310 insertions(+), 450 deletions(-) rename client/{finality-grandpa-warp-sync/src/proof.rs => finality-grandpa/src/warp_proof.rs} (68%) create mode 100644 client/network/src/protocol/sync/warp.rs rename client/{finality-grandpa-warp-sync/src/lib.rs => network/src/warp_request_handler.rs} (51%) diff --git a/Cargo.lock b/Cargo.lock index 3d22e0c0b6bd..11359c078140 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1714,7 +1714,7 @@ dependencies = [ "num-traits", "parity-scale-codec", "parking_lot 0.11.1", - "rand 0.8.3", + "rand 0.8.4", ] [[package]] @@ -1724,7 +1724,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cfcf0ed7fe52a17a03854ec54a9f76d6d84508d1c0e66bc1793301c73fc8493c" dependencies = [ "byteorder", - "rand 0.8.3", + "rand 0.8.4", "rustc-hex", "static_assertions", ] @@ -3384,7 +3384,7 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "rand 0.8.3", + "rand 0.8.4", "smallvec 1.6.1", "socket2 0.4.0", "void", @@ -4056,7 +4056,7 @@ dependencies = [ "num-complex", "num-rational 0.4.0", "num-traits", - "rand 0.8.3", + "rand 0.8.4", "rand_distr", "simba", "typenum", @@ -4216,7 +4216,6 @@ dependencies = [ "sc-consensus-slots", "sc-consensus-uncles", "sc-finality-grandpa", - "sc-finality-grandpa-warp-sync", "sc-keystore", "sc-network", "sc-offchain", @@ -4903,7 +4902,7 @@ dependencies = [ "paste 1.0.4", "pretty_assertions 0.7.2", "pwasm-utils", - "rand 0.8.3", + "rand 0.8.4", "rand_pcg 0.3.0", "serde", "smallvec 1.6.1", @@ -5753,7 +5752,7 @@ dependencies = [ "log", "memmap2", "parking_lot 0.11.1", - "rand 0.8.3", + "rand 0.8.4", ] [[package]] @@ -6482,7 +6481,7 @@ checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" dependencies = [ 
"env_logger 0.8.3", "log", - "rand 0.8.3", + "rand 0.8.4", ] [[package]] @@ -6550,9 +6549,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ef9e7e66b4468674bfcb0c81af8b7fa0bb154fa9f28eb840da5c447baeb8d7e" +checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" dependencies = [ "libc", "rand_chacha 0.3.0", @@ -6620,7 +6619,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "051b398806e42b9cd04ad9ec8f81e355d0a382c543ac6672c62f5a5b452ef142" dependencies = [ "num-traits", - "rand 0.8.3", + "rand 0.8.4", ] [[package]] @@ -7638,7 +7637,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.11.1", "pin-project 1.0.5", - "rand 0.7.3", + "rand 0.8.4", "sc-block-builder", "sc-client-api", "sc-consensus", @@ -7700,33 +7699,6 @@ dependencies = [ "substrate-test-runtime-client", ] -[[package]] -name = "sc-finality-grandpa-warp-sync" -version = "0.10.0-dev" -dependencies = [ - "derive_more", - "finality-grandpa", - "futures 0.3.15", - "log", - "num-traits", - "parity-scale-codec", - "parking_lot 0.11.1", - "prost", - "rand 0.8.3", - "sc-block-builder", - "sc-client-api", - "sc-consensus", - "sc-finality-grandpa", - "sc-network", - "sc-service", - "sp-blockchain", - "sp-consensus", - "sp-finality-grandpa", - "sp-keyring", - "sp-runtime", - "substrate-test-runtime-client", -] - [[package]] name = "sc-informant" version = "0.10.0-dev" @@ -7827,6 +7799,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-core", + "sp-finality-grandpa", "sp-keyring", "sp-runtime", "sp-test-primitives", @@ -8710,7 +8683,7 @@ dependencies = [ "futures 0.3.15", "httparse", "log", - "rand 0.8.3", + "rand 0.8.4", "sha-1 0.9.4", ] @@ -9550,7 +9523,7 @@ dependencies = [ "lazy_static", "nalgebra", "num-traits", - "rand 0.8.3", + "rand 0.8.4", ] [[package]] @@ -9966,7 +9939,7 @@ checksum = 
"dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" dependencies = [ "cfg-if 1.0.0", "libc", - "rand 0.8.3", + "rand 0.8.4", "redox_syscall 0.2.5", "remove_dir_all", "winapi 0.3.9", @@ -10012,6 +9985,7 @@ dependencies = [ "sp-consensus-babe", "sp-core", "sp-externalities", + "sp-finality-grandpa", "sp-inherents", "sp-keyring", "sp-keystore", @@ -10638,7 +10612,7 @@ dependencies = [ "ipnet", "lazy_static", "log", - "rand 0.8.3", + "rand 0.8.4", "smallvec 1.6.1", "thiserror", "tinyvec", @@ -11287,7 +11261,7 @@ dependencies = [ "mach", "memoffset 0.6.1", "more-asserts", - "rand 0.8.3", + "rand 0.8.4", "region", "thiserror", "wasmtime-environ", @@ -11455,7 +11429,7 @@ dependencies = [ "log", "nohash-hasher", "parking_lot 0.11.1", - "rand 0.8.3", + "rand 0.8.4", "static_assertions", ] diff --git a/Cargo.toml b/Cargo.toml index 03115fe5593f..2834344153a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -40,7 +40,6 @@ members = [ "client/executor/wasmi", "client/executor/wasmtime", "client/finality-grandpa", - "client/finality-grandpa-warp-sync", "client/informant", "client/keystore", "client/light", diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index dbdb3074d686..9eba1d0e9e05 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -162,6 +162,10 @@ pub fn new_full(mut config: Configuration) -> Result } config.network.extra_sets.push(sc_finality_grandpa::grandpa_peers_set_config()); + let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + grandpa_link.shared_authority_set().clone(), + )); let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { @@ -172,6 +176,7 @@ pub fn new_full(mut config: Configuration) -> Result import_queue, on_demand: None, block_announce_validator_builder: None, + warp_sync: Some(warp_sync), })?; if config.offchain_worker.enabled { @@ -380,6 +385,11 @@ pub 
fn new_light(mut config: Configuration) -> Result telemetry: telemetry.as_ref().map(|x| x.handle()), })?; + let warp_sync = Arc::new(sc_finality_grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + grandpa_link.shared_authority_set().clone(), + )); + let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, @@ -389,6 +399,7 @@ pub fn new_light(mut config: Configuration) -> Result import_queue, on_demand: Some(on_demand.clone()), block_announce_validator_builder: None, + warp_sync: Some(warp_sync), })?; if config.offchain_worker.enabled { diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 63da72102df3..908c5ea455cc 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -402,6 +402,10 @@ impl_runtime_apis! { Grandpa::grandpa_authorities() } + fn current_set_id() -> fg_primitives::SetId { + Grandpa::current_set_id() + } + fn submit_report_equivocation_unsigned_extrinsic( _equivocation_proof: fg_primitives::EquivocationProof< ::Hash, diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index 7c8c2d0e3d86..12a76cf323e4 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -77,7 +77,6 @@ sc-service = { version = "0.10.0-dev", default-features = false, path = "../../. 
sc-tracing = { version = "4.0.0-dev", path = "../../../client/tracing" } sc-telemetry = { version = "4.0.0-dev", path = "../../../client/telemetry" } sc-authority-discovery = { version = "0.10.0-dev", path = "../../../client/authority-discovery" } -sc-finality-grandpa-warp-sync = { version = "0.10.0-dev", path = "../../../client/finality-grandpa-warp-sync", optional = true } # frame dependencies pallet-indices = { version = "4.0.0-dev", path = "../../../frame/indices" } @@ -161,7 +160,6 @@ cli = [ "frame-benchmarking-cli", "substrate-frame-cli", "sc-service/db", - "sc-finality-grandpa-warp-sync", "structopt", "substrate-build-script-utils", "try-runtime-cli", diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index e7181d3caec3..301df01c55f8 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -228,16 +228,10 @@ pub fn new_full_base( let auth_disc_publish_non_global_ips = config.network.allow_non_globals_in_dht; config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); - - #[cfg(feature = "cli")] - config.network.request_response_protocols.push( - sc_finality_grandpa_warp_sync::request_response_config_for_chain( - &config, - task_manager.spawn_handle(), - backend.clone(), - import_setup.1.shared_authority_set().clone(), - ), - ); + let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + import_setup.1.shared_authority_set().clone(), + )); let (network, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { @@ -248,6 +242,7 @@ pub fn new_full_base( import_queue, on_demand: None, block_announce_validator_builder: None, + warp_sync: Some(warp_sync), })?; if config.offchain_worker.enabled { @@ -512,6 +507,11 @@ pub fn new_light_base( telemetry.as_ref().map(|x| x.handle()), )?; + let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new( + backend.clone(), + grandpa_link.shared_authority_set().clone(), + )); + let (network, 
system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, @@ -521,6 +521,7 @@ pub fn new_light_base( import_queue, on_demand: Some(on_demand.clone()), block_announce_validator_builder: None, + warp_sync: Some(warp_sync), })?; let enable_grandpa = !config.disable_grandpa; diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 181f5fd42376..37b4b24fa6a2 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -1336,6 +1336,10 @@ impl_runtime_apis! { Grandpa::grandpa_authorities() } + fn current_set_id() -> fg_primitives::SetId { + Grandpa::current_set_id() + } + fn submit_report_equivocation_unsigned_extrinsic( equivocation_proof: fg_primitives::EquivocationProof< ::Hash, diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index 83b1c57e071a..72741d7bea2b 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -242,6 +242,8 @@ arg_enum! { Fast, // Download blocks without executing them. Download latest state without proofs. FastUnsafe, + // Prove finality and download the latest state. 
+ Warp, } } @@ -253,6 +255,7 @@ impl Into for SyncMode { sc_network::config::SyncMode::Fast { skip_proofs: false, storage_chain_mode: false }, SyncMode::FastUnsafe => sc_network::config::SyncMode::Fast { skip_proofs: true, storage_chain_mode: false }, + SyncMode::Warp => sc_network::config::SyncMode::Warp, } } } diff --git a/client/consensus/aura/src/import_queue.rs b/client/consensus/aura/src/import_queue.rs index 96045fde43a9..a8b046270976 100644 --- a/client/consensus/aura/src/import_queue.rs +++ b/client/consensus/aura/src/import_queue.rs @@ -35,7 +35,7 @@ use sp_blockchain::{ well_known_cache_keys::{self, Id as CacheKeyId}, HeaderBackend, ProvideCache, }; -use sp_consensus::{BlockOrigin, CanAuthorWith, Error as ConsensusError}; +use sp_consensus::{CanAuthorWith, Error as ConsensusError}; use sp_consensus_aura::{ digests::CompatibleDigestItem, inherents::AuraInherentData, AuraApi, ConsensusLog, AURA_ENGINE_ID, @@ -46,7 +46,6 @@ use sp_inherents::{CreateInherentDataProviders, InherentDataProvider as _}; use sp_runtime::{ generic::{BlockId, OpaqueDigestItemId}, traits::{Block as BlockT, DigestItemFor, Header}, - Justifications, }; use std::{fmt::Debug, hash::Hash, marker::PhantomData, sync::Arc}; @@ -206,13 +205,10 @@ where { async fn verify( &mut self, - origin: BlockOrigin, - header: B::Header, - justifications: Option, - mut body: Option>, + mut block: BlockImportParams, ) -> Result<(BlockImportParams, Option)>>), String> { - let hash = header.hash(); - let parent_hash = *header.parent_hash(); + let hash = block.header.hash(); + let parent_hash = *block.header.parent_hash(); let authorities = authorities(self.client.as_ref(), &BlockId::Hash(parent_hash)) .map_err(|e| format!("Could not fetch authorities at {:?}: {:?}", parent_hash, e))?; @@ -234,7 +230,7 @@ where let checked_header = check_header::( &self.client, slot_now + 1, - header, + block.header, hash, &authorities[..], self.check_for_equivocation, @@ -245,8 +241,8 @@ where // if the body is passed 
through, we need to use the runtime // to check that the internally-set timestamp in the inherents // actually matches the slot set in the seal. - if let Some(inner_body) = body.take() { - let block = B::new(pre_header.clone(), inner_body); + if let Some(inner_body) = block.body.take() { + let new_block = B::new(pre_header.clone(), inner_body); inherent_data.aura_replace_inherent_data(slot); @@ -261,7 +257,7 @@ where .map_err(|e| format!("{:?}", e))? { self.check_inherents( - block.clone(), + new_block.clone(), BlockId::Hash(parent_hash), inherent_data, create_inherent_data_providers, @@ -270,8 +266,8 @@ where .map_err(|e| e.to_string())?; } - let (_, inner_body) = block.deconstruct(); - body = Some(inner_body); + let (_, inner_body) = new_block.deconstruct(); + block.body = Some(inner_body); } trace!(target: "aura", "Checked {:?}; importing.", pre_header); @@ -298,14 +294,12 @@ where _ => None, }); - let mut import_block = BlockImportParams::new(origin, pre_header); - import_block.post_digests.push(seal); - import_block.body = body; - import_block.justifications = justifications; - import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain); - import_block.post_hash = Some(hash); + block.header = pre_header; + block.post_digests.push(seal); + block.fork_choice = Some(ForkChoiceStrategy::LongestChain); + block.post_hash = Some(hash); - Ok((import_block, maybe_keys)) + Ok((block, maybe_keys)) }, CheckedHeader::Deferred(a, b) => { debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index b09cd6ad86b8..172bad669daa 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -118,7 +118,6 @@ use sp_keystore::{SyncCryptoStore, SyncCryptoStorePtr}; use sp_runtime::{ generic::{BlockId, OpaqueDigestItemId}, traits::{Block as BlockT, DigestItemFor, Header, Zero}, - Justifications, }; pub use sc_consensus_slots::SlotProportion; @@ -187,6 
+186,19 @@ impl EpochT for Epoch { } } +impl From for Epoch { + fn from(epoch: sp_consensus_babe::Epoch) -> Self { + Epoch { + epoch_index: epoch.epoch_index, + start_slot: epoch.start_slot, + duration: epoch.duration, + authorities: epoch.authorities, + randomness: epoch.randomness, + config: epoch.config, + } + } +} + impl Epoch { /// Create the genesis epoch (epoch #0). This is defined to start at the slot of /// the first block, so that has to be provided. @@ -1128,24 +1140,29 @@ where { async fn verify( &mut self, - origin: BlockOrigin, - header: Block::Header, - justifications: Option, - mut body: Option>, + mut block: BlockImportParams, ) -> BlockVerificationResult { trace!( target: "babe", "Verifying origin: {:?} header: {:?} justification(s): {:?} body: {:?}", - origin, - header, - justifications, - body, + block.origin, + block.header, + block.justifications, + block.body, ); - let hash = header.hash(); - let parent_hash = *header.parent_hash(); + let hash = block.header.hash(); + let parent_hash = *block.header.parent_hash(); + + if block.with_state() { + // When importing whole state we don't calculate epoch descriptor, but rather + // read it from the state after import. We also skip all verifications + // because there's no parent state and we trust the sync module to verify + // that the state is correct and finalized. 
+ return Ok((block, Default::default())) + } - debug!(target: "babe", "We have {:?} logs in this header", header.digest().logs().len()); + debug!(target: "babe", "We have {:?} logs in this header", block.header.digest().logs().len()); let create_inherent_data_providers = self .create_inherent_data_providers @@ -1160,7 +1177,7 @@ where .header_metadata(parent_hash) .map_err(Error::::FetchParentHeader)?; - let pre_digest = find_pre_digest::(&header)?; + let pre_digest = find_pre_digest::(&block.header)?; let (check_header, epoch_descriptor) = { let epoch_changes = self.epoch_changes.shared_data(); let epoch_descriptor = epoch_changes @@ -1179,7 +1196,7 @@ where // We add one to the current slot to allow for some small drift. // FIXME #1019 in the future, alter this queue to allow deferring of headers let v_params = verification::VerificationParams { - header: header.clone(), + header: block.header.clone(), pre_digest: Some(pre_digest), slot_now: slot_now + 1, epoch: viable_epoch.as_ref(), @@ -1203,9 +1220,9 @@ where .check_and_report_equivocation( slot_now, slot, - &header, + &block.header, &verified_info.author, - &origin, + &block.origin, ) .await { @@ -1215,23 +1232,23 @@ where // if the body is passed through, we need to use the runtime // to check that the internally-set timestamp in the inherents // actually matches the slot set in the seal. 
- if let Some(inner_body) = body.take() { + if let Some(inner_body) = block.body { let mut inherent_data = create_inherent_data_providers .create_inherent_data() .map_err(Error::::CreateInherents)?; inherent_data.babe_replace_inherent_data(slot); - let block = Block::new(pre_header.clone(), inner_body); + let new_block = Block::new(pre_header.clone(), inner_body); self.check_inherents( - block.clone(), + new_block.clone(), BlockId::Hash(parent_hash), inherent_data, create_inherent_data_providers, ) .await?; - let (_, inner_body) = block.deconstruct(); - body = Some(inner_body); + let (_, inner_body) = new_block.deconstruct(); + block.body = Some(inner_body); } trace!(target: "babe", "Checked {:?}; importing.", pre_header); @@ -1242,17 +1259,15 @@ where "pre_header" => ?pre_header, ); - let mut import_block = BlockImportParams::new(origin, pre_header); - import_block.post_digests.push(verified_info.seal); - import_block.body = body; - import_block.justifications = justifications; - import_block.intermediates.insert( + block.header = pre_header; + block.post_digests.push(verified_info.seal); + block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), Box::new(BabeIntermediate:: { epoch_descriptor }) as Box<_>, ); - import_block.post_hash = Some(hash); + block.post_hash = Some(hash); - Ok((import_block, Default::default())) + Ok((block, Default::default())) }, CheckedHeader::Deferred(a, b) => { debug!(target: "babe", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); @@ -1305,6 +1320,72 @@ impl BabeBlockImport { } } +impl BabeBlockImport +where + Block: BlockT, + Inner: BlockImport> + Send + Sync, + Inner::Error: Into, + Client: HeaderBackend + + HeaderMetadata + + AuxStore + + ProvideRuntimeApi + + ProvideCache + + Send + + Sync, + Client::Api: BabeApi + ApiExt, +{ + /// Import whole state after warp sync. + // This function makes multiple transactions to the DB. If one of them fails we may + // end up in an inconsistent state and have to resync. 
+ async fn import_state( + &mut self, + mut block: BlockImportParams>, + new_cache: HashMap>, + ) -> Result { + let hash = block.post_hash(); + let parent_hash = *block.header.parent_hash(); + let number = *block.header.number(); + + block.fork_choice = Some(ForkChoiceStrategy::Custom(true)); + // Reset block weight. + aux_schema::write_block_weight(hash, 0, |values| { + block + .auxiliary + .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + + // First make the client import the state. + let import_result = self.inner.import_block(block, new_cache).await; + let aux = match import_result { + Ok(ImportResult::Imported(aux)) => aux, + Ok(r) => + return Err(ConsensusError::ClientImport(format!( + "Unexpected import result: {:?}", + r + ))), + Err(r) => return Err(r.into()), + }; + + // Read epoch info from the imported state. + let block_id = BlockId::hash(hash); + let current_epoch = self.client.runtime_api().current_epoch(&block_id).map_err(|e| { + ConsensusError::ClientImport(babe_err::(Error::RuntimeApi(e)).into()) + })?; + let next_epoch = self.client.runtime_api().next_epoch(&block_id).map_err(|e| { + ConsensusError::ClientImport(babe_err::(Error::RuntimeApi(e)).into()) + })?; + + let mut epoch_changes = self.epoch_changes.shared_data_locked(); + epoch_changes.reset(parent_hash, hash, number, current_epoch.into(), next_epoch.into()); + aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { + self.client.insert_aux(insert, []) + }) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + Ok(ImportResult::Imported(aux)) + } +} + #[async_trait::async_trait] impl BlockImport for BabeBlockImport where @@ -1336,7 +1417,7 @@ where match self.client.status(BlockId::Hash(hash)) { Ok(sp_blockchain::BlockStatus::InChain) => { // When re-importing existing block strip away intermediates. 
- let _ = block.take_intermediate::>(INTERMEDIATE_KEY)?; + let _ = block.take_intermediate::>(INTERMEDIATE_KEY); block.fork_choice = Some(ForkChoiceStrategy::Custom(false)); return self.inner.import_block(block, new_cache).await.map_err(Into::into) }, @@ -1344,6 +1425,10 @@ where Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), } + if block.with_state() { + return self.import_state(block, new_cache).await + } + let pre_digest = find_pre_digest::(&block.header).expect( "valid babe headers must contain a predigest; header has been already verified; qed", ); diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index d21911a7fe50..4b4e0a9d0f3d 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -228,7 +228,6 @@ pub struct BabeTestNet { } type TestHeader = ::Header; -type TestExtrinsic = ::Extrinsic; type TestSelectChain = substrate_test_runtime_client::LongestChain; @@ -257,14 +256,11 @@ impl Verifier for TestVerifier { /// presented to the User in the logs. async fn verify( &mut self, - origin: BlockOrigin, - mut header: TestHeader, - justifications: Option, - body: Option>, + mut block: BlockImportParams, ) -> Result<(BlockImportParams, Option)>>), String> { // apply post-sealing mutations (i.e. stripping seal, if desired). - (self.mutator)(&mut header, Stage::PostSeal); - self.inner.verify(origin, header, justifications, body).await + (self.mutator)(&mut block.header, Stage::PostSeal); + self.inner.verify(block).await } } diff --git a/client/consensus/common/src/block_import.rs b/client/consensus/common/src/block_import.rs index 616378fc9b18..83fb11834dae 100644 --- a/client/consensus/common/src/block_import.rs +++ b/client/consensus/common/src/block_import.rs @@ -112,6 +112,8 @@ pub struct BlockCheckParams { pub parent_hash: Block::Hash, /// Allow importing the block skipping state verification if parent state is missing. 
pub allow_missing_state: bool, + /// Allow importing the block if parent block is missing. + pub allow_missing_parent: bool, /// Re-validate existing block. pub import_existing: bool, } @@ -306,6 +308,11 @@ impl BlockImportParams { .downcast_mut::() .ok_or(Error::InvalidIntermediate) } + + /// Check if this block contains state import action + pub fn with_state(&self) -> bool { + matches!(self.state_action, StateAction::ApplyChanges(StorageChanges::Import(_))) + } } /// Block import trait. diff --git a/client/consensus/common/src/import_queue.rs b/client/consensus/common/src/import_queue.rs index b1a24e5620d3..57d80cd41c64 100644 --- a/client/consensus/common/src/import_queue.rs +++ b/client/consensus/common/src/import_queue.rs @@ -26,7 +26,7 @@ //! instantiated. The `BasicQueue` and `BasicVerifier` traits allow serial //! queues to be instantiated simply. -use std::collections::HashMap; +use std::{collections::HashMap, iter::FromIterator}; use log::{debug, trace}; use sp_runtime::{ @@ -97,10 +97,7 @@ pub trait Verifier: Send + Sync { /// presented to the User in the logs. async fn verify( &mut self, - origin: BlockOrigin, - header: B::Header, - justifications: Option, - body: Option>, + block: BlockImportParams, ) -> Result<(BlockImportParams, Option)>>), String>; } @@ -222,7 +219,7 @@ pub(crate) async fn import_single_block_metered< trace!(target: "sync", "Header {} has {:?} logs", block.hash, header.digest().logs().len()); let number = header.number().clone(); - let hash = header.hash(); + let hash = block.hash; let parent_hash = header.parent_hash().clone(); let import_handler = |import| match import { @@ -260,6 +257,7 @@ pub(crate) async fn import_single_block_metered< parent_hash, allow_missing_state: block.allow_missing_state, import_existing: block.import_existing, + allow_missing_parent: block.state.is_some(), }) .await, )? 
{ @@ -268,32 +266,14 @@ pub(crate) async fn import_single_block_metered< } let started = wasm_timer::Instant::now(); - let (mut import_block, maybe_keys) = verifier - .verify(block_origin, header, justifications, block.body) - .await - .map_err(|msg| { - if let Some(ref peer) = peer { - trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); - } else { - trace!(target: "sync", "Verifying {}({}) failed: {}", number, hash, msg); - } - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification(false, started.elapsed()); - } - BlockImportError::VerificationFailed(peer.clone(), msg) - })?; - - if let Some(metrics) = metrics.as_ref() { - metrics.report_verification(true, started.elapsed()); - } - let mut cache = HashMap::new(); - if let Some(keys) = maybe_keys { - cache.extend(keys.into_iter()); - } + let mut import_block = BlockImportParams::new(block_origin, header); + import_block.body = block.body; + import_block.justifications = justifications; + import_block.post_hash = Some(hash); import_block.import_existing = block.import_existing; import_block.indexed_body = block.indexed_body; - let mut import_block = import_block.clear_storage_changes_and_mutate(); + if let Some(state) = block.state { let changes = crate::block_import::StorageChanges::Import(state); import_block.state_action = StateAction::ApplyChanges(changes); @@ -303,6 +283,24 @@ pub(crate) async fn import_single_block_metered< import_block.state_action = StateAction::ExecuteIfPossible; } + let (import_block, maybe_keys) = verifier.verify(import_block).await.map_err(|msg| { + if let Some(ref peer) = peer { + trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); + } else { + trace!(target: "sync", "Verifying {}({}) failed: {}", number, hash, msg); + } + if let Some(metrics) = metrics.as_ref() { + metrics.report_verification(false, started.elapsed()); + } + BlockImportError::VerificationFailed(peer.clone(), msg) + })?; + + if let 
Some(metrics) = metrics.as_ref() { + metrics.report_verification(true, started.elapsed()); + } + + let cache = HashMap::from_iter(maybe_keys.unwrap_or_default()); + let import_block = import_block.clear_storage_changes_and_mutate(); let imported = import_handle.import_block(import_block, cache).await; if let Some(metrics) = metrics.as_ref() { metrics.report_verification_and_import(started.elapsed()); diff --git a/client/consensus/common/src/import_queue/basic_queue.rs b/client/consensus/common/src/import_queue/basic_queue.rs index 2de5f578a7a6..dbf779c074f2 100644 --- a/client/consensus/common/src/import_queue/basic_queue.rs +++ b/client/consensus/common/src/import_queue/basic_queue.rs @@ -455,12 +455,9 @@ mod tests { impl Verifier for () { async fn verify( &mut self, - origin: BlockOrigin, - header: Header, - _justifications: Option, - _body: Option>, + block: BlockImportParams, ) -> Result<(BlockImportParams, Option)>>), String> { - Ok((BlockImportParams::new(origin, header), None)) + Ok((BlockImportParams::new(block.origin, block.header), None)) } } diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index e93724e5895f..52327dbbf60e 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -28,7 +28,7 @@ use sp_runtime::traits::{Block as BlockT, NumberFor, One, Zero}; use std::{ borrow::{Borrow, BorrowMut}, collections::BTreeMap, - ops::Add, + ops::{Add, Sub}, }; /// A builder for `is_descendent_of` functions. @@ -228,7 +228,7 @@ impl ViableEpochDescriptor { } /// Persisted epoch stored in EpochChanges. -#[derive(Clone, Encode, Decode, Debug)] +#[derive(Clone, Encode, Decode)] pub enum PersistedEpoch { /// Genesis persisted epoch data. epoch_0, epoch_1. Genesis(E, E), @@ -322,7 +322,7 @@ where impl EpochChanges where Hash: PartialEq + Ord + AsRef<[u8]> + AsMut<[u8]> + Copy, - Number: Ord + One + Zero + Add + Copy, + Number: Ord + One + Zero + Add + Sub + Copy, { /// Create a new epoch change. 
pub fn new() -> Self { @@ -614,6 +614,25 @@ where pub fn tree(&self) -> &ForkTree> { &self.inner } + + /// Reset to a specified pair of epochs, as if they were announced at blocks `parent_hash` and `hash`. + pub fn reset(&mut self, parent_hash: Hash, hash: Hash, number: Number, current: E, next: E) { + self.inner = ForkTree::new(); + self.epochs.clear(); + let persisted = PersistedEpoch::Regular(current); + let header = PersistedEpochHeader::from(&persisted); + let _res = self.inner.import(parent_hash, number - One::one(), header, &|_, _| { + Ok(false) as Result> + }); + self.epochs.insert((parent_hash, number - One::one()), persisted); + + let persisted = PersistedEpoch::Regular(next); + let header = PersistedEpochHeader::from(&persisted); + let _res = self.inner.import(hash, number, header, &|_, _| { + Ok(true) as Result> + }); + self.epochs.insert((hash, number), persisted); + } } /// Type alias to produce the epoch-changes tree from a block type. @@ -694,6 +713,7 @@ mod tests { #[test] fn genesis_epoch_is_created_but_not_imported() { + // // A - B // \ // — C @@ -735,6 +755,7 @@ mod tests { #[test] fn epoch_changes_between_blocks() { + // // A - B // \ // — C diff --git a/client/consensus/manual-seal/src/consensus/babe.rs b/client/consensus/manual-seal/src/consensus/babe.rs index 9edcb8fd13a1..d18170e9a0d6 100644 --- a/client/consensus/manual-seal/src/consensus/babe.rs +++ b/client/consensus/manual-seal/src/consensus/babe.rs @@ -39,7 +39,7 @@ use std::{ use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier}; use sp_api::{ProvideRuntimeApi, TransactionFor}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; -use sp_consensus::{BlockOrigin, CacheKeyId}; +use sp_consensus::CacheKeyId; use sp_consensus_babe::{ digests::{NextEpochDescriptor, PreDigest, SecondaryPlainPreDigest}, inherents::BabeInherentData, @@ -50,7 +50,6 @@ use sp_inherents::{InherentData, InherentDataProvider, InherentIdentifier}; use sp_runtime::{ generic::{BlockId, Digest}, 
traits::{Block as BlockT, DigestFor, DigestItemFor, Header, Zero}, - Justifications, }; use sp_timestamp::{InherentType, TimestampInherentData, INHERENT_IDENTIFIER}; @@ -98,20 +97,14 @@ where { async fn verify( &mut self, - origin: BlockOrigin, - header: B::Header, - justifications: Option, - body: Option>, + mut import_params: BlockImportParams, ) -> Result<(BlockImportParams, Option)>>), String> { - let mut import_params = BlockImportParams::new(origin, header.clone()); - import_params.justifications = justifications; - import_params.body = body; import_params.finalized = false; import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - let pre_digest = find_pre_digest::(&header)?; + let pre_digest = find_pre_digest::(&import_params.header)?; - let parent_hash = header.parent_hash(); + let parent_hash = import_params.header.parent_hash(); let parent = self .client .header(BlockId::Hash(*parent_hash)) diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 7d4dfefe50c6..4f23bdcf6592 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -27,9 +27,9 @@ use sc_consensus::{ import_queue::{BasicQueue, BoxBlockImport, Verifier}, }; use sp_blockchain::HeaderBackend; -use sp_consensus::{BlockOrigin, CacheKeyId, Environment, Proposer, SelectChain}; +use sp_consensus::{CacheKeyId, Environment, Proposer, SelectChain}; use sp_inherents::CreateInherentDataProviders; -use sp_runtime::{traits::Block as BlockT, ConsensusEngineId, Justifications}; +use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; use std::{marker::PhantomData, sync::Arc}; mod error; @@ -59,18 +59,11 @@ struct ManualSealVerifier; impl Verifier for ManualSealVerifier { async fn verify( &mut self, - origin: BlockOrigin, - header: B::Header, - justifications: Option, - body: Option>, + mut block: BlockImportParams, ) -> Result<(BlockImportParams, Option)>>), String> { - let mut import_params = 
BlockImportParams::new(origin, header); - import_params.justifications = justifications; - import_params.body = body; - import_params.finalized = false; - import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - Ok((import_params, None)) + block.finalized = false; + block.fork_choice = Some(ForkChoiceStrategy::LongestChain); + Ok((block, None)) } } diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 85a37e73535a..17bd02f6a565 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -58,15 +58,14 @@ use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend, ProvideCache}; use sp_consensus::{ - BlockOrigin, CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, - SyncOracle, + CanAuthorWith, Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle, }; use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_runtime::{ generic::{BlockId, Digest, DigestItem}, traits::{Block as BlockT, Header as HeaderT}, - Justifications, RuntimeString, + RuntimeString, }; use std::{ borrow::Cow, cmp::Ordering, collections::HashMap, marker::PhantomData, sync::Arc, @@ -461,26 +460,20 @@ where { async fn verify( &mut self, - origin: BlockOrigin, - header: B::Header, - justifications: Option, - body: Option>, + mut block: BlockImportParams, ) -> Result<(BlockImportParams, Option)>>), String> { - let hash = header.hash(); - let (checked_header, seal) = self.check_header(header)?; + let hash = block.header.hash(); + let (checked_header, seal) = self.check_header(block.header)?; let intermediate = PowIntermediate:: { difficulty: None }; - - let mut import_block = BlockImportParams::new(origin, checked_header); - import_block.post_digests.push(seal); - import_block.body = body; - 
import_block.justifications = justifications; - import_block + block.header = checked_header; + block.post_digests.push(seal); + block .intermediates .insert(Cow::from(INTERMEDIATE_KEY), Box::new(intermediate) as Box<_>); - import_block.post_hash = Some(hash); + block.post_hash = Some(hash); - Ok((import_block, None)) + Ok((block, None)) } } diff --git a/client/db/src/cache/list_cache.rs b/client/db/src/cache/list_cache.rs index 9499ae2a89f4..1808d431dd05 100644 --- a/client/db/src/cache/list_cache.rs +++ b/client/db/src/cache/list_cache.rs @@ -302,6 +302,7 @@ impl> ListCache let prev_operation = operations.operations.last(); debug_assert!( entry_type != EntryType::Final || + self.unfinalized.is_empty() || self.best_finalized_block.hash == parent.hash || match prev_operation { Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) => diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index a02e1cf7add9..c0649853160f 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -358,18 +358,23 @@ impl DbChangesTrieStorage { let next_config = match cache_tx { Some(cache_tx) if config_for_new_block && cache_tx.new_config.is_some() => { let config = cache_tx.new_config.clone().expect("guarded by is_some(); qed"); - ChangesTrieConfigurationRange { + Ok(ChangesTrieConfigurationRange { zero: (block_num, block_hash), end: None, config, - } + }) }, _ if config_for_new_block => self.configuration_at(&BlockId::Hash( *new_header .expect("config_for_new_block is only true when new_header is passed; qed") .parent_hash(), - ))?, - _ => self.configuration_at(&BlockId::Hash(next_digest_range_start_hash))?, + )), + _ => self.configuration_at(&BlockId::Hash(next_digest_range_start_hash)), + }; + let next_config = match next_config { + Ok(next_config) => next_config, + Err(ClientError::UnknownBlock(_)) => break, // No block means nothing to prune. 
+ Err(e) => return Err(e), }; if let Some(config) = next_config.config { let mut oldest_digest_range = config diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 455ec1ef6b9d..dda469f4fd33 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -692,7 +692,10 @@ impl HeaderMetadata for BlockchainDb { header_metadata }) .ok_or_else(|| { - ClientError::UnknownBlock(format!("header not found in db: {}", hash)) + ClientError::UnknownBlock(format!( + "Header was not found in the database: {:?}", + hash + )) }) }, Ok, @@ -1210,8 +1213,11 @@ impl Backend { return Err(sp_blockchain::Error::SetHeadTooOld.into()) } - // cannot find tree route with empty DB. - if meta.best_hash != Default::default() { + let parent_exists = + self.blockchain.status(BlockId::Hash(route_to))? == sp_blockchain::BlockStatus::InChain; + + // Cannot find tree route with empty DB or when imported a detached block. + if meta.best_hash != Default::default() && parent_exists { let tree_route = sp_blockchain::tree_route(&self.blockchain, meta.best_hash, route_to)?; // uncanonicalize: check safety violations and ensure the numbers no longer @@ -1261,8 +1267,10 @@ impl Backend { ) -> ClientResult<()> { let last_finalized = last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash); - if *header.parent_hash() != last_finalized { - return Err(::sp_blockchain::Error::NonSequentialFinalization(format!( + if last_finalized != self.blockchain.meta.read().genesis_hash && + *header.parent_hash() != last_finalized + { + return Err(sp_blockchain::Error::NonSequentialFinalization(format!( "Last finalized {:?} not parent of {:?}", last_finalized, header.hash() @@ -1588,7 +1596,7 @@ impl Backend { columns::META, meta_keys::LEAF_PREFIX, ); - }; + } let mut children = children::read_children( &*self.storage.db, @@ -1598,14 +1606,14 @@ impl Backend { )?; if !children.contains(&hash) { children.push(hash); + children::write_children( + &mut transaction, + columns::META, + 
meta_keys::CHILDREN_PREFIX, + parent_hash, + children, + ); } - children::write_children( - &mut transaction, - columns::META, - meta_keys::CHILDREN_PREFIX, - parent_hash, - children, - ); } meta_updates.push(MetaUpdate { @@ -1615,7 +1623,6 @@ impl Backend { is_finalized: finalized, with_state: operation.commit_state, }); - Some((pending_block.header, number, hash, enacted, retracted, is_best, cache)) } else { None diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 706538e80724..66432a7aa51c 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -22,7 +22,7 @@ futures = "0.3.9" futures-timer = "3.0.1" log = "0.4.8" parking_lot = "0.11.1" -rand = "0.7.2" +rand = "0.8.4" parity-scale-codec = { version = "2.0.0", features = ["derive"] } sp-application-crypto = { version = "4.0.0-dev", path = "../../primitives/application-crypto" } sp-arithmetic = { version = "4.0.0-dev", path = "../../primitives/arithmetic" } diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index 9cfd49eeb796..f27a530ed2f4 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -1087,7 +1087,7 @@ where // random between `[0, 2 * gossip_duration]` seconds. 
let delay: u64 = - thread_rng().gen_range(0, 2 * self.config.gossip_duration.as_millis() as u64); + thread_rng().gen_range(0..2 * self.config.gossip_duration.as_millis() as u64); Box::pin(Delay::new(Duration::from_millis(delay)).map(Ok)) } diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 84e6fa9e1fba..a86421b4a0ef 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -19,7 +19,7 @@ use std::{collections::HashMap, marker::PhantomData, sync::Arc}; use log::debug; -use parity_scale_codec::Encode; +use parity_scale_codec::{Decode, Encode}; use sc_client_api::{backend::Backend, utils::is_descendent_of}; use sc_consensus::{ @@ -27,10 +27,11 @@ use sc_consensus::{ BlockCheckParams, BlockImport, BlockImportParams, ImportResult, JustificationImport, }; use sc_telemetry::TelemetryHandle; -use sp_api::TransactionFor; +use sp_api::{Core, RuntimeApiInfo, TransactionFor}; use sp_blockchain::{well_known_cache_keys, BlockStatus}; use sp_consensus::{BlockOrigin, Error as ConsensusError, SelectChain}; -use sp_finality_grandpa::{ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID}; +use sp_core::hashing::twox_128; +use sp_finality_grandpa::{ConsensusLog, GrandpaApi, ScheduledChange, SetId, GRANDPA_ENGINE_ID}; use sp_runtime::{ generic::{BlockId, OpaqueDigestItemId}, traits::{Block as BlockT, DigestFor, Header as HeaderT, NumberFor, Zero}, @@ -43,7 +44,7 @@ use crate::{ environment::finalize_block, justification::GrandpaJustification, notification::GrandpaJustificationSender, - ClientForGrandpa, CommandOrError, Error, NewAuthoritySet, VoterCommand, + AuthoritySetChanges, ClientForGrandpa, CommandOrError, Error, NewAuthoritySet, VoterCommand, }; /// A block-import handler for GRANDPA. 
@@ -230,6 +231,10 @@ where DigestFor: Encode, BE: Backend, Client: ClientForGrandpa, + Client::Api: GrandpaApi, + for<'a> &'a Client: + BlockImport>, + TransactionFor: 'static, { // check for a new authority set change. fn check_new_change( @@ -418,6 +423,91 @@ where Ok(PendingSetChanges { just_in_case, applied_changes, do_pause }) } + + /// Read current set id form a given state. + fn current_set_id(&self, id: &BlockId) -> Result { + let runtime_version = self.inner.runtime_api().version(id).map_err(|e| { + ConsensusError::ClientImport(format!( + "Unable to retrieve current runtime version. {}", + e + )) + })?; + if runtime_version + .api_version(&>::ID) + .map_or(false, |v| v < 3) + { + // The new API is not supported in this runtime. Try reading directly from storage. + // This code may be removed once warp sync to an old runtime is no longer needed. + for prefix in ["GrandpaFinality", "Grandpa"] { + let k = [twox_128(prefix.as_bytes()), twox_128(b"CurrentSetId")].concat(); + if let Ok(Some(id)) = + self.inner.storage(&id, &sc_client_api::StorageKey(k.to_vec())) + { + if let Ok(id) = SetId::decode(&mut id.0.as_ref()) { + return Ok(id) + } + } + } + Err(ConsensusError::ClientImport("Unable to retrieve current set id.".into())) + } else { + self.inner + .runtime_api() + .current_set_id(&id) + .map_err(|e| ConsensusError::ClientImport(e.to_string())) + } + } + + /// Import whole new state and reset authority set. + async fn import_state( + &mut self, + mut block: BlockImportParams>, + new_cache: HashMap>, + ) -> Result { + let hash = block.post_hash(); + let number = *block.header.number(); + // Force imported state finality. + block.finalized = true; + let import_result = (&*self.inner).import_block(block, new_cache).await; + match import_result { + Ok(ImportResult::Imported(aux)) => { + // We've just imported a new state. We trust the sync module has verified + // finality proofs and that the state is correct and final. 
+ // So we can read the authority list and set id from the state. + self.authority_set_hard_forks.clear(); + let block_id = BlockId::hash(hash); + let authorities = self + .inner + .runtime_api() + .grandpa_authorities(&block_id) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + let set_id = self.current_set_id(&block_id)?; + let authority_set = AuthoritySet::new( + authorities.clone(), + set_id, + fork_tree::ForkTree::new(), + Vec::new(), + AuthoritySetChanges::empty(), + ) + .ok_or_else(|| ConsensusError::ClientImport("Invalid authority list".into()))?; + *self.authority_set.inner_locked() = authority_set.clone(); + + crate::aux_schema::update_authority_set::( + &authority_set, + None, + |insert| self.inner.insert_aux(insert, []), + ) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + let new_set = + NewAuthoritySet { canon_number: number, canon_hash: hash, set_id, authorities }; + let _ = self + .send_voter_commands + .unbounded_send(VoterCommand::ChangeAuthorities(new_set)); + Ok(ImportResult::Imported(aux)) + }, + Ok(r) => Ok(r), + Err(e) => Err(ConsensusError::ClientImport(e.to_string())), + } + } } #[async_trait::async_trait] @@ -427,6 +517,7 @@ where DigestFor: Encode, BE: Backend, Client: ClientForGrandpa, + Client::Api: GrandpaApi, for<'a> &'a Client: BlockImport>, TransactionFor: 'static, @@ -455,6 +546,10 @@ where Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), } + if block.with_state() { + return self.import_state(block, new_cache).await + } + // on initial sync we will restrict logging under info to avoid spam. 
let initial_sync = block.origin == BlockOrigin::NetworkInitialSync; diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 8f8ce25b60a5..2a10dfc0d50d 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -64,7 +64,7 @@ use prometheus_endpoint::{PrometheusError, Registry}; use sc_client_api::{ backend::{AuxStore, Backend}, BlockchainEvents, CallExecutor, ExecutionStrategy, ExecutorProvider, Finalizer, LockImportRun, - TransactionFor, + StorageProvider, TransactionFor, }; use sc_consensus::BlockImport; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO}; @@ -119,6 +119,7 @@ mod notification; mod observer; mod until_imported; mod voting_rule; +pub mod warp_proof; pub use authorities::{AuthoritySet, AuthoritySetChanges, SharedAuthoritySet}; pub use aux_schema::best_justification; @@ -335,6 +336,7 @@ pub trait ClientForGrandpa: + ProvideRuntimeApi + ExecutorProvider + BlockImport, Error = sp_consensus::Error> + + StorageProvider where BE: Backend, Block: BlockT, @@ -353,7 +355,8 @@ where + BlockchainEvents + ProvideRuntimeApi + ExecutorProvider - + BlockImport, Error = sp_consensus::Error>, + + BlockImport, Error = sp_consensus::Error> + + StorageProvider, { } diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index cbea6c138c90..dd120fdd1450 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -177,12 +177,11 @@ where { let LinkHalf { client, - select_chain: _, persistent_data, voter_commands_rx, justification_sender, - justification_stream: _, telemetry, + .. } = link; let network = NetworkBridge::new( diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index bf9faec70753..6b151f314b5c 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -203,6 +203,10 @@ sp_api::mock_impl_runtime_apis! 
{ self.inner.genesis_authorities.clone() } + fn current_set_id(&self) -> SetId { + 0 + } + fn submit_report_equivocation_unsigned_extrinsic( _equivocation_proof: EquivocationProof, _key_owner_proof: OpaqueKeyOwnershipProof, diff --git a/client/finality-grandpa-warp-sync/src/proof.rs b/client/finality-grandpa/src/warp_proof.rs similarity index 68% rename from client/finality-grandpa-warp-sync/src/proof.rs rename to client/finality-grandpa/src/warp_proof.rs index d2484a800e63..86b57c78a43e 100644 --- a/client/finality-grandpa-warp-sync/src/proof.rs +++ b/client/finality-grandpa/src/warp_proof.rs @@ -14,12 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use codec::{Decode, Encode}; +//! Utilities for generating and verifying GRANDPA warp sync proofs. -use sc_client_api::Backend as ClientBackend; -use sc_finality_grandpa::{ - find_scheduled_change, AuthoritySetChanges, BlockNumberOps, GrandpaJustification, +use sp_runtime::codec::{self, Decode, Encode}; + +use crate::{ + best_justification, find_scheduled_change, AuthoritySetChanges, BlockNumberOps, + GrandpaJustification, SharedAuthoritySet, }; +use sc_client_api::Backend as ClientBackend; +use sc_network::warp_request_handler::{EncodedProof, VerificationResult, WarpSyncProvider}; use sp_blockchain::{Backend as BlockchainBackend, HeaderBackend}; use sp_finality_grandpa::{AuthorityList, SetId, GRANDPA_ENGINE_ID}; use sp_runtime::{ @@ -27,13 +31,34 @@ use sp_runtime::{ traits::{Block as BlockT, Header as HeaderT, NumberFor, One}, }; -use crate::HandleRequestError; +use std::sync::Arc; + +/// Warp proof processing error. +#[derive(Debug, derive_more::Display, derive_more::From)] +pub enum Error { + /// Decoding error. + #[display(fmt = "Failed to decode block hash: {}.", _0)] + DecodeScale(codec::Error), + /// Client backend error. + Client(sp_blockchain::Error), + /// Invalid request data. 
+ #[from(ignore)] + InvalidRequest(String), + /// Invalid warp proof. + #[from(ignore)] + InvalidProof(String), + /// Missing header or authority set change data. + #[display(fmt = "Missing required data to be able to answer request.")] + MissingData, +} + +impl std::error::Error for Error {} /// The maximum size in bytes of the `WarpSyncProof`. -pub(super) const MAX_WARP_SYNC_PROOF_SIZE: usize = 16 * 1024 * 1024; +pub(super) const MAX_WARP_SYNC_PROOF_SIZE: usize = 8 * 1024 * 1024; /// A proof of an authority set change. -#[derive(Decode, Encode)] +#[derive(Decode, Encode, Debug)] pub struct WarpSyncFragment { /// The last block that the given authority set finalized. This block should contain a digest /// signaling an authority set change from which we can fetch the next authority set. @@ -54,11 +79,11 @@ impl WarpSyncProof { /// Generates a warp sync proof starting at the given block. It will generate authority set /// change proofs for all changes that happened from `begin` until the current authority set /// (capped by MAX_WARP_SYNC_PROOF_SIZE). - pub fn generate( + fn generate( backend: &Backend, begin: Block::Hash, set_changes: &AuthoritySetChanges>, - ) -> Result, HandleRequestError> + ) -> Result, Error> where Backend: ClientBackend, { @@ -67,12 +92,10 @@ impl WarpSyncProof { let begin_number = blockchain .block_number_from_id(&BlockId::Hash(begin))? 
- .ok_or_else(|| HandleRequestError::InvalidRequest("Missing start block".to_string()))?; + .ok_or_else(|| Error::InvalidRequest("Missing start block".to_string()))?; if begin_number > blockchain.info().finalized_number { - return Err(HandleRequestError::InvalidRequest( - "Start block is not finalized".to_string(), - )) + return Err(Error::InvalidRequest("Start block is not finalized".to_string())) } let canon_hash = blockchain.hash(begin_number)?.expect( @@ -82,7 +105,7 @@ impl WarpSyncProof { ); if canon_hash != begin { - return Err(HandleRequestError::InvalidRequest( + return Err(Error::InvalidRequest( "Start block is not in the finalized chain".to_string(), )) } @@ -91,8 +114,7 @@ impl WarpSyncProof { let mut proofs_encoded_len = 0; let mut proof_limit_reached = false; - let set_changes = - set_changes.iter_from(begin_number).ok_or(HandleRequestError::MissingData)?; + let set_changes = set_changes.iter_from(begin_number).ok_or(Error::MissingData)?; for (_, last_block) in set_changes { let header = blockchain.header(BlockId::Number(*last_block))?.expect( @@ -137,19 +159,18 @@ impl WarpSyncProof { let is_finished = if proof_limit_reached { false } else { - let latest_justification = - sc_finality_grandpa::best_justification(backend)?.filter(|justification| { - // the existing best justification must be for a block higher than the - // last authority set change. if we didn't prove any authority set - // change then we fallback to make sure it's higher or equal to the - // initial warp sync block. - let limit = proofs - .last() - .map(|proof| proof.justification.target().0 + One::one()) - .unwrap_or(begin_number); - - justification.target().0 >= limit - }); + let latest_justification = best_justification(backend)?.filter(|justification| { + // the existing best justification must be for a block higher than the + // last authority set change. 
if we didn't prove any authority set + // change then we fallback to make sure it's higher or equal to the + // initial warp sync block. + let limit = proofs + .last() + .map(|proof| proof.justification.target().0 + One::one()) + .unwrap_or(begin_number); + + justification.target().0 >= limit + }); if let Some(latest_justification) = latest_justification { let header = blockchain.header(BlockId::Hash(latest_justification.target().1))? @@ -167,12 +188,13 @@ impl WarpSyncProof { } /// Verifies the warp sync proof starting at the given set id and with the given authorities. + /// Verification stops when either the proof is exhausted or finality for the target header can be proven. /// If the proof is valid the new set id and authorities is returned. - pub fn verify( + fn verify( &self, set_id: SetId, authorities: AuthorityList, - ) -> Result<(SetId, AuthorityList), HandleRequestError> + ) -> Result<(SetId, AuthorityList), Error> where NumberFor: BlockNumberOps, { @@ -183,37 +205,107 @@ impl WarpSyncProof { proof .justification .verify(current_set_id, ¤t_authorities) - .map_err(|err| HandleRequestError::InvalidProof(err.to_string()))?; + .map_err(|err| Error::InvalidProof(err.to_string()))?; if proof.justification.target().1 != proof.header.hash() { - return Err(HandleRequestError::InvalidProof( - "mismatch between header and justification".to_owned(), + return Err(Error::InvalidProof( + "Mismatch between header and justification".to_owned(), )) } if let Some(scheduled_change) = find_scheduled_change::(&proof.header) { current_authorities = scheduled_change.next_authorities; current_set_id += 1; - } else if fragment_num != self.proofs.len() - 1 { - // Only the last fragment of the proof is allowed to be missing the authority - // set change. - return Err(HandleRequestError::InvalidProof( + } else if fragment_num != self.proofs.len() - 1 || !self.is_finished { + // Only the last fragment of the last proof message is allowed to be missing + // the authority set change. 
+ return Err(Error::InvalidProof( "Header is missing authority set change digest".to_string(), )) } } - Ok((current_set_id, current_authorities)) } } +/// Implements network API for warp sync. +pub struct NetworkProvider> +where + NumberFor: BlockNumberOps, +{ + backend: Arc, + authority_set: SharedAuthoritySet>, +} + +impl> NetworkProvider +where + NumberFor: BlockNumberOps, +{ + /// Create a new istance for a given backend and authority set. + pub fn new( + backend: Arc, + authority_set: SharedAuthoritySet>, + ) -> Self { + NetworkProvider { backend, authority_set } + } +} + +impl> WarpSyncProvider + for NetworkProvider +where + NumberFor: BlockNumberOps, +{ + fn generate( + &self, + start: Block::Hash, + ) -> Result> { + let proof = WarpSyncProof::::generate( + &*self.backend, + start, + &self.authority_set.authority_set_changes(), + ) + .map_err(Box::new)?; + Ok(EncodedProof(proof.encode())) + } + + fn verify( + &self, + proof: &EncodedProof, + set_id: SetId, + authorities: AuthorityList, + ) -> Result, Box> { + let EncodedProof(proof) = proof; + let proof = WarpSyncProof::::decode(&mut proof.as_slice()) + .map_err(|e| format!("Proof decoding error: {:?}", e))?; + let last_header = proof + .proofs + .last() + .map(|p| p.header.clone()) + .ok_or_else(|| "Empty proof".to_string())?; + let (next_set_id, next_authorities) = + proof.verify(set_id, authorities).map_err(Box::new)?; + if proof.is_finished { + Ok(VerificationResult::::Complete(next_set_id, next_authorities, last_header)) + } else { + Ok(VerificationResult::::Partial( + next_set_id, + next_authorities, + last_header.hash(), + )) + } + } + + fn current_authorities(&self) -> AuthorityList { + self.authority_set.inner().current_authorities.clone() + } +} + #[cfg(test)] mod tests { - use crate::WarpSyncProof; - use codec::Encode; + use super::{codec::Encode, WarpSyncProof}; + use crate::{AuthoritySetChanges, GrandpaJustification}; use rand::prelude::*; use sc_block_builder::BlockBuilderProvider; - use 
sc_finality_grandpa::{AuthoritySetChanges, GrandpaJustification}; use sp_blockchain::HeaderBackend; use sp_consensus::BlockOrigin; use sp_finality_grandpa::GRANDPA_ENGINE_ID; diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 4e91c22f9efd..76e21215c245 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -40,6 +40,7 @@ use wasm_timer::Instant; /// /// Call `InformantDisplay::new` to initialize the state, then regularly call `display` with the /// information to display. +/// pub struct InformantDisplay { /// Head of chain block number from the last time `display` has been called. /// `None` if `display` has never been called. @@ -91,23 +92,36 @@ impl InformantDisplay { (diff_bytes_inbound, diff_bytes_outbound) }; - let (level, status, target) = - match (net_status.sync_state, net_status.best_seen_block, net_status.state_sync) { - (_, _, Some(state)) => ( - "⚙️ ", - "Downloading state".into(), - format!( - ", {}%, ({:.2}) Mib", - state.percentage, - (state.size as f32) / (1024f32 * 1024f32) - ), + let (level, status, target) = match ( + net_status.sync_state, + net_status.best_seen_block, + net_status.state_sync, + net_status.warp_sync, + ) { + (_, _, _, Some(warp)) => ( + "⏩", + "Warping".into(), + format!( + ", {}, ({:.2}) Mib", + warp.phase, + (warp.total_bytes as f32) / (1024f32 * 1024f32) + ), + ), + (_, _, Some(state), _) => ( + "⚙️ ", + "Downloading state".into(), + format!( + ", {}%, ({:.2}) Mib", + state.percentage, + (state.size as f32) / (1024f32 * 1024f32) ), - (SyncState::Idle, _, _) => ("💤", "Idle".into(), "".into()), - (SyncState::Downloading, None, _) => - ("⚙️ ", format!("Preparing{}", speed), "".into()), - (SyncState::Downloading, Some(n), None) => - ("⚙️ ", format!("Syncing{}", speed), format!(", target=#{}", n)), - }; + ), + (SyncState::Idle, _, _, _) => ("💤", "Idle".into(), "".into()), + (SyncState::Downloading, None, _, _) => + ("⚙️ ", format!("Preparing{}", speed), "".into()), 
+ (SyncState::Downloading, Some(n), None, _) => + ("⚙️ ", format!("Syncing{}", speed), format!(", target=#{}", n)), + }; if self.format.enable_color { info!( diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index 6a91f583cd3d..c7c90a626a34 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -21,7 +21,7 @@ use ansi_term::Colour; use futures::prelude::*; use futures_timer::Delay; -use log::{info, trace, warn}; +use log::{debug, info, trace}; use parity_util_mem::MallocSizeOf; use sc_client_api::{BlockchainEvents, UsageProvider}; use sc_network::NetworkService; @@ -143,7 +143,7 @@ where ancestor.hash, ), Ok(_) => {}, - Err(e) => warn!("Error computing tree route: {}", e), + Err(e) => debug!("Error computing tree route: {}", e), } } } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index 9c6b580fb9c6..a24b8fe5310a 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -57,6 +57,7 @@ sc-consensus = { version = "0.10.0-dev", path = "../consensus/common" } sp-core = { version = "4.0.0-dev", path = "../../primitives/core" } sp-runtime = { version = "4.0.0-dev", path = "../../primitives/runtime" } sp-utils = { version = "4.0.0-dev", path = "../../primitives/utils" } +sp-finality-grandpa = { version = "4.0.0-dev", path = "../../primitives/finality-grandpa" } thiserror = "1" unsigned-varint = { version = "0.6.0", features = ["futures", "asynchronous_codec"] } void = "1.0.2" diff --git a/client/network/README.md b/client/network/README.md index 914720f53e2a..c361bc9249f7 100644 --- a/client/network/README.md +++ b/client/network/README.md @@ -203,6 +203,69 @@ integer representing the role of the node: In the future, though, these restrictions will be removed. +# Sync + +The crate implements a number of syncing algorithms. 
The main purpose of the syncing algorithm is +to get the chain to the latest state and keep it synced with the rest of the network by downloading and +importing new data as soon as it becomes available. Once the node starts it catches up with the network +with one of the initial sync methods listed below, and once it is completed uses a keep-up sync to +download new blocks. + +## Full and light sync + +This is the default syncing method for the initial and keep-up sync. The algorithm starts with the +current best block and downloads block data progressively from multiple peers if available. Once +there's a sequence of blocks ready to be imported they are fed to the import queue. Full nodes download +and execute full blocks, while light nodes only download and import headers. This continues until each peer +has no more new blocks to give. + +For each peer the sync maintains the number of our common best block with that peer. This number is updated +whenever a peer announces new blocks or our best block advances. This allows us to keep track of peers that have new +block data and request new information as soon as it is announced. In keep-up mode, we also track peers that +announce blocks on all branches and not just the best branch. The sync algorithm tries to be greedy and download +all data that's announced. + +## Fast sync + +In this mode the initial sync downloads and verifies the full header history. This allows us to validate +authority set transitions and arrive at a recent header. After the header chain is verified and imported +the node starts downloading a state snapshot using the state request protocol. Each `StateRequest` +contains a starting storage key, which is empty for the first request. +`StateResponse` contains a storage proof for a sequence of keys and values in the storage +starting (but not including) from the key that is in the request.
After iterating the proof trie against
+the storage root that is in the target header, the node issues the next `StateRequest` with the starting
+key set to the last key from the previous response. This continues until trie iteration reaches the end.
+The state is then imported into the database and the keep-up sync starts in normal full/light sync mode.
+
+## Warp sync
+
+This is similar to fast sync, but instead of downloading and verifying the full header chain, the algorithm
+only downloads finalized authority set changes.
+
+### GRANDPA warp sync.
+
+GRANDPA keeps justifications for each finalized authority set change. Each change is signed by the
+authorities from the previous set. By downloading and verifying these signed hand-offs starting from genesis,
+we arrive at a recent header faster than downloading the full header chain. Each `WarpSyncRequest` contains a block
+hash to start collecting proofs from. `WarpSyncResponse` contains a sequence of block headers and
+justifications. The proof downloader checks the justifications and continues requesting proofs from the last
+header hash, until it arrives at some recent header.
+
+Once the finality chain is proved for a header, the state matching the header is downloaded much like during
+the fast sync. The state is verified to match the header storage root. After the state is imported into the
+database it is queried for the information that allows GRANDPA and BABE to continue operating from that state.
+This includes BABE epoch information and GRANDPA authority set id.
+
+### Background block download.
+
+After the latest state has been imported the node is fully operational, but is still missing historic block
+data. I.e. it is unable to serve block bodies and headers other than the most recent one. To make sure all
+nodes have block history available, a background sync process is started that downloads all the missing blocks. 
+It is run in parallel with the keep-up sync and does not interfere with downloading of the recent blocks. +During this download we also import GRANPA justifications for blocks with authority set changes, so that +The warp-synced node has all the data to serve for other nodes nodes that might want to sync from it with +any method. + # Usage Using the `sc-network` crate is done through the [`NetworkWorker`] struct. Create this diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 73d5ec357b2c..c181ee4e6339 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -26,6 +26,7 @@ use crate::{ }; use bytes::Bytes; +use codec::Encode; use futures::{channel::oneshot, stream::StreamExt}; use libp2p::{ core::{Multiaddr, PeerId, PublicKey}, @@ -87,6 +88,11 @@ pub struct Behaviour { /// [`request_responses::RequestResponsesBehaviour`]. #[behaviour(ignore)] state_request_protocol_name: String, + + /// Protocol name used to send out warp sync requests via + /// [`request_responses::RequestResponsesBehaviour`]. + #[behaviour(ignore)] + warp_sync_protocol_name: Option, } /// Event generated by `Behaviour`. @@ -195,6 +201,7 @@ impl Behaviour { disco_config: DiscoveryConfig, block_request_protocol_config: request_responses::ProtocolConfig, state_request_protocol_config: request_responses::ProtocolConfig, + warp_sync_protocol_config: Option, bitswap: Option>, light_client_request_protocol_config: request_responses::ProtocolConfig, // All remaining request protocol configs. @@ -203,9 +210,16 @@ impl Behaviour { // Extract protocol name and add to `request_response_protocols`. 
let block_request_protocol_name = block_request_protocol_config.name.to_string(); let state_request_protocol_name = state_request_protocol_config.name.to_string(); + let warp_sync_protocol_name = match warp_sync_protocol_config { + Some(config) => { + let name = config.name.to_string(); + request_response_protocols.push(config); + Some(name) + }, + None => None, + }; request_response_protocols.push(block_request_protocol_config); request_response_protocols.push(state_request_protocol_config); - request_response_protocols.push(light_client_request_protocol_config); Ok(Behaviour { @@ -220,6 +234,7 @@ impl Behaviour { events: VecDeque::new(), block_request_protocol_name, state_request_protocol_name, + warp_sync_protocol_name, }) } @@ -368,6 +383,24 @@ impl NetworkBehaviourEventProcess> for Behavi IfDisconnected::ImmediateError, ); }, + CustomMessageOutcome::WarpSyncRequest { target, request, pending_response } => + match &self.warp_sync_protocol_name { + Some(name) => self.request_responses.send_request( + &target, + name, + request.encode(), + pending_response, + IfDisconnected::ImmediateError, + ), + None => { + log::warn!( + target: "sync", + "Trying to send warp sync request when no protocol is configured {:?}", + request, + ); + return + }, + }, CustomMessageOutcome::NotificationStreamOpened { remote, protocol, diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 2581a08d4246..dd60f329128f 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -27,6 +27,7 @@ pub use crate::{ request_responses::{ IncomingRequest, OutgoingResponse, ProtocolConfig as RequestResponseConfig, }, + warp_request_handler::WarpSyncProvider, }; pub use libp2p::{build_multiaddr, core::PublicKey, identity, wasm_ext::ExtTransport}; @@ -137,6 +138,9 @@ pub struct Params { /// [`crate::state_request_handler::StateRequestHandler::new`] allowing /// both outgoing and incoming requests. 
pub state_request_protocol_config: RequestResponseConfig, + + /// Optional warp sync protocol support. Include protocol config and sync provider. + pub warp_sync: Option<(Arc>, RequestResponseConfig)>, } /// Role of the local node. @@ -268,6 +272,7 @@ impl fmt::Debug for ProtocolId { /// assert_eq!(peer_id, "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse::().unwrap()); /// assert_eq!(addr, "/ip4/198.51.100.19/tcp/30333".parse::().unwrap()); /// ``` +/// pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { let addr: Multiaddr = addr_str.parse()?; parse_addr(addr) @@ -391,6 +396,8 @@ pub enum SyncMode { /// Download indexed transactions for recent blocks. storage_chain_mode: bool, }, + /// Warp sync - verify authority set transitions and the latest state. + Warp, } impl Default for SyncMode { diff --git a/client/network/src/gossip/tests.rs b/client/network/src/gossip/tests.rs index f4f96b863d62..88c4160bc506 100644 --- a/client/network/src/gossip/tests.rs +++ b/client/network/src/gossip/tests.rs @@ -53,10 +53,7 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) impl sc_consensus::Verifier for PassThroughVerifier { async fn verify( &mut self, - origin: sp_consensus::BlockOrigin, - header: B::Header, - justifications: Option, - body: Option>, + mut block: sp_consensus::BlockImportParams, ) -> Result< ( sc_consensus::BlockImportParams, @@ -64,7 +61,7 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) ), String, > { - let maybe_keys = header + let maybe_keys = block.header .digest() .log(|l| { l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) @@ -79,12 +76,9 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) )] }); - let mut import = sc_consensus::BlockImportParams::new(origin, header); - import.body = body; - import.finalized = self.0; - import.justifications = justifications; - import.fork_choice = 
Some(sc_consensus::ForkChoiceStrategy::LongestChain); - Ok((import, maybe_keys)) + block.finalized = self.0; + block.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); + Ok((block, maybe_keys)) } } @@ -144,6 +138,7 @@ fn build_test_full_node(network_config: config::NetworkConfiguration) block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, + warp_sync: None, }) .unwrap(); diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index c812390ec6a6..633baaca47aa 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -243,6 +243,7 @@ //! - Calling `trigger_repropagate` when a transaction is added to the pool. //! //! More precise usage details are still being worked on and will likely change in the future. +//! mod behaviour; mod chain; @@ -264,12 +265,13 @@ pub mod light_client_requests; pub mod network_state; pub mod state_request_handler; pub mod transactions; +pub mod warp_request_handler; #[doc(inline)] pub use libp2p::{multiaddr, Multiaddr, PeerId}; pub use protocol::{ event::{DhtEvent, Event, ObservedRole}, - sync::{StateDownloadProgress, SyncState}, + sync::{StateDownloadProgress, SyncState, WarpSyncPhase, WarpSyncProgress}, PeerInfo, }; pub use service::{ @@ -326,4 +328,6 @@ pub struct NetworkStatus { pub total_bytes_outbound: u64, /// State sync in progress. pub state_sync: Option, + /// Warp sync in progress. 
+ pub warp_sync: Option, } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 2af33cd1c5a1..a5675dbdc34d 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -18,11 +18,12 @@ use crate::{ chain::Client, - config::{self, ProtocolId}, + config::{self, ProtocolId, WarpSyncProvider}, error, request_responses::RequestFailure, schema::v1::StateResponse, utils::{interval, LruHashSet}, + warp_request_handler::EncodedProof, }; use bytes::Bytes; @@ -196,6 +197,7 @@ pub struct Protocol { enum PeerRequest { Block(message::BlockRequest), State, + WarpProof, } /// Peer information @@ -239,6 +241,7 @@ impl ProtocolConfig { config::SyncMode::Full => sync::SyncMode::Full, config::SyncMode::Fast { skip_proofs, storage_chain_mode } => sync::SyncMode::LightState { skip_proofs, storage_chain_mode }, + config::SyncMode::Warp => sync::SyncMode::Warp, } } } @@ -293,6 +296,7 @@ impl Protocol { notifications_protocols_handshakes: Vec>, block_announce_validator: Box + Send>, metrics_registry: Option<&Registry>, + warp_sync_provider: Option>>, ) -> error::Result<(Protocol, sc_peerset::PeersetHandle, Vec<(PeerId, Multiaddr)>)> { let info = chain.info(); let sync = ChainSync::new( @@ -300,6 +304,7 @@ impl Protocol { chain.clone(), block_announce_validator, config.max_parallel_downloads, + warp_sync_provider, ) .map_err(Box::new)?; @@ -724,6 +729,26 @@ impl Protocol { } } + /// Must be called in response to a [`CustomMessageOutcome::WarpSyncRequest`] being emitted. + /// Must contain the same `PeerId` and request that have been emitted. 
+ pub fn on_warp_sync_response( + &mut self, + peer_id: PeerId, + response: crate::warp_request_handler::EncodedProof, + ) -> CustomMessageOutcome { + match self.sync.on_warp_sync_data(&peer_id, response) { + Ok(sync::OnWarpSyncData::WarpProofRequest(peer, req)) => + prepare_warp_sync_request::(&mut self.peers, peer, req), + Ok(sync::OnWarpSyncData::StateRequest(peer, req)) => + prepare_state_request::(&mut self.peers, peer, req), + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id, HARDCODED_PEERSETS_SYNC); + self.peerset_handle.report_peer(id, repu); + CustomMessageOutcome::None + }, + } + } + /// Perform time based maintenance. /// /// > **Note**: This method normally doesn't have to be called except for testing purposes. @@ -1248,6 +1273,19 @@ fn prepare_state_request( CustomMessageOutcome::StateRequest { target: who, request, pending_response: tx } } +fn prepare_warp_sync_request( + peers: &mut HashMap>, + who: PeerId, + request: crate::warp_request_handler::Request, +) -> CustomMessageOutcome { + let (tx, rx) = oneshot::channel(); + + if let Some(ref mut peer) = peers.get_mut(&who) { + peer.request = Some((PeerRequest::WarpProof, rx)); + } + CustomMessageOutcome::WarpSyncRequest { target: who, request, pending_response: tx } +} + /// Outcome of an incoming custom message. #[derive(Debug)] #[must_use] @@ -1291,6 +1329,12 @@ pub enum CustomMessageOutcome { request: crate::schema::v1::StateRequest, pending_response: oneshot::Sender, RequestFailure>>, }, + /// A new warp sync request must be emitted. + WarpSyncRequest { + target: PeerId, + request: crate::warp_request_handler::Request, + pending_response: oneshot::Sender, RequestFailure>>, + }, /// Peer has a reported a new head of chain. PeerNewBest(PeerId, NumberFor), /// Now connected to a new peer for syncing purposes. @@ -1364,6 +1408,7 @@ impl NetworkBehaviour for Protocol { // Check for finished outgoing requests. 
let mut finished_block_requests = Vec::new(); let mut finished_state_requests = Vec::new(); + let mut finished_warp_sync_requests = Vec::new(); for (id, peer) in self.peers.iter_mut() { if let Peer { request: Some((_, pending_response)), .. } = peer { match pending_response.poll_unpin(cx) { @@ -1412,6 +1457,9 @@ impl NetworkBehaviour for Protocol { finished_state_requests.push((id.clone(), protobuf_response)); }, + PeerRequest::WarpProof => { + finished_warp_sync_requests.push((id.clone(), resp)); + }, } }, Poll::Ready(Ok(Err(e))) => { @@ -1474,6 +1522,10 @@ impl NetworkBehaviour for Protocol { let ev = self.on_state_response(id, protobuf_response); self.pending_messages.push_back(ev); } + for (id, response) in finished_warp_sync_requests { + let ev = self.on_warp_sync_response(id, EncodedProof(response)); + self.pending_messages.push_back(ev); + } while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) { self.tick(); @@ -1491,6 +1543,10 @@ impl NetworkBehaviour for Protocol { let event = prepare_block_request(&mut self.peers, id, request); self.pending_messages.push_back(event); } + if let Some((id, request)) = self.sync.warp_sync_request() { + let event = prepare_warp_sync_request(&mut self.peers, id, request); + self.pending_messages.push_back(event); + } // Check if there is any block announcement validation finished. while let Poll::Ready(result) = self.sync.poll_block_announce_validation(cx) { diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 8918d7adde09..e9bf14a623b6 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -27,6 +27,7 @@ //! The `ChainSync` struct maintains the state of the block requests. Whenever something happens on //! the network, or whenever a block has been successfully verified, call the appropriate method in //! order to update it. +//! 
use crate::{ protocol::message::{self, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse}, @@ -62,10 +63,12 @@ use std::{ pin::Pin, sync::Arc, }; +use warp::{WarpProofRequest, WarpSync, WarpSyncProvider}; mod blocks; mod extra_requests; mod state; +mod warp; /// Maximum blocks to request in a single packet. const MAX_BLOCKS_TO_REQUEST: usize = 128; @@ -101,6 +104,9 @@ const STATE_SYNC_FINALITY_THRESHOLD: u32 = 8; /// so far behind. const MAJOR_SYNC_BLOCKS: u8 = 5; +/// Number of peers that need to be connected before warp sync is started. +const MIN_PEERS_TO_START_WARP_SYNC: usize = 3; + mod rep { use sc_peerset::ReputationChange as Rep; /// Reputation change when a peer sent us a message that led to a @@ -217,6 +223,10 @@ pub struct ChainSync { block_announce_validation_per_peer_stats: HashMap, /// State sync in progress, if any. state_sync: Option>, + /// Warp sync in progress, if any. + warp_sync: Option>, + /// Warp sync provider. + warp_sync_provider: Option>>, /// Enable importing existing blocks. This is used used after the state download to /// catch up to the latest state while re-importing blocks. import_existing: bool, @@ -290,6 +300,8 @@ pub enum PeerSyncState { DownloadingJustification(B::Hash), /// Downloading state. DownloadingState, + /// Downloading warp proof. + DownloadingWarpProof, } impl PeerSyncState { @@ -316,6 +328,39 @@ pub struct StateDownloadProgress { pub size: u64, } +/// Reported warp sync phase. +#[derive(Clone, Eq, PartialEq, Debug)] +pub enum WarpSyncPhase { + /// Waiting for peers to connect. + AwaitingPeers, + /// Downloading and verifying grandpa warp proofs. + DownloadingWarpProofs, + /// Downloading state data. + DownloadingState, + /// Importing state. 
+ ImportingState, +} + +impl fmt::Display for WarpSyncPhase { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + WarpSyncPhase::AwaitingPeers => write!(f, "Waiting for peers"), + WarpSyncPhase::DownloadingWarpProofs => write!(f, "Downloading finality proofs"), + WarpSyncPhase::DownloadingState => write!(f, "Downloading state"), + WarpSyncPhase::ImportingState => write!(f, "Importing state"), + } + } +} + +/// Reported warp sync progress. +#[derive(Clone, Eq, PartialEq, Debug)] +pub struct WarpSyncProgress { + /// Estimated download percentage. + pub phase: WarpSyncPhase, + /// Total bytes downloaded so far. + pub total_bytes: u64, +} + /// Syncing status and statistics. #[derive(Clone)] pub struct Status { @@ -329,6 +374,8 @@ pub struct Status { pub queued_blocks: u32, /// State sync status in progress, if any. pub state_sync: Option, + /// Warp sync in progress, if any. + pub warp_sync: Option, } /// A peer did not behave as expected and should be reported. @@ -373,6 +420,15 @@ pub enum OnStateData { Request(PeerId, StateRequest), } +/// Result of [`ChainSync::on_warp_sync_data`]. +#[derive(Debug)] +pub enum OnWarpSyncData { + /// Warp proof request is issued. + WarpProofRequest(PeerId, warp::WarpProofRequest), + /// A new state request needs to be made to the given peer. + StateRequest(PeerId, StateRequest), +} + /// Result of [`ChainSync::poll_block_announce_validation`]. #[derive(Debug, Clone, PartialEq, Eq)] pub enum PollBlockAnnounceValidation { @@ -460,6 +516,8 @@ pub enum SyncMode { Full, // Sync headers and the last finalied state LightState { storage_chain_mode: bool, skip_proofs: bool }, + // Warp sync mode. + Warp, } /// Result of [`ChainSync::has_slot_for_block_announce_validation`]. 
@@ -479,6 +537,7 @@ impl ChainSync { client: Arc>, block_announce_validator: Box + Send>, max_parallel_downloads: u32, + warp_sync_provider: Option>>, ) -> Result { let mut sync = ChainSync { client, @@ -497,6 +556,8 @@ impl ChainSync { block_announce_validation: Default::default(), block_announce_validation_per_peer_stats: Default::default(), state_sync: None, + warp_sync: None, + warp_sync_provider, import_existing: false, }; sync.reset_sync_start_point()?; @@ -508,7 +569,7 @@ impl ChainSync { SyncMode::Full => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, SyncMode::Light => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, - SyncMode::LightState { storage_chain_mode: false, .. } => + SyncMode::LightState { storage_chain_mode: false, .. } | SyncMode::Warp => BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION | BlockAttributes::BODY, SyncMode::LightState { storage_chain_mode: true, .. } => BlockAttributes::HEADER | @@ -522,6 +583,7 @@ impl ChainSync { SyncMode::Full => false, SyncMode::Light => true, SyncMode::LightState { .. 
} => true, + SyncMode::Warp => true, } } @@ -550,12 +612,20 @@ impl ChainSync { SyncState::Idle }; + let warp_sync_progress = match (&self.warp_sync, &self.mode) { + (None, SyncMode::Warp) => + Some(WarpSyncProgress { phase: WarpSyncPhase::AwaitingPeers, total_bytes: 0 }), + (Some(sync), _) => Some(sync.progress()), + _ => None, + }; + Status { state: sync_state, best_seen_block: best_seen, num_peers: self.peers.len() as u32, queued_blocks: self.queue_blocks.len() as u32, state_sync: self.state_sync.as_ref().map(|s| s.progress()), + warp_sync: warp_sync_progress, } } @@ -620,6 +690,17 @@ impl ChainSync { return Ok(None) } + if let SyncMode::Warp = &self.mode { + if self.peers.len() >= MIN_PEERS_TO_START_WARP_SYNC && self.warp_sync.is_none() + { + log::debug!(target: "sync", "Starting warp state sync."); + if let Some(provider) = &self.warp_sync_provider { + self.warp_sync = + Some(WarpSync::new(self.client.clone(), provider.clone())); + } + } + } + // If we are at genesis, just start downloading. let (state, req) = if self.best_queued_number.is_zero() { debug!( @@ -792,7 +873,8 @@ impl ChainSync { /// Get an iterator over all block requests of all peers. pub fn block_requests(&mut self) -> impl Iterator)> + '_ { - if self.pending_requests.is_empty() || self.state_sync.is_some() { + if self.pending_requests.is_empty() || self.state_sync.is_some() || self.warp_sync.is_some() + { return Either::Left(std::iter::empty()) } if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { @@ -876,16 +958,16 @@ impl ChainSync { Either::Right(iter) } - /// Get a state request, if any + /// Get a state request, if any. pub fn state_request(&mut self) -> Option<(PeerId, StateRequest)> { + if self.peers.iter().any(|(_, peer)| peer.state == PeerSyncState::DownloadingState) { + // Only one pending state request is allowed. 
+ return None + } if let Some(sync) = &self.state_sync { if sync.is_complete() { return None } - if self.peers.iter().any(|(_, peer)| peer.state == PeerSyncState::DownloadingState) { - // Only one pending state request is allowed. - return None - } for (id, peer) in self.peers.iter_mut() { if peer.state.is_available() && peer.common_number >= sync.target_block_num() { trace!(target: "sync", "New StateRequest for {}", id); @@ -895,6 +977,55 @@ impl ChainSync { } } } + if let Some(sync) = &self.warp_sync { + if sync.is_complete() { + return None + } + if let (Some(request), Some(target)) = + (sync.next_state_request(), sync.target_block_number()) + { + for (id, peer) in self.peers.iter_mut() { + if peer.state.is_available() && peer.best_number >= target { + trace!(target: "sync", "New StateRequest for {}", id); + peer.state = PeerSyncState::DownloadingState; + return Some((id.clone(), request)) + } + } + } + } + None + } + + /// Get a warp sync request, if any. + pub fn warp_sync_request(&mut self) -> Option<(PeerId, WarpProofRequest)> { + if self + .peers + .iter() + .any(|(_, peer)| peer.state == PeerSyncState::DownloadingWarpProof) + { + // Only one pending state request is allowed. + return None + } + if let Some(sync) = &self.warp_sync { + if sync.is_complete() { + return None + } + if let Some(request) = sync.next_warp_poof_request() { + let mut targets: Vec<_> = self.peers.values().map(|p| p.best_number).collect(); + if !targets.is_empty() { + targets.sort(); + let median = targets[targets.len() / 2]; + // Find a random peer that is synced as much as peer majority. + for (id, peer) in self.peers.iter_mut() { + if peer.state.is_available() && peer.best_number >= median { + trace!(target: "sync", "New WarpProofRequest for {}", id); + peer.state = PeerSyncState::DownloadingWarpProof; + return Some((id.clone(), request)) + } + } + } + } + } None } @@ -1055,7 +1186,8 @@ impl ChainSync { }, PeerSyncState::Available | PeerSyncState::DownloadingJustification(..) 
| - PeerSyncState::DownloadingState => Vec::new(), + PeerSyncState::DownloadingState | + PeerSyncState::DownloadingWarpProof => Vec::new(), } } else { // When request.is_none() this is a block announcement. Just accept blocks. @@ -1105,6 +1237,15 @@ impl ChainSync { response.proof.len(), ); sync.import(response) + } else if let Some(sync) = &mut self.warp_sync { + debug!( + target: "sync", + "Importing state data from {} with {} keys, {} proof nodes.", + who, + response.entries.len(), + response.proof.len(), + ); + sync.import_state(response) } else { debug!(target: "sync", "Ignored obsolete state response from {}", who); return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)) @@ -1112,12 +1253,7 @@ impl ChainSync { match import_result { state::ImportResult::Import(hash, header, state) => { - let origin = if self.status().state != SyncState::Downloading { - BlockOrigin::NetworkBroadcast - } else { - BlockOrigin::NetworkInitialSync - }; - + let origin = BlockOrigin::NetworkInitialSync; let block = IncomingBlock { hash, header: Some(header), @@ -1142,6 +1278,39 @@ impl ChainSync { } } + /// Handle a response from the remote to a warp proof request that we made. + /// + /// Returns next request. 
+ pub fn on_warp_sync_data( + &mut self, + who: &PeerId, + response: warp::EncodedProof, + ) -> Result, BadPeer> { + let import_result = if let Some(sync) = &mut self.warp_sync { + debug!( + target: "sync", + "Importing warp proof data from {}, {} bytes.", + who, + response.0.len(), + ); + sync.import_warp_proof(response) + } else { + debug!(target: "sync", "Ignored obsolete warp sync response from {}", who); + return Err(BadPeer(who.clone(), rep::NOT_REQUESTED)) + }; + + match import_result { + warp::WarpProofImportResult::StateRequest(request) => + Ok(OnWarpSyncData::StateRequest(who.clone(), request)), + warp::WarpProofImportResult::WarpProofRequest(request) => + Ok(OnWarpSyncData::WarpProofRequest(who.clone(), request)), + warp::WarpProofImportResult::BadResponse => { + debug!(target: "sync", "Bad proof data received from {}", who); + Err(BadPeer(who.clone(), rep::BAD_BLOCK)) + }, + } + } + fn validate_and_queue_blocks( &mut self, mut new_blocks: Vec>, @@ -1308,6 +1477,20 @@ impl ChainSync { self.mode = SyncMode::Full; output.extend(self.restart()); } + let warp_sync_complete = self + .warp_sync + .as_ref() + .map_or(false, |s| s.target_block_hash() == Some(hash)); + if warp_sync_complete { + info!( + target: "sync", + "Warp sync is complete ({} MiB), restarting block sync.", + self.warp_sync.as_ref().map_or(0, |s| s.progress().total_bytes / (1024 * 1024)), + ); + self.warp_sync = None; + self.mode = SyncMode::Full; + output.extend(self.restart()); + } }, Err(BlockImportError::IncompleteHeader(who)) => if let Some(peer) = who { @@ -1349,6 +1532,7 @@ impl ChainSync { e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => { warn!(target: "sync", "💔 Error importing block {:?}: {:?}", hash, e); self.state_sync = None; + self.warp_sync = None; output.extend(self.restart()); }, Err(BlockImportError::Cancelled) => {}, @@ -1828,6 +2012,13 @@ impl ChainSync { ); self.mode = SyncMode::Full; } + if matches!(self.mode, SyncMode::Warp) && 
info.finalized_state.is_some() { + log::warn!( + target: "sync", + "Can't use warp sync mode with a partially synced database. Reverting to full sync mode." + ); + self.mode = SyncMode::Full; + } self.import_existing = false; self.best_queued_hash = info.best_hash; self.best_queued_number = info.best_number; @@ -2253,7 +2444,8 @@ mod test { let peer_id = PeerId::random(); let mut sync = - ChainSync::new(SyncMode::Full, client.clone(), block_announce_validator, 1).unwrap(); + ChainSync::new(SyncMode::Full, client.clone(), block_announce_validator, 1, None) + .unwrap(); let (a1_hash, a1_number) = { let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; @@ -2307,6 +2499,7 @@ mod test { client.clone(), Box::new(DefaultBlockAnnounceValidator), 1, + None, ) .unwrap(); @@ -2470,6 +2663,7 @@ mod test { client.clone(), Box::new(DefaultBlockAnnounceValidator), 5, + None, ) .unwrap(); @@ -2584,6 +2778,7 @@ mod test { client.clone(), Box::new(DefaultBlockAnnounceValidator), 5, + None, ) .unwrap(); @@ -2707,6 +2902,7 @@ mod test { client.clone(), Box::new(DefaultBlockAnnounceValidator), 5, + None, ) .unwrap(); @@ -2814,6 +3010,7 @@ mod test { client.clone(), Box::new(DefaultBlockAnnounceValidator), 1, + None, ) .unwrap(); diff --git a/client/network/src/protocol/sync/warp.rs b/client/network/src/protocol/sync/warp.rs new file mode 100644 index 000000000000..fae0e2f5452a --- /dev/null +++ b/client/network/src/protocol/sync/warp.rs @@ -0,0 +1,181 @@ +// This file is part of Substrate. + +// Copyright (C) 2021 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// This program is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// This program is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with this program. If not, see . + +///! Warp sync support. +pub use super::state::ImportResult; +use super::state::StateSync; +pub use crate::warp_request_handler::{ + EncodedProof, Request as WarpProofRequest, VerificationResult, WarpSyncProvider, +}; +use crate::{ + chain::Client, + schema::v1::{StateRequest, StateResponse}, + WarpSyncPhase, WarpSyncProgress, +}; +use sp_finality_grandpa::{AuthorityList, SetId}; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; +use std::sync::Arc; + +enum Phase { + WarpProof { set_id: SetId, authorities: AuthorityList, last_hash: B::Hash }, + State(StateSync), +} + +/// Import warp proof result. +pub enum WarpProofImportResult { + /// Start downloading state data. + StateRequest(StateRequest), + /// Continue dowloading warp sync proofs. + WarpProofRequest(WarpProofRequest), + /// Bad proof. + BadResponse, +} + +/// Warp sync state machine. Accumulates warp proofs and state. +pub struct WarpSync { + phase: Phase, + client: Arc>, + warp_sync_provider: Arc>, + total_proof_bytes: u64, +} + +impl WarpSync { + /// Create a new instance. + pub fn new( + client: Arc>, + warp_sync_provider: Arc>, + ) -> Self { + let last_hash = client.hash(Zero::zero()).unwrap().expect("Genesis header always exists"); + let phase = Phase::WarpProof { + set_id: 0, + authorities: warp_sync_provider.current_authorities(), + last_hash, + }; + WarpSync { client, warp_sync_provider, phase, total_proof_bytes: 0 } + } + + /// Validate and import a state reponse. + pub fn import_state(&mut self, response: StateResponse) -> ImportResult { + match &mut self.phase { + Phase::WarpProof { .. 
} => { + log::debug!(target: "sync", "Unexpected state response"); + return ImportResult::BadResponse + }, + Phase::State(sync) => sync.import(response), + } + } + + /// Validate and import a warp proof reponse. + pub fn import_warp_proof(&mut self, response: EncodedProof) -> WarpProofImportResult { + match &mut self.phase { + Phase::State(_) => { + log::debug!(target: "sync", "Unexpected warp proof response"); + WarpProofImportResult::BadResponse + }, + Phase::WarpProof { set_id, authorities, last_hash } => { + match self.warp_sync_provider.verify( + &response, + *set_id, + std::mem::take(authorities), + ) { + Err(e) => { + log::debug!(target: "sync", "Bad warp proof response: {:?}", e); + return WarpProofImportResult::BadResponse + }, + Ok(VerificationResult::Partial(new_set_id, new_authorities, new_last_hash)) => { + log::debug!(target: "sync", "Verified partial proof, set_id={:?}", new_set_id); + *set_id = new_set_id; + *authorities = new_authorities; + *last_hash = new_last_hash.clone(); + self.total_proof_bytes += response.0.len() as u64; + WarpProofImportResult::WarpProofRequest(WarpProofRequest { + begin: new_last_hash, + }) + }, + Ok(VerificationResult::Complete(new_set_id, _, header)) => { + log::debug!(target: "sync", "Verified complete proof, set_id={:?}", new_set_id); + self.total_proof_bytes += response.0.len() as u64; + let state_sync = StateSync::new(self.client.clone(), header, false); + let request = state_sync.next_request(); + self.phase = Phase::State(state_sync); + WarpProofImportResult::StateRequest(request) + }, + } + }, + } + } + + /// Produce next state request. + pub fn next_state_request(&self) -> Option { + match &self.phase { + Phase::WarpProof { .. } => None, + Phase::State(sync) => Some(sync.next_request()), + } + } + + /// Produce next warp proof request. + pub fn next_warp_poof_request(&self) -> Option> { + match &self.phase { + Phase::State(_) => None, + Phase::WarpProof { last_hash, .. 
} => + Some(WarpProofRequest { begin: last_hash.clone() }), + } + } + + /// Return target block hash if it is known. + pub fn target_block_hash(&self) -> Option { + match &self.phase { + Phase::State(s) => Some(s.target()), + Phase::WarpProof { .. } => None, + } + } + + /// Return target block number if it is known. + pub fn target_block_number(&self) -> Option> { + match &self.phase { + Phase::State(s) => Some(s.target_block_num()), + Phase::WarpProof { .. } => None, + } + } + + /// Check if the state is complete. + pub fn is_complete(&self) -> bool { + match &self.phase { + Phase::WarpProof { .. } => false, + Phase::State(sync) => sync.is_complete(), + } + } + + /// Returns state sync estimated progress (percentage, bytes) + pub fn progress(&self) -> WarpSyncProgress { + match &self.phase { + Phase::WarpProof { .. } => WarpSyncProgress { + phase: WarpSyncPhase::DownloadingWarpProofs, + total_bytes: self.total_proof_bytes, + }, + Phase::State(sync) => WarpSyncProgress { + phase: if self.is_complete() { + WarpSyncPhase::ImportingState + } else { + WarpSyncPhase::DownloadingState + }, + total_bytes: self.total_proof_bytes + sync.progress().size, + }, + } + } +} diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 83cf2d675823..31d4488bc9aa 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -186,6 +186,12 @@ impl NetworkWorker { ); let default_notif_handshake_message = Roles::from(¶ms.role).encode(); + + let (warp_sync_provider, warp_sync_protocol_config) = match params.warp_sync { + Some((p, c)) => (Some(p), Some(c)), + None => (None, None), + }; + let (protocol, peerset_handle, mut known_addresses) = Protocol::new( protocol::ProtocolConfig { roles: From::from(¶ms.role), @@ -203,6 +209,7 @@ impl NetworkWorker { .collect(), params.block_announce_validator, params.metrics_registry.as_ref(), + warp_sync_provider, )?; // List of multiaddresses that we know in the network. 
@@ -346,6 +353,7 @@ impl NetworkWorker { discovery_config, params.block_request_protocol_config, params.state_request_protocol_config, + warp_sync_protocol_config, bitswap, params.light_client_request_protocol_config, params.network_config.request_response_protocols, @@ -461,6 +469,7 @@ impl NetworkWorker { total_bytes_inbound: self.total_bytes_inbound(), total_bytes_outbound: self.total_bytes_outbound(), state_sync: status.state_sync, + warp_sync: status.warp_sync, } } diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index a149b09a22dd..8cad044636c2 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -50,10 +50,7 @@ fn build_test_full_node( impl sc_consensus::Verifier for PassThroughVerifier { async fn verify( &mut self, - origin: sp_consensus::BlockOrigin, - header: B::Header, - justifications: Option, - body: Option>, + mut block: sc_consensus::BlockImportParams, ) -> Result< ( sc_consensus::BlockImportParams, @@ -61,7 +58,8 @@ fn build_test_full_node( ), String, > { - let maybe_keys = header + let maybe_keys = block + .header .digest() .log(|l| { l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) @@ -75,12 +73,9 @@ fn build_test_full_node( vec![(sp_blockchain::well_known_cache_keys::AUTHORITIES, blob.to_vec())] }); - let mut import = sc_consensus::BlockImportParams::new(origin, header); - import.body = body; - import.finalized = self.0; - import.justifications = justifications; - import.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); - Ok((import, maybe_keys)) + block.finalized = self.0; + block.fork_choice = Some(sc_consensus::ForkChoiceStrategy::LongestChain); + Ok((block, maybe_keys)) } } @@ -132,6 +127,7 @@ fn build_test_full_node( block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, + warp_sync: None, }) .unwrap(); diff --git a/client/finality-grandpa-warp-sync/src/lib.rs 
b/client/network/src/warp_request_handler.rs similarity index 51% rename from client/finality-grandpa-warp-sync/src/lib.rs rename to client/network/src/warp_request_handler.rs index c74c4d15f9f4..beb9d1ce528a 100644 --- a/client/finality-grandpa-warp-sync/src/lib.rs +++ b/client/network/src/warp_request_handler.rs @@ -16,58 +16,61 @@ //! Helper for handling (i.e. answering) grandpa warp sync requests from a remote peer. +use crate::config::{IncomingRequest, OutgoingResponse, ProtocolId, RequestResponseConfig}; use codec::{Decode, Encode}; use futures::{ channel::{mpsc, oneshot}, stream::StreamExt, }; use log::debug; -use sc_client_api::Backend; -use sc_finality_grandpa::SharedAuthoritySet; -use sc_network::config::{IncomingRequest, OutgoingResponse, ProtocolId, RequestResponseConfig}; -use sc_service::{ - config::{Configuration, Role}, - SpawnTaskHandle, -}; -use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_finality_grandpa::{AuthorityList, SetId}; +use sp_runtime::traits::Block as BlockT; use std::{sync::Arc, time::Duration}; -mod proof; - -pub use proof::{WarpSyncFragment, WarpSyncProof}; - -/// Generates the appropriate [`RequestResponseConfig`] for a given chain configuration. -pub fn request_response_config_for_chain + 'static>( - config: &Configuration, - spawn_handle: SpawnTaskHandle, - backend: Arc, - authority_set: SharedAuthoritySet>, -) -> RequestResponseConfig -where - NumberFor: sc_finality_grandpa::BlockNumberOps, -{ - let protocol_id = config.protocol_id(); - - if matches!(config.role, Role::Light) { - // Allow outgoing requests but deny incoming requests. - generate_request_response_config(protocol_id.clone()) - } else { - // Allow both outgoing and incoming requests. - let (handler, request_response_config) = - GrandpaWarpSyncRequestHandler::new(protocol_id.clone(), backend.clone(), authority_set); - spawn_handle.spawn("grandpa-warp-sync", handler.run()); - request_response_config - } +/// Scale-encoded warp sync proof response. 
+pub struct EncodedProof(pub Vec); + +/// Warp sync request +#[derive(Encode, Decode, Debug)] +pub struct Request { + /// Start collecting proofs from this block. + pub begin: B::Hash, +} + +const MAX_RESPONSE_SIZE: u64 = 16 * 1024 * 1024; + +/// Proof verification result. +pub enum VerificationResult { + /// Proof is valid, but the target was not reached. + Partial(SetId, AuthorityList, Block::Hash), + /// Target finality is proved. + Complete(SetId, AuthorityList, Block::Header), } -const LOG_TARGET: &str = "finality-grandpa-warp-sync-request-handler"; +/// Warp sync backend. Handles retrieveing and verifying warp sync proofs. +pub trait WarpSyncProvider: Send + Sync { + /// Generate proof starting at given block hash. The proof is accumulated until maximum proof size is reached. + fn generate( + &self, + start: B::Hash, + ) -> Result>; + /// Verify warp proof agains current set of authorities. + fn verify( + &self, + proof: &EncodedProof, + set_id: SetId, + authorities: AuthorityList, + ) -> Result, Box>; + /// Get current list of authorities. This is supposed to be genesis authorities when starting sync. + fn current_authorities(&self) -> AuthorityList; +} /// Generates a [`RequestResponseConfig`] for the grandpa warp sync request protocol, refusing incoming requests. pub fn generate_request_response_config(protocol_id: ProtocolId) -> RequestResponseConfig { RequestResponseConfig { name: generate_protocol_name(protocol_id).into(), max_request_size: 32, - max_response_size: proof::MAX_WARP_SYNC_PROOF_SIZE as u64, + max_response_size: MAX_RESPONSE_SIZE, request_timeout: Duration::from_secs(10), inbound_queue: None, } @@ -82,76 +85,59 @@ fn generate_protocol_name(protocol_id: ProtocolId) -> String { s } -#[derive(Decode)] -struct Request { - begin: B::Hash, -} - /// Handler for incoming grandpa warp sync requests from a remote peer. 
-pub struct GrandpaWarpSyncRequestHandler { - backend: Arc, - authority_set: SharedAuthoritySet>, +pub struct RequestHandler { + backend: Arc>, request_receiver: mpsc::Receiver, - _phantom: std::marker::PhantomData, } -impl> GrandpaWarpSyncRequestHandler { - /// Create a new [`GrandpaWarpSyncRequestHandler`]. +impl RequestHandler { + /// Create a new [`RequestHandler`]. pub fn new( protocol_id: ProtocolId, - backend: Arc, - authority_set: SharedAuthoritySet>, + backend: Arc>, ) -> (Self, RequestResponseConfig) { let (tx, request_receiver) = mpsc::channel(20); let mut request_response_config = generate_request_response_config(protocol_id); request_response_config.inbound_queue = Some(tx); - ( - Self { backend, request_receiver, _phantom: std::marker::PhantomData, authority_set }, - request_response_config, - ) + (Self { backend, request_receiver }, request_response_config) } fn handle_request( &self, payload: Vec, pending_response: oneshot::Sender, - ) -> Result<(), HandleRequestError> - where - NumberFor: sc_finality_grandpa::BlockNumberOps, - { + ) -> Result<(), HandleRequestError> { let request = Request::::decode(&mut &payload[..])?; - let proof = WarpSyncProof::generate( - &*self.backend, - request.begin, - &self.authority_set.authority_set_changes(), - )?; + let EncodedProof(proof) = self + .backend + .generate(request.begin) + .map_err(HandleRequestError::InvalidRequest)?; pending_response .send(OutgoingResponse { - result: Ok(proof.encode()), + result: Ok(proof), reputation_changes: Vec::new(), sent_feedback: None, }) .map_err(|_| HandleRequestError::SendResponse) } - /// Run [`GrandpaWarpSyncRequestHandler`]. - pub async fn run(mut self) - where - NumberFor: sc_finality_grandpa::BlockNumberOps, - { + /// Run [`RequestHandler`]. 
+ pub async fn run(mut self) { while let Some(request) = self.request_receiver.next().await { let IncomingRequest { peer, payload, pending_response } = request; match self.handle_request(payload, pending_response) { Ok(()) => - debug!(target: LOG_TARGET, "Handled grandpa warp sync request from {}.", peer), + debug!(target: "sync", "Handled grandpa warp sync request from {}.", peer), Err(e) => debug!( - target: LOG_TARGET, - "Failed to handle grandpa warp sync request from {}: {}", peer, e, + target: "sync", + "Failed to handle grandpa warp sync request from {}: {}", + peer, e, ), } } @@ -159,7 +145,7 @@ impl> GrandpaWarpSyncRequestHandler), #[display(fmt = "Failed to send response.")] SendResponse, - #[display(fmt = "Missing required data to be able to answer request.")] - MissingData, } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 553353d77ac3..7668aa8fd56e 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -108,25 +108,19 @@ impl PassThroughVerifier { impl Verifier for PassThroughVerifier { async fn verify( &mut self, - origin: BlockOrigin, - header: B::Header, - justifications: Option, - body: Option>, + mut block: BlockImportParams, ) -> Result<(BlockImportParams, Option)>>), String> { - let maybe_keys = header + let maybe_keys = block + .header .digest() .log(|l| { l.try_as_raw(OpaqueDigestItemId::Consensus(b"aura")) .or_else(|| l.try_as_raw(OpaqueDigestItemId::Consensus(b"babe"))) }) .map(|blob| vec![(well_known_cache_keys::AUTHORITIES, blob.to_vec())]); - let mut import = BlockImportParams::new(origin, header); - import.body = body; - import.finalized = self.finalized; - import.justifications = justifications; - import.fork_choice = Some(self.fork_choice.clone()); - - Ok((import, maybe_keys)) + block.finalized = self.finalized; + block.fork_choice = Some(self.fork_choice.clone()); + Ok((block, maybe_keys)) } } @@ -389,13 +383,10 @@ where block.header.parent_hash, ); let header = 
block.header.clone(); - let (import_block, cache) = futures::executor::block_on(self.verifier.verify( - origin, - header.clone(), - None, - if headers_only { None } else { Some(block.extrinsics) }, - )) - .unwrap(); + let mut import_block = BlockImportParams::new(origin, header.clone()); + import_block.body = if headers_only { None } else { Some(block.extrinsics) }; + let (import_block, cache) = + futures::executor::block_on(self.verifier.verify(import_block)).unwrap(); let cache = if let Some(cache) = cache { cache.into_iter().collect() } else { @@ -631,21 +622,13 @@ struct VerifierAdapter { impl Verifier for VerifierAdapter { async fn verify( &mut self, - origin: BlockOrigin, - header: B::Header, - justifications: Option, - body: Option>, + block: BlockImportParams, ) -> Result<(BlockImportParams, Option)>>), String> { - let hash = header.hash(); - self.verifier - .lock() - .await - .verify(origin, header, justifications, body) - .await - .map_err(|e| { - self.failed_verifications.lock().insert(hash, e.clone()); - e - }) + let hash = block.header.hash(); + self.verifier.lock().await.verify(block).await.map_err(|e| { + self.failed_verifications.lock().insert(hash, e.clone()); + e + }) } } @@ -850,6 +833,7 @@ where block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, + warp_sync: None, }) .unwrap(); @@ -939,6 +923,7 @@ where block_request_protocol_config, state_request_protocol_config, light_client_request_protocol_config, + warp_sync: None, }) .unwrap(); diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 3990d6ea8ad3..6754a68296a6 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -343,7 +343,10 @@ fn should_query_storage() { Err(Error::InvalidBlockRange { from: format!("{:?}", genesis_hash), to: format!("{:?}", Some(random_hash1)), - details: format!("UnknownBlock: header not found in db: {}", random_hash1), + details: format!( + "UnknownBlock: Header was 
not found in the database: {:?}", + random_hash1 + ), }) .map_err(|e| e.to_string()) ); @@ -356,7 +359,10 @@ fn should_query_storage() { Err(Error::InvalidBlockRange { from: format!("{:?}", random_hash1), to: format!("{:?}", Some(genesis_hash)), - details: format!("UnknownBlock: header not found in db: {}", random_hash1), + details: format!( + "UnknownBlock: Header was not found in the database: {:?}", + random_hash1 + ), }) .map_err(|e| e.to_string()), ); @@ -369,7 +375,10 @@ fn should_query_storage() { Err(Error::InvalidBlockRange { from: format!("{:?}", random_hash1), to: format!("{:?}", Some(block2_hash)), // Best block hash. - details: format!("UnknownBlock: header not found in db: {}", random_hash1), + details: format!( + "UnknownBlock: Header was not found in the database: {:?}", + random_hash1 + ), }) .map_err(|e| e.to_string()), ); @@ -382,7 +391,10 @@ fn should_query_storage() { Err(Error::InvalidBlockRange { from: format!("{:?}", random_hash1), // First hash not found. to: format!("{:?}", Some(random_hash2)), - details: format!("UnknownBlock: header not found in db: {}", random_hash1), + details: format!( + "UnknownBlock: Header was not found in the database: {:?}", + random_hash1 + ), }) .map_err(|e| e.to_string()), ); diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 1f54850059fb..fb24a890133c 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -44,6 +44,7 @@ use sc_network::{ config::{OnDemand, Role, SyncMode}, light_client_requests::{self, handler::LightClientRequestHandler}, state_request_handler::{self, StateRequestHandler}, + warp_request_handler::{self, RequestHandler as WarpSyncRequestHandler, WarpSyncProvider}, NetworkService, }; use sc_telemetry::{telemetry, ConnectionMessage, Telemetry, TelemetryHandle, SUBSTRATE_INFO}; @@ -354,7 +355,7 @@ where wasm_runtime_overrides: config.wasm_runtime_overrides.clone(), no_genesis: matches!( config.network.sync_mode, - 
sc_network::config::SyncMode::Fast { .. } + sc_network::config::SyncMode::Fast { .. } | sc_network::config::SyncMode::Warp ), wasm_runtime_substitutes, }, @@ -843,6 +844,8 @@ pub struct BuildNetworkParams<'a, TBl: BlockT, TExPool, TImpQu, TCl> { /// A block announce validator builder. pub block_announce_validator_builder: Option) -> Box + Send> + Send>>, + /// An optional warp sync provider. + pub warp_sync: Option>>, } /// Build the network service, the network status sinks and an RPC sender. @@ -878,6 +881,7 @@ where import_queue, on_demand, block_announce_validator_builder, + warp_sync, } = params; let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { @@ -928,6 +932,20 @@ where } }; + let warp_sync_params = warp_sync.map(|provider| { + let protocol_config = if matches!(config.role, Role::Light) { + // Allow outgoing requests but deny incoming requests. + warp_request_handler::generate_request_response_config(protocol_id.clone()) + } else { + // Allow both outgoing and incoming requests. + let (handler, protocol_config) = + WarpSyncRequestHandler::new(protocol_id.clone(), provider.clone()); + spawn_handle.spawn("warp_sync_request_handler", handler.run()); + protocol_config + }; + (provider, protocol_config) + }); + let light_client_request_protocol_config = { if matches!(config.role, Role::Light) { // Allow outgoing requests but deny incoming requests. 
@@ -965,6 +983,7 @@ where metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()), block_request_protocol_config, state_request_protocol_config, + warp_sync: warp_sync_params, light_client_request_protocol_config, }; diff --git a/client/service/src/client/client.rs b/client/service/src/client/client.rs index 553584b15c02..01688f0c8e70 100644 --- a/client/service/src/client/client.rs +++ b/client/service/src/client/client.rs @@ -769,6 +769,8 @@ where { let parent_hash = import_headers.post().parent_hash().clone(); let status = self.backend.blockchain().status(BlockId::Hash(hash))?; + let parent_exists = self.backend.blockchain().status(BlockId::Hash(parent_hash))? == + blockchain::BlockStatus::InChain; match (import_existing, status) { (false, blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), (false, blockchain::BlockStatus::Unknown) => {}, @@ -815,7 +817,6 @@ where if let Some(changes_trie_transaction) = changes_trie_tx { operation.op.update_changes_trie(changes_trie_transaction)?; } - Some((main_sc, child_sc)) }, sc_consensus::StorageChanges::Import(changes) => { @@ -834,10 +835,10 @@ where None }, }; - - // ensure parent block is finalized to maintain invariant that - // finality is called sequentially. - if finalized { + // Ensure parent chain is finalized to maintain invariant that + // finality is called sequentially. This will also send finality + // notifications for top 250 newly finalized blocks. 
+ if finalized && parent_exists { self.apply_finality_with_block_hash( operation, parent_hash, @@ -868,7 +869,7 @@ where NewBlockState::Normal }; - let tree_route = if is_new_best && info.best_hash != parent_hash { + let tree_route = if is_new_best && info.best_hash != parent_hash && parent_exists { let route_from_best = sp_blockchain::tree_route(self.backend.blockchain(), info.best_hash, parent_hash)?; Some(route_from_best) @@ -932,21 +933,21 @@ where let at = BlockId::Hash(*parent_hash); let state_action = std::mem::replace(&mut import_block.state_action, StateAction::Skip); let (enact_state, storage_changes) = match (self.block_status(&at)?, state_action) { - (BlockStatus::Unknown, _) => - return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)), (BlockStatus::KnownBad, _) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::KnownBad)), - (_, StateAction::Skip) => (false, None), ( BlockStatus::InChainPruned, StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(_)), ) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), + (_, StateAction::ApplyChanges(changes)) => (true, Some(changes)), + (BlockStatus::Unknown, _) => + return Ok(PrepareStorageChangesResult::Discard(ImportResult::UnknownParent)), + (_, StateAction::Skip) => (false, None), (BlockStatus::InChainPruned, StateAction::Execute) => return Ok(PrepareStorageChangesResult::Discard(ImportResult::MissingState)), (BlockStatus::InChainPruned, StateAction::ExecuteIfPossible) => (false, None), (_, StateAction::Execute) => (true, None), (_, StateAction::ExecuteIfPossible) => (true, None), - (_, StateAction::ApplyChanges(changes)) => (true, Some(changes)), }; let storage_changes = match (enact_state, storage_changes, &import_block.body) { @@ -1912,8 +1913,14 @@ where &mut self, block: BlockCheckParams, ) -> Result { - let BlockCheckParams { hash, number, parent_hash, allow_missing_state, import_existing } = - block; + let BlockCheckParams { + 
hash, + number, + parent_hash, + allow_missing_state, + import_existing, + allow_missing_parent, + } = block; // Check the block against white and black lists if any are defined // (i.e. fork blocks and bad blocks respectively) @@ -1955,6 +1962,7 @@ where .map_err(|e| ConsensusError::ClientImport(e.to_string()))? { BlockStatus::InChainWithState | BlockStatus::Queued => {}, + BlockStatus::Unknown if allow_missing_parent => {}, BlockStatus::Unknown => return Ok(ImportResult::UnknownParent), BlockStatus::InChainPruned if allow_missing_state => {}, BlockStatus::InChainPruned => return Ok(ImportResult::MissingState), diff --git a/client/service/test/src/client/mod.rs b/client/service/test/src/client/mod.rs index dd0a33b7e858..6ac149677bc1 100644 --- a/client/service/test/src/client/mod.rs +++ b/client/service/test/src/client/mod.rs @@ -1209,6 +1209,7 @@ fn import_with_justification() { .unwrap() .block; block_on(client.import(BlockOrigin::Own, a2.clone())).unwrap(); + client.finalize_block(BlockId::hash(a2.hash()), None).unwrap(); // A2 -> A3 let justification = Justifications::from((TEST_ENGINE_ID, vec![1, 2, 3])); @@ -1555,6 +1556,7 @@ fn respects_block_rules() { number: 0, parent_hash: block_ok.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; assert_eq!(block_on(client.check_block(params)).unwrap(), ImportResult::imported(false)); @@ -1570,6 +1572,7 @@ fn respects_block_rules() { number: 0, parent_hash: block_not_ok.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; if record_only { @@ -1592,6 +1595,7 @@ fn respects_block_rules() { number: 1, parent_hash: block_ok.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; if record_only { @@ -1610,6 +1614,7 @@ fn respects_block_rules() { number: 1, parent_hash: block_not_ok.header().parent_hash().clone(), allow_missing_state: 
false, + allow_missing_parent: false, import_existing: false, }; @@ -1676,6 +1681,7 @@ fn returns_status_for_pruned_blocks() { number: 0, parent_hash: a1.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; @@ -1712,6 +1718,7 @@ fn returns_status_for_pruned_blocks() { number: 1, parent_hash: a1.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; @@ -1745,6 +1752,7 @@ fn returns_status_for_pruned_blocks() { number: 2, parent_hash: a2.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; @@ -1779,6 +1787,7 @@ fn returns_status_for_pruned_blocks() { number: 0, parent_hash: b1.header().parent_hash().clone(), allow_missing_state: false, + allow_missing_parent: false, import_existing: false, }; assert_eq!( diff --git a/primitives/finality-grandpa/src/lib.rs b/primitives/finality-grandpa/src/lib.rs index a083796d659c..353a3cd07822 100644 --- a/primitives/finality-grandpa/src/lib.rs +++ b/primitives/finality-grandpa/src/lib.rs @@ -492,7 +492,7 @@ sp_api::decl_runtime_apis! { /// applied in the runtime after those N blocks have passed. /// /// The consensus protocol will coordinate the handoff externally. - #[api_version(2)] + #[api_version(3)] pub trait GrandpaApi { /// Get the current GRANDPA authorities and weights. This should not change except /// for when changes are scheduled and the corresponding delay has passed. @@ -530,5 +530,8 @@ sp_api::decl_runtime_apis! { set_id: SetId, authority_id: AuthorityId, ) -> Option; + + /// Get current GRANDPA authority set id. + fn current_set_id() -> SetId; } } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index bdf45ceae88b..a148ce5cb75a 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -883,6 +883,10 @@ cfg_if! 
{ Vec::new() } + fn current_set_id() -> sp_finality_grandpa::SetId { + 0 + } + fn submit_report_equivocation_unsigned_extrinsic( _equivocation_proof: sp_finality_grandpa::EquivocationProof< ::Hash, diff --git a/test-utils/test-runner/Cargo.toml b/test-utils/test-runner/Cargo.toml index 0eb02d941712..06454ee24eae 100644 --- a/test-utils/test-runner/Cargo.toml +++ b/test-utils/test-runner/Cargo.toml @@ -16,6 +16,7 @@ sc-basic-authorship = { path = "../../client/basic-authorship" } sc-rpc = { path = "../../client/rpc" } sc-transaction-pool = { path = "../../client/transaction-pool" } grandpa = { package = "sc-finality-grandpa", path = "../../client/finality-grandpa" } +sp-finality-grandpa = { path = "../../primitives/finality-grandpa" } sp-consensus-babe = { path = "../../primitives/consensus/babe" } sc-consensus-babe = { path = "../../client/consensus/babe" } sc-consensus = { path = "../../client/consensus/common" } diff --git a/test-utils/test-runner/src/client.rs b/test-utils/test-runner/src/client.rs index 71a156b8bc0d..d130993bff4c 100644 --- a/test-utils/test-runner/src/client.rs +++ b/test-utils/test-runner/src/client.rs @@ -35,6 +35,7 @@ use sc_transaction_pool_api::TransactionPool; use sp_api::{ApiExt, ConstructRuntimeApi, Core, Metadata}; use sp_block_builder::BlockBuilder; use sp_consensus_babe::BabeApi; +use sp_finality_grandpa::GrandpaApi; use sp_keyring::sr25519::Keyring::Alice; use sp_offchain::OffchainWorkerApi; use sp_runtime::traits::{Block as BlockT, Header}; @@ -90,7 +91,8 @@ where + TaggedTransactionQueue + BlockBuilder + BabeApi - + ApiExt as Backend>::State>, + + ApiExt as Backend>::State> + + GrandpaApi, ::Call: From>, <::Block as BlockT>::Hash: FromStr, <<::Block as BlockT>::Header as Header>::Number: @@ -151,6 +153,7 @@ where import_queue, on_demand: None, block_announce_validator_builder: None, + warp_sync: None, }; build_network(params)? 
}; From 6468ec8a4117ad826d235450eeb77c624dd05945 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20K=C3=B6cher?= Date: Mon, 2 Aug 2021 11:17:30 +0200 Subject: [PATCH 7/8] Add rustfmt skip to default frame benchmarking template (#9473) This was missed in the introduction pr of rustfmt. There we only had updated the Substrate local template. --- utils/frame/benchmarking-cli/src/template.hbs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/frame/benchmarking-cli/src/template.hbs b/utils/frame/benchmarking-cli/src/template.hbs index 2fcc50f82377..4acb8c7baa23 100644 --- a/utils/frame/benchmarking-cli/src/template.hbs +++ b/utils/frame/benchmarking-cli/src/template.hbs @@ -1,5 +1,5 @@ {{header}} -//! Autogenerated weights for {{pallet}} +//! Autogenerated weights for `{{pallet}}` //! //! THIS FILE WAS AUTO-GENERATED USING THE SUBSTRATE BENCHMARK CLI VERSION {{version}} //! DATE: {{date}}, STEPS: `{{cmd.steps}}`, REPEAT: {{cmd.repeat}}, LOW RANGE: `{{cmd.lowest_range_values}}`, HIGH RANGE: `{{cmd.highest_range_values}}` @@ -10,6 +10,7 @@ // {{arg}} {{/each}} +#![cfg_attr(rustfmt, rustfmt_skip)] #![allow(unused_parens)] #![allow(unused_imports)] From 56fd18a82c95abff2184588c88630561696764eb Mon Sep 17 00:00:00 2001 From: Denis Pisarev Date: Mon, 2 Aug 2021 16:58:47 +0200 Subject: [PATCH 8/8] CI: stop publishing to crates.io until unleash is fixed (#9474) * CI: stop publishing to crates.io until unleash is fixed; allow restarting k8s runners * CI: fix CI if ci-release tag is pushed --- .gitlab-ci.yml | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f954ac23cba2..9a9f725780da 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -55,9 +55,15 @@ default: - artifacts/ .kubernetes-env: &kubernetes-env + retry: + max: 2 + when: + - runner_system_failure + - unknown_failure + - api_failure + interruptible: true tags: - kubernetes-parity-build - interruptible: true .rust-info-script: 
&rust-info-script - rustup show @@ -97,6 +103,7 @@ default: - if: $CI_COMMIT_REF_NAME == "master" - if: $CI_COMMIT_REF_NAME =~ /^[0-9]+$/ # PRs - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ .test-refs-no-trigger-prs-only: &test-refs-no-trigger-prs-only rules: @@ -343,6 +350,7 @@ unleash-check: - mkdir -p target/unleash - export CARGO_TARGET_DIR=target/unleash - cargo unleash check ${CARGO_UNLEASH_PKG_DEF} + # FIXME: this job must not fail, or unleash-to-crates-io will publish broken stuff allow_failure: true test-frame-examples-compile-to-wasm: @@ -670,12 +678,14 @@ publish-draft-release: - ./.maintain/gitlab/publish_draft_release.sh allow_failure: true -publish-to-crates-io: +unleash-to-crates-io: stage: publish <<: *docker-env rules: - if: $CI_COMMIT_REF_NAME =~ /^ci-release-.*$/ - - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 + # FIXME: wait until https://github.com/paritytech/cargo-unleash/issues/50 is fixed, also + # remove allow_failure: true on the check job + # - if: $CI_COMMIT_REF_NAME =~ /^v[0-9]+\.[0-9]+.*$/ # i.e. v1.0, v2.1rc1 script: - cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS} - cargo unleash em-dragons --no-check --owner github:paritytech:core-devs ${CARGO_UNLEASH_PKG_DEF}