diff --git a/bin/node/cli/tests/check_block_works.rs b/bin/node/cli/tests/check_block_works.rs
index 0b340dad6446c..34078b08cf074 100644
--- a/bin/node/cli/tests/check_block_works.rs
+++ b/bin/node/cli/tests/check_block_works.rs
@@ -22,7 +22,7 @@ use assert_cmd::cargo::cargo_bin;
use std::process::Command;
use tempfile::tempdir;
-mod common;
+pub mod common;
#[test]
fn check_block_works() {
diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs
index 51f88cd92bdef..61a07dd1ca877 100644
--- a/bin/node/cli/tests/common.rs
+++ b/bin/node/cli/tests/common.rs
@@ -17,7 +17,6 @@
// along with this program. If not, see <https://www.gnu.org/licenses/>.
#![cfg(unix)]
-#![allow(dead_code)]
use std::{process::{Child, ExitStatus}, thread, time::Duration, path::Path};
use assert_cmd::cargo::cargo_bin;
diff --git a/bin/node/cli/tests/export_import_flow.rs b/bin/node/cli/tests/export_import_flow.rs
new file mode 100644
index 0000000000000..85a49b005a95e
--- /dev/null
+++ b/bin/node/cli/tests/export_import_flow.rs
@@ -0,0 +1,212 @@
+// This file is part of Substrate.
+
+// Copyright (C) 2020 Parity Technologies (UK) Ltd.
+// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
+
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+#![cfg(unix)]
+
+use assert_cmd::cargo::cargo_bin;
+use std::{process::Command, fs, path::PathBuf};
+use tempfile::{tempdir, TempDir};
+use regex::Regex;
+
+pub mod common;
+
+fn contains_error(logged_output: &str) -> bool {
+ logged_output.contains("Error")
+}
+
+/// Helper struct to execute the export/import/revert tests.
+/// The fields are paths inside a temporary directory used by the test.
+struct ExportImportRevertExecutor<'a> {
+ base_path: &'a TempDir,
+ exported_blocks_file: &'a PathBuf,
+ db_path: &'a PathBuf,
+ num_exported_blocks: Option<u64>,
+}
+
+/// Format options for export / import commands.
+enum FormatOpt {
+ Json,
+ Binary,
+}
+
+/// The different subcommands we would like to run.
+enum SubCommand {
+ ExportBlocks,
+ ImportBlocks,
+}
+
+impl ToString for SubCommand {
+ fn to_string(&self) -> String {
+ match self {
+ SubCommand::ExportBlocks => String::from("export-blocks"),
+ SubCommand::ImportBlocks => String::from("import-blocks"),
+ }
+ }
+}
+
+impl<'a> ExportImportRevertExecutor<'a> {
+ fn new(
+ base_path: &'a TempDir,
+ exported_blocks_file: &'a PathBuf,
+ db_path: &'a PathBuf
+ ) -> Self {
+ Self {
+ base_path,
+ exported_blocks_file,
+ db_path,
+ num_exported_blocks: None,
+ }
+ }
+
+ /// Helper method to run a command. Returns a string corresponding to what has been logged.
+ fn run_block_command(&self,
+ sub_command: SubCommand,
+ format_opt: FormatOpt,
+ expected_to_fail: bool
+ ) -> String {
+ let sub_command_str = sub_command.to_string();
+ // Adding "--binary" if need be.
+ let arguments: Vec<&str> = match format_opt {
+ FormatOpt::Binary => vec![&sub_command_str, "--dev", "--pruning", "archive", "--binary", "-d"],
+ FormatOpt::Json => vec![&sub_command_str, "--dev", "--pruning", "archive", "-d"],
+ };
+
+ let tmp: TempDir;
+ // Setting base_path to be a temporary folder if we are importing blocks.
+ // This allows us to make sure we are importing from scratch.
+ let base_path = match sub_command {
+ SubCommand::ExportBlocks => self.base_path.path(),
+ SubCommand::ImportBlocks => {
+ tmp = tempdir().unwrap();
+ tmp.path()
+ }
+ };
+
+ // Running the command and capturing the output.
+ let output = Command::new(cargo_bin("substrate"))
+ .args(&arguments)
+ .arg(&base_path)
+ .arg(&self.exported_blocks_file)
+ .output()
+ .unwrap();
+
+ let logged_output = String::from_utf8_lossy(&output.stderr).to_string();
+
+ if expected_to_fail {
+ // Checking that we did indeed find an error.
+ assert!(contains_error(&logged_output), "expected to error but did not error!");
+ assert!(!output.status.success());
+ } else {
+ // Making sure no errors were logged.
+ assert!(!contains_error(&logged_output), "expected not to error but errored!");
+ assert!(output.status.success());
+ }
+
+ logged_output
+ }
+
+ /// Runs the `export-blocks` command.
+ fn run_export(&mut self, fmt_opt: FormatOpt) {
+ let log = self.run_block_command(SubCommand::ExportBlocks, fmt_opt, false);
+
+ // Using a regex to find out how many blocks we exported.
+ let re = Regex::new(r"Exporting blocks from #\d* to #(?P<exported_blocks>\d*)").unwrap();
+ let caps = re.captures(&log).unwrap();
+ // Saving the number of blocks we've exported for further use.
+ self.num_exported_blocks = Some(caps["exported_blocks"].parse::<u64>().unwrap());
+
+ let metadata = fs::metadata(&self.exported_blocks_file).unwrap();
+ assert!(metadata.len() > 0, "file exported_blocks should not be empty");
+
+ let _ = fs::remove_dir_all(&self.db_path);
+ }
+
+ /// Runs the `import-blocks` command, asserting that an error was found or
+ /// not depending on `expected_to_fail`.
+ fn run_import(&mut self, fmt_opt: FormatOpt, expected_to_fail: bool) {
+ let log = self.run_block_command(SubCommand::ImportBlocks, fmt_opt, expected_to_fail);
+
+ if !expected_to_fail {
+ // Using a regex to find out how many blocks we imported,
+ // and what the current best block is.
+ let re = Regex::new(r"Imported (?P<imported>\d*) blocks. Best: #(?P<best>\d*)").unwrap();
+ let caps = re.captures(&log).expect("capture should have succeeded");
+ let imported = caps["imported"].parse::<u64>().unwrap();
+ let best = caps["best"].parse::<u64>().unwrap();
+
+ assert_eq!(
+ imported,
+ best,
+ "numbers of blocks imported and best number differs"
+ );
+ assert_eq!(
+ best,
+ self.num_exported_blocks.expect("number of exported blocks cannot be None; qed"),
+ "best block number and number of expected blocks should not differ"
+ );
+ }
+ self.num_exported_blocks = None;
+ }
+
+ /// Runs the `revert` command.
+ fn run_revert(&self) {
+ let output = Command::new(cargo_bin("substrate"))
+ .args(&["revert", "--dev", "--pruning", "archive", "-d"])
+ .arg(&self.base_path.path())
+ .output()
+ .unwrap();
+
+ let logged_output = String::from_utf8_lossy(&output.stderr).to_string();
+
+ // Reverting should not log any error.
+ assert!(!contains_error(&logged_output));
+ // Command should never fail.
+ assert!(output.status.success());
+ }
+
+ /// Helper function that runs the whole export / import / revert flow and checks for errors.
+ fn run(&mut self, export_fmt: FormatOpt, import_fmt: FormatOpt, expected_to_fail: bool) {
+ self.run_export(export_fmt);
+ self.run_import(import_fmt, expected_to_fail);
+ self.run_revert();
+ }
+}
+
+#[test]
+fn export_import_revert() {
+ let base_path = tempdir().expect("could not create a temp dir");
+ let exported_blocks_file = base_path.path().join("exported_blocks");
+ let db_path = base_path.path().join("db");
+
+ common::run_dev_node_for_a_while(base_path.path());
+
+ let mut executor = ExportImportRevertExecutor::new(
+ &base_path,
+ &exported_blocks_file,
+ &db_path,
+ );
+
+ // Binary and binary should work.
+ executor.run(FormatOpt::Binary, FormatOpt::Binary, false);
+ // Binary and JSON should fail.
+ executor.run(FormatOpt::Binary, FormatOpt::Json, true);
+ // JSON and JSON should work.
+ executor.run(FormatOpt::Json, FormatOpt::Json, false);
+ // JSON and binary should fail.
+ executor.run(FormatOpt::Json, FormatOpt::Binary, true);
+}
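Aside: the mismatched-format cases above fail because a binary export is SCALE-encoded (a `u64` block count followed by the blocks themselves), which is not parseable as a JSON stream, and vice versa. A minimal standalone sketch of that failure mode, assuming the `parity-scale-codec` and `serde_json` crates and a plain `u32` as a stand-in for a block:

```rust
use codec::Encode; // parity-scale-codec

fn main() {
    // A binary export starts with the number of blocks as a SCALE-encoded u64...
    let mut binary_export = 1u64.encode();
    // ...followed by the SCALE-encoded blocks (a u32 stands in for a SignedBlock here).
    binary_export.extend(42u32.encode());

    // Feeding those bytes to a JSON deserializer fails, which is what
    // `import-blocks` without `--binary` would hit on a binary export.
    let as_json: Result<u32, _> = serde_json::from_slice(&binary_export);
    assert!(as_json.is_err());
}
```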
diff --git a/bin/node/cli/tests/import_export_and_revert_work.rs b/bin/node/cli/tests/import_export_and_revert_work.rs
deleted file mode 100644
index 91c8b024e1b49..0000000000000
--- a/bin/node/cli/tests/import_export_and_revert_work.rs
+++ /dev/null
@@ -1,61 +0,0 @@
-// This file is part of Substrate.
-
-// Copyright (C) 2020 Parity Technologies (UK) Ltd.
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0
-
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-#![cfg(unix)]
-
-use assert_cmd::cargo::cargo_bin;
-use std::{process::Command, fs};
-use tempfile::tempdir;
-
-mod common;
-
-#[test]
-fn import_export_and_revert_work() {
- let base_path = tempdir().expect("could not create a temp dir");
- let exported_blocks = base_path.path().join("exported_blocks");
-
- common::run_dev_node_for_a_while(base_path.path());
-
- let status = Command::new(cargo_bin("substrate"))
- .args(&["export-blocks", "--dev", "--pruning", "archive", "-d"])
- .arg(base_path.path())
- .arg(&exported_blocks)
- .status()
- .unwrap();
- assert!(status.success());
-
- let metadata = fs::metadata(&exported_blocks).unwrap();
- assert!(metadata.len() > 0, "file exported_blocks should not be empty");
-
- let _ = fs::remove_dir_all(base_path.path().join("db"));
-
- let status = Command::new(cargo_bin("substrate"))
- .args(&["import-blocks", "--dev", "--pruning", "archive", "-d"])
- .arg(base_path.path())
- .arg(&exported_blocks)
- .status()
- .unwrap();
- assert!(status.success());
-
- let status = Command::new(cargo_bin("substrate"))
- .args(&["revert", "--dev", "--pruning", "archive", "-d"])
- .arg(base_path.path())
- .status()
- .unwrap();
- assert!(status.success());
-}
diff --git a/bin/node/cli/tests/inspect_works.rs b/bin/node/cli/tests/inspect_works.rs
index 59bdaf7de3153..aa9653acadba5 100644
--- a/bin/node/cli/tests/inspect_works.rs
+++ b/bin/node/cli/tests/inspect_works.rs
@@ -22,7 +22,7 @@ use assert_cmd::cargo::cargo_bin;
use std::process::Command;
use tempfile::tempdir;
-mod common;
+pub mod common;
#[test]
fn inspect_works() {
diff --git a/bin/node/cli/tests/purge_chain_works.rs b/bin/node/cli/tests/purge_chain_works.rs
index 8d637be3e8e15..001bed8b136f5 100644
--- a/bin/node/cli/tests/purge_chain_works.rs
+++ b/bin/node/cli/tests/purge_chain_works.rs
@@ -20,7 +20,7 @@ use assert_cmd::cargo::cargo_bin;
use std::process::Command;
use tempfile::tempdir;
-mod common;
+pub mod common;
#[test]
#[cfg(unix)]
diff --git a/bin/node/cli/tests/running_the_node_and_interrupt.rs b/bin/node/cli/tests/running_the_node_and_interrupt.rs
index a8c4be469544e..bd79dcd77a49a 100644
--- a/bin/node/cli/tests/running_the_node_and_interrupt.rs
+++ b/bin/node/cli/tests/running_the_node_and_interrupt.rs
@@ -20,7 +20,7 @@ use assert_cmd::cargo::cargo_bin;
use std::{convert::TryInto, process::Command, thread, time::Duration};
use tempfile::tempdir;
-mod common;
+pub mod common;
#[test]
#[cfg(unix)]
diff --git a/client/cli/src/commands/import_blocks_cmd.rs b/client/cli/src/commands/import_blocks_cmd.rs
index e138850c8bfd3..a74f4d524c95b 100644
--- a/client/cli/src/commands/import_blocks_cmd.rs
+++ b/client/cli/src/commands/import_blocks_cmd.rs
@@ -41,6 +41,10 @@ pub struct ImportBlocksCmd {
#[structopt(long = "default-heap-pages", value_name = "COUNT")]
pub default_heap_pages: Option<u32>,
+ /// Try importing blocks from binary format rather than JSON.
+ #[structopt(long)]
+ pub binary: bool,
+
#[allow(missing_docs)]
#[structopt(flatten)]
pub shared_params: SharedParams,
@@ -79,7 +83,7 @@ impl ImportBlocksCmd {
};
builder(config)?
- .import_blocks(file, false)
+ .import_blocks(file, false, self.binary)
.await
.map_err(Into::into)
}
diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs
index 16d78c49e1f45..d921606ea6b16 100644
--- a/client/service/src/builder.rs
+++ b/client/service/src/builder.rs
@@ -831,6 +831,7 @@ pub trait ServiceBuilderCommand {
self,
input: impl Read + Seek + Send + 'static,
force: bool,
+ binary: bool,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send>>;
/// Performs the blocks export.
diff --git a/client/service/src/chain_ops.rs b/client/service/src/chain_ops.rs
index 5c7dca0da73dc..0297ad5c9053e 100644
--- a/client/service/src/chain_ops.rs
+++ b/client/service/src/chain_ops.rs
@@ -25,10 +25,10 @@ use sc_chain_spec::ChainSpec;
use log::{warn, info};
use futures::{future, prelude::*};
use sp_runtime::traits::{
- Block as BlockT, NumberFor, One, Zero, Header, SaturatedConversion
+ Block as BlockT, NumberFor, One, Zero, Header, SaturatedConversion, MaybeSerializeDeserialize,
};
use sp_runtime::generic::{BlockId, SignedBlock};
-use codec::{Decode, Encode, IoReader};
+use codec::{Decode, Encode, IoReader as CodecIoReader};
use crate::client::{Client, LocalCallExecutor};
use sp_consensus::{
BlockOrigin,
@@ -39,12 +39,250 @@ use sp_core::storage::{StorageKey, well_known_keys, ChildInfo, Storage, StorageC
use sc_client_api::{StorageProvider, BlockBackend, UsageProvider};
use std::{io::{Read, Write, Seek}, pin::Pin, collections::HashMap};
+use std::{thread, time::{Duration, Instant}};
+use serde_json::{de::IoRead as JsonIoRead, Deserializer, StreamDeserializer};
+use std::convert::{TryFrom, TryInto};
+use sp_runtime::traits::{CheckedDiv, Saturating};
+
+/// Number of blocks we will add to the queue before waiting for the queue to catch up.
+const MAX_PENDING_BLOCKS: u64 = 1_024;
+
+/// Number of milliseconds to wait until next poll.
+const DELAY_TIME: u64 = 2_000;
+
+/// Number of milliseconds that must have passed between two updates.
+const TIME_BETWEEN_UPDATES: u64 = 3_000;
/// Build a chain spec json
pub fn build_spec(spec: &dyn ChainSpec, raw: bool) -> error::Result<String> {
spec.as_json(raw).map_err(Into::into)
}
+
+/// Helper enum that wraps either a binary decoder (from parity-scale-codec), or a JSON decoder (from serde_json).
+/// Implements the `Iterator` trait: calling `next()` decodes the next SignedBlock and returns it.
+enum BlockIter<R, B> where
+ R: std::io::Read + std::io::Seek,
+{
+ Binary {
+ // Total number of blocks we are expecting to decode.
+ num_expected_blocks: u64,
+ // Number of blocks we have decoded thus far.
+ read_block_count: u64,
+ // Reader to the data, used for decoding new blocks.
+ reader: CodecIoReader<R>,
+ },
+ Json {
+ // Number of blocks we have decoded thus far.
+ read_block_count: u64,
+ // Stream to the data, used for decoding new blocks.
+ reader: StreamDeserializer<'static, JsonIoRead<R>, SignedBlock<B>>,
+ },
+}
+
+impl<R, B> BlockIter<R, B> where
+ R: Read + Seek + 'static,
+ B: BlockT + MaybeSerializeDeserialize,
+{
+ fn new(input: R, binary: bool) -> Result<Self, String> {
+ if binary {
+ let mut reader = CodecIoReader(input);
+ // If the file is encoded in binary format, it is expected to first specify the number
+ // of blocks that are going to be decoded. We read it and store it in the iterator.
+ let num_expected_blocks: u64 = Decode::decode(&mut reader)
+ .map_err(|e| format!("Failed to decode the number of blocks: {:?}", e))?;
+ Ok(BlockIter::Binary {
+ num_expected_blocks,
+ read_block_count: 0,
+ reader,
+ })
+ } else {
+ let stream_deser = Deserializer::from_reader(input)
+ .into_iter::<SignedBlock<B>>();
+ Ok(BlockIter::Json {
+ reader: stream_deser,
+ read_block_count: 0,
+ })
+ }
+ }
+
+ /// Returns the number of blocks read thus far.
+ fn read_block_count(&self) -> u64 {
+ match self {
+ BlockIter::Binary { read_block_count, .. }
+ | BlockIter::Json { read_block_count, .. }
+ => *read_block_count,
+ }
+ }
+
+ /// Returns the total number of blocks to be imported, if possible.
+ fn num_expected_blocks(&self) -> Option<u64> {
+ match self {
+ BlockIter::Binary { num_expected_blocks, ..} => Some(*num_expected_blocks),
+ BlockIter::Json {..} => None
+ }
+ }
+}
+
+impl<R, B> Iterator for BlockIter<R, B> where
+ R: Read + Seek + 'static,
+ B: BlockT + MaybeSerializeDeserialize,
+{
+ type Item = Result<SignedBlock<B>, String>;
+
+ fn next(&mut self) -> Option {
+ match self {
+ BlockIter::Binary { num_expected_blocks, read_block_count, reader } => {
+ if read_block_count < num_expected_blocks {
+ let block_result: Result<SignedBlock<B>, _> = SignedBlock::<B>::decode(reader)
+ .map_err(|e| e.to_string());
+ *read_block_count += 1;
+ Some(block_result)
+ } else {
+ // `read_block_count` == `num_expected_blocks` so we've read enough blocks.
+ None
+ }
+ }
+ BlockIter::Json { reader, read_block_count } => {
+ let res = Some(reader.next()?.map_err(|e| e.to_string()));
+ *read_block_count += 1;
+ res
+ }
+ }
+ }
+}
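A minimal sketch of the binary layout that `BlockIter::Binary` consumes — a SCALE-encoded `u64` count followed by the SCALE-encoded blocks — assuming `parity-scale-codec` and using `u32` as a stand-in for `SignedBlock<B>`:

```rust
use codec::{Decode, Encode, IoReader};
use std::io::Cursor;

fn main() {
    // Encode the count first, then each block, mirroring the export format.
    let blocks: Vec<u32> = vec![7, 8, 9];
    let mut bytes = (blocks.len() as u64).encode();
    for block in &blocks {
        bytes.extend(block.encode());
    }

    // Decode the count, then exactly that many blocks, as `next()` does above.
    let mut reader = IoReader(Cursor::new(bytes));
    let num_expected_blocks = u64::decode(&mut reader).unwrap();
    let decoded: Vec<u32> = (0..num_expected_blocks)
        .map(|_| u32::decode(&mut reader).unwrap())
        .collect();
    assert_eq!(decoded, blocks);
}
```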
+
+/// Imports the SignedBlock to the queue.
+fn import_block_to_queue<TBl, TImpQu>(
+ signed_block: SignedBlock<TBl>,
+ queue: &mut TImpQu,
+ force: bool
+) where
+ TBl: BlockT + MaybeSerializeDeserialize,
+ TImpQu: 'static + ImportQueue<TBl>,
+{
+ let (header, extrinsics) = signed_block.block.deconstruct();
+ let hash = header.hash();
+ // import queue handles verification and importing it into the client.
+ queue.import_blocks(BlockOrigin::File, vec![
+ IncomingBlock::<TBl> {
+ hash,
+ header: Some(header),
+ body: Some(extrinsics),
+ justification: signed_block.justification,
+ origin: None,
+ allow_missing_state: false,
+ import_existing: force,
+ }
+ ]);
+}
+
+/// Returns true if we have imported every block we were supposed to import, else returns false.
+fn importing_is_done(
+ num_expected_blocks: Option<u64>,
+ read_block_count: u64,
+ imported_blocks: u64
+) -> bool {
+ if let Some(num_expected_blocks) = num_expected_blocks {
+ imported_blocks >= num_expected_blocks
+ } else {
+ imported_blocks >= read_block_count
+ }
+}
+
+/// Structure used to log the block importing speed.
+struct Speedometer<B: BlockT> {
+ best_number: NumberFor<B>,
+ last_number: Option<NumberFor<B>>,
+ last_update: Instant,
+}
+
+impl<B: BlockT> Speedometer<B> {
+ /// Creates a fresh Speedometer.
+ fn new() -> Self {
+ Self {
+ best_number: NumberFor::<B>::from(0),
+ last_number: None,
+ last_update: Instant::now(),
+ }
+ }
+
+ /// Calculates `(best_number - last_number) / (now - last_update)` and
+ /// logs the speed of import.
+ fn display_speed(&self) {
+ // Number of milliseconds elapsed since last time.
+ let elapsed_ms = {
+ let elapsed = self.last_update.elapsed();
+ let since_last_millis = elapsed.as_secs() * 1000;
+ let since_last_subsec_millis = elapsed.subsec_millis() as u64;
+ since_last_millis + since_last_subsec_millis
+ };
+
+ // Number of blocks that have been imported since last time.
+ let diff = match self.last_number {
+ None => return,
+ Some(n) => self.best_number.saturating_sub(n)
+ };
+
+ if let Ok(diff) = TryInto::<u128>::try_into(diff) {
+ // If the number of blocks can be converted to a regular integer, then it's easy: just
+ // do the math and turn it into a `f64`.
+ let speed = diff.saturating_mul(10_000).checked_div(u128::from(elapsed_ms))
+ .map_or(0.0, |s| s as f64) / 10.0;
+ info!("📦 Current best block: {} ({:4.1} bps)", self.best_number, speed);
+ } else {
+ // If the number of blocks can't be converted to a regular integer, then we need a more
+ // algebraic approach and we stay within the realm of integers.
+ let one_thousand = NumberFor::<B>::from(1_000);
+ let elapsed = NumberFor::<B>::from(
+ <u32 as TryFrom<u64>>::try_from(elapsed_ms).unwrap_or(u32::max_value())
+ );
+
+ let speed = diff.saturating_mul(one_thousand).checked_div(&elapsed)
+ .unwrap_or_else(Zero::zero);
+ info!("📦 Current best block: {} ({} bps)", self.best_number, speed)
+ }
+ }
+
+ /// Updates the Speedometer.
+ fn update(&mut self, best_number: NumberFor<B>) {
+ self.last_number = Some(self.best_number);
+ self.best_number = best_number;
+ self.last_update = Instant::now();
+ }
+
+ /// If more than `TIME_BETWEEN_UPDATES` milliseconds have elapsed since the last update,
+ /// then print and update the Speedometer.
+ fn notify_user(&mut self, best_number: NumberFor<B>) {
+ let delta = Duration::from_millis(TIME_BETWEEN_UPDATES);
+ if Instant::now().duration_since(self.last_update) >= delta {
+ self.display_speed();
+ self.update(best_number);
+ }
+ }
+}
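A worked standalone example of the fixed-point arithmetic in `display_speed`: scaling by 10_000 before the integer division keeps one decimal digit of precision, which the final division by 10.0 exposes. The numbers are illustrative only:

```rust
fn main() {
    let diff: u128 = 300;         // blocks imported since the last update
    let elapsed_ms: u128 = 2_500; // milliseconds since the last update

    // 300 * 10_000 / 2_500 = 1_200, then 1_200 / 10.0 = 120.0 bps.
    let speed = diff.saturating_mul(10_000).checked_div(elapsed_ms)
        .map_or(0.0, |s| s as f64) / 10.0;
    assert_eq!(speed, 120.0); // 300 blocks in 2.5 seconds.
}
```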
+
+/// The different states that the `import_blocks` future can be in.
+enum ImportState<R, B> where
+ R: Read + Seek + 'static,
+ B: BlockT + MaybeSerializeDeserialize,
+{
+ /// We are reading from the BlockIter structure, adding those blocks to the queue if possible.
+ Reading{block_iter: BlockIter<R, B>},
+ /// The queue is full (contains at least MAX_PENDING_BLOCKS blocks) and we are waiting for it to catch up.
+ WaitingForImportQueueToCatchUp{
+ block_iter: BlockIter<R, B>,
+ delay: Duration,
+ block: SignedBlock<B>
+ },
+ /// We have added all the blocks to the queue but they are still being processed.
+ WaitingForImportQueueToFinish{
+ num_expected_blocks: Option<u64>,
+ read_block_count: u64,
+ delay: Duration,
+ },
+}
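The poll loop below drives this enum with an `Option::take()` / put-back pattern so that each arm can take ownership of fields such as `block_iter`. A minimal sketch of that pattern with a dummy state (names are illustrative, not from this diff):

```rust
use std::task::Poll;

enum State {
    Reading(u32),
    Done,
}

fn poll_step(state: &mut Option<State>) -> Poll<u32> {
    // Move the state out to consume it by value, then put a successor back.
    match state.take().expect("state is always Some between polls") {
        State::Reading(n) if n < 3 => {
            *state = Some(State::Reading(n + 1));
            Poll::Pending
        }
        State::Reading(n) => {
            *state = Some(State::Done);
            Poll::Ready(n)
        }
        State::Done => Poll::Ready(0),
    }
}

fn main() {
    let mut state = Some(State::Reading(0));
    while poll_step(&mut state).is_pending() {}
}
```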
+
impl<
TBl, TRtApi, TBackend,
TExecDisp, TFchr, TSc, TImpQu, TFprb, TFpp,
@@ -54,7 +292,7 @@ impl<
Client<TBackend, LocalCallExecutor<TBackend, NativeExecutor<TExecDisp>>, TBl, TRtApi>,
TFchr, TSc, TImpQu, TFprb, TFpp, TExPool, TRpc, Backend
> where
- TBl: BlockT,
+ TBl: BlockT + MaybeSerializeDeserialize,
TBackend: 'static + sc_client_api::backend::Backend<TBl> + Send,
TExecDisp: 'static + NativeExecutionDispatch,
TImpQu: 'static + ImportQueue<TBl>,
@@ -68,6 +306,7 @@ impl<
mut self,
input: impl Read + Seek + Send + 'static,
force: bool,
+ binary: bool,
) -> Pin<Box<dyn Future<Output = Result<(), Error>> + Send>> {
struct WaitLink {
imported_blocks: u64,
@@ -87,7 +326,7 @@ impl<
fn blocks_processed(
&mut self,
imported: usize,
- _count: usize,
+ _num_expected_blocks: usize,
results: Vec<(Result<BlockImportResult<NumberFor<B>>, BlockImportError>, B::Hash)>
) {
self.imported_blocks += imported as u64;
@@ -102,10 +341,20 @@ impl<
}
}
- let mut io_reader_input = IoReader(input);
- let mut count = None::<u64>;
- let mut read_block_count = 0;
let mut link = WaitLink::new();
+ let block_iter_res: Result<BlockIter<_, TBl>, String> = BlockIter::new(input, binary);
+
+ let block_iter = match block_iter_res {
+ Ok(block_iter) => block_iter,
+ Err(e) => {
+ // We've encountered an error while creating the block iterator
+ // so we can just return a future that returns an error.
+ return future::ready(Err(Error::Other(e))).boxed()
+ }
+ };
+
+ let mut state = Some(ImportState::Reading{block_iter});
+ let mut speedometer = Speedometer::<TBl>::new();
// Importing blocks is implemented as a future, because we want the operation to be
// interruptible.
@@ -117,85 +366,85 @@ impl<
let import = future::poll_fn(move |cx| {
let client = &self.client;
let queue = &mut self.import_queue;
-
- // Start by reading the number of blocks if not done so already.
- let count = match count {
- Some(c) => c,
- None => {
- let c: u64 = match Decode::decode(&mut io_reader_input) {
- Ok(c) => c,
- Err(err) => {
- let err = format!("Error reading file: {}", err);
- return std::task::Poll::Ready(Err(From::from(err)));
+ match state.take().expect("state should never be None; qed") {
+ ImportState::Reading{mut block_iter} => {
+ match block_iter.next() {
+ None => {
+ // The iterator is over: we now need to wait for the import queue to finish.
+ let num_expected_blocks = block_iter.num_expected_blocks();
+ let read_block_count = block_iter.read_block_count();
+ let delay = Duration::from_millis(DELAY_TIME);
+ state = Some(ImportState::WaitingForImportQueueToFinish{num_expected_blocks, read_block_count, delay});
},
- };
- info!("📦 Importing {} blocks", c);
- count = Some(c);
- c
- }
- };
-
- // Read blocks from the input.
- if read_block_count < count {
- match SignedBlock::<TBl>::decode(&mut io_reader_input) {
- Ok(signed) => {
- let (header, extrinsics) = signed.block.deconstruct();
- let hash = header.hash();
- // import queue handles verification and importing it into the client
- queue.import_blocks(BlockOrigin::File, vec![
- IncomingBlock::<TBl> {
- hash,
- header: Some(header),
- body: Some(extrinsics),
- justification: signed.justification,
- origin: None,
- allow_missing_state: false,
- import_existing: force,
+ Some(block_result) => {
+ let read_block_count = block_iter.read_block_count();
+ match block_result {
+ Ok(block) => {
+ if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS {
+ // The queue is full, so do not add this block and simply wait until
+ // the queue has made some progress.
+ let delay = Duration::from_millis(DELAY_TIME);
+ state = Some(ImportState::WaitingForImportQueueToCatchUp{block_iter, delay, block});
+ } else {
+ // Queue is not full, we can keep on adding blocks to the queue.
+ import_block_to_queue(block, queue, force);
+ state = Some(ImportState::Reading{block_iter});
+ }
+ }
+ Err(e) => {
+ return std::task::Poll::Ready(
+ Err(Error::Other(format!("Error reading block #{}: {}", read_block_count, e))))
+ }
}
- ]);
+ }
+ }
+ },
+ ImportState::WaitingForImportQueueToCatchUp{block_iter, delay, block} => {
+ let read_block_count = block_iter.read_block_count();
+ if read_block_count - link.imported_blocks >= MAX_PENDING_BLOCKS {
+ thread::sleep(delay);
+ // Queue is still full, so wait until there is room to insert our block.
+ state = Some(ImportState::WaitingForImportQueueToCatchUp{block_iter, delay, block});
+ } else {
+ // Queue is no longer full, so we can add our block to the queue.
+ import_block_to_queue(block, queue, force);
+ // Switch back to Reading state.
+ state = Some(ImportState::Reading{block_iter});
}
- Err(e) => {
- warn!("Error reading block data at {}: {}", read_block_count, e);
- return std::task::Poll::Ready(Ok(()));
+ },
+ ImportState::WaitingForImportQueueToFinish{num_expected_blocks, read_block_count, delay} => {
+ // All the blocks have been added to the queue, which doesn't mean they
+ // have all been properly imported.
+ if importing_is_done(num_expected_blocks, read_block_count, link.imported_blocks) {
+ // Importing is done, we can log the result and return.
+ info!(
+ "🎉 Imported {} blocks. Best: #{}",
+ read_block_count, client.chain_info().best_number
+ );
+ return std::task::Poll::Ready(Ok(()))
+ } else {
+ thread::sleep(delay);
+ // Importing is not done, we still have to wait for the queue to finish.
+ state = Some(ImportState::WaitingForImportQueueToFinish{num_expected_blocks, read_block_count, delay});
}
}
-
- read_block_count += 1;
- if read_block_count % 1000 == 0 {
- info!("#{} blocks were added to the queue", read_block_count);
- }
-
- cx.waker().wake_by_ref();
- return std::task::Poll::Pending;
}
- let blocks_before = link.imported_blocks;
queue.poll_actions(cx, &mut link);
- if link.has_error {
- info!(
- "Stopping after #{} blocks because of an error",
- link.imported_blocks,
- );
- return std::task::Poll::Ready(Ok(()));
- }
+ let best_number = client.chain_info().best_number;
+ speedometer.notify_user(best_number);
- if link.imported_blocks / 1000 != blocks_before / 1000 {
- info!(
- "#{} blocks were imported (#{} left)",
- link.imported_blocks,
- count - link.imported_blocks
- );
+ if link.has_error {
+ return std::task::Poll::Ready(Err(
+ Error::Other(
+ format!("Stopping after #{} blocks because of an error", link.imported_blocks)
+ )
+ ))
}
- if link.imported_blocks >= count {
- info!("🎉 Imported {} blocks. Best: #{}", read_block_count, client.chain_info().best_number);
- return std::task::Poll::Ready(Ok(()));
-
- } else {
- // Polling the import queue will re-schedule the task when ready.
- return std::task::Poll::Pending;
- }
+ cx.waker().wake_by_ref();
+ std::task::Poll::Pending
});
Box::pin(import)
}
@@ -295,7 +544,7 @@ impl<
1u64.encode_to(&mut buf);
block.encode_to(&mut buf);
let reader = std::io::Cursor::new(buf);
- self.import_blocks(reader, true)
+ self.import_blocks(reader, true, true)
}
Ok(None) => Box::pin(future::err("Unknown block".into())),
Err(e) => Box::pin(future::err(format!("Error reading block: {:?}", e).into())),