diff --git a/Cargo.lock b/Cargo.lock index 2bec30fdd8eca..32159c6936666 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -937,7 +937,7 @@ dependencies = [ "clap", "criterion-plot", "csv", - "itertools", + "itertools 0.9.0", "lazy_static", "num-traits", "oorandom", @@ -959,7 +959,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e022feadec601fba1649cfa83586381a4ad31c6bf3a9ab7d408118b05dd9889d" dependencies = [ "cast", - "itertools", + "itertools 0.9.0", ] [[package]] @@ -2530,6 +2530,15 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" +[[package]] +name = "itertools" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f56a2d0bc861f9165be4eb3442afd3c236d8a98afd426f65d92324ae1091a484" +dependencies = [ + "either", +] + [[package]] name = "itertools" version = "0.9.0" @@ -2878,7 +2887,7 @@ dependencies = [ "parity-multiaddr", "parking_lot 0.11.1", "pin-project 1.0.2", - "prost", + "prost 0.7.0", "prost-build", "rand 0.7.3", "ring", @@ -2935,7 +2944,7 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "prost", + "prost 0.7.0", "prost-build", "rand 0.7.3", "smallvec 1.5.0", @@ -2957,7 +2966,7 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "prost", + "prost 0.7.0", "prost-build", "rand 0.7.3", "regex", @@ -2977,7 +2986,7 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "prost", + "prost 0.7.0", "prost-build", "smallvec 1.5.0", "wasm-timer", @@ -2998,7 +3007,7 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log", - "prost", + "prost 0.7.0", "prost-build", "rand 0.7.3", "sha2 0.9.2", @@ -3060,7 +3069,7 @@ dependencies = [ "lazy_static", "libp2p-core", "log", - "prost", + "prost 0.7.0", "prost-build", "rand 0.7.3", "sha2 0.9.2", @@ -3096,7 +3105,7 @@ dependencies = [ "futures 0.3.9", "libp2p-core", "log", - "prost", + "prost 0.7.0", "prost-build", "unsigned-varint 0.6.0", "void", @@ -3773,6 +3782,7 @@ dependencies = [ "sc-consensus-epochs", "sc-consensus-slots", "sc-finality-grandpa", + "sc-finality-grandpa-warp-sync", "sc-keystore", "sc-network", "sc-offchain", @@ -5765,6 +5775,16 @@ dependencies = [ "thiserror", ] +[[package]] +name = "prost" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce49aefe0a6144a45de32927c77bd2859a5f7677b55f220ae5b744e87389c212" +dependencies = [ + "bytes 0.5.6", + "prost-derive 0.6.1", +] + [[package]] name = "prost" version = "0.7.0" @@ -5772,7 +5792,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9e6984d2f1a23009bd270b8bb56d0926810a3d483f59c987d77969e9d8e840b2" dependencies = [ "bytes 1.0.1", - "prost-derive", + "prost-derive 0.7.0", ] [[package]] @@ -5783,16 +5803,29 @@ checksum = "32d3ebd75ac2679c2af3a92246639f9fcc8a442ee420719cc4fe195b98dd5fa3" dependencies = [ "bytes 1.0.1", "heck", - "itertools", + "itertools 0.9.0", "log", "multimap", "petgraph", - "prost", + "prost 0.7.0", "prost-types", "tempfile", "which 4.0.2", ] +[[package]] +name = "prost-derive" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72" +dependencies = [ + "anyhow", + "itertools 0.8.2", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "prost-derive" version = "0.7.0" @@ -5800,7 +5833,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"169a15f3008ecb5160cba7d37bcd690a7601b6d30cfb87a117d45e59d52af5d4" dependencies = [ "anyhow", - "itertools", + "itertools 0.9.0", "proc-macro2", "quote", "syn", @@ -5813,7 +5846,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b518d7cdd93dab1d1122cf07fa9a60771836c668dde9d9e2a139f957f0d9f1bb" dependencies = [ "bytes 1.0.1", - "prost", + "prost 0.7.0", ] [[package]] @@ -6435,7 +6468,7 @@ dependencies = [ "libp2p", "log", "parity-scale-codec", - "prost", + "prost 0.7.0", "prost-build", "quickcheck", "rand 0.7.3", @@ -6977,6 +7010,7 @@ dependencies = [ "fork-tree", "futures 0.3.9", "futures-timer 3.0.2", + "linked-hash-map", "log", "parity-scale-codec", "parking_lot 0.11.1", @@ -7042,6 +7076,25 @@ dependencies = [ "substrate-test-runtime-client", ] +[[package]] +name = "sc-finality-grandpa-warp-sync" +version = "0.8.0" +dependencies = [ + "derive_more", + "futures 0.3.9", + "log", + "num-traits", + "parity-scale-codec", + "parking_lot 0.11.1", + "prost 0.6.1", + "sc-client-api", + "sc-finality-grandpa", + "sc-network", + "sc-service", + "sp-blockchain", + "sp-runtime", +] + [[package]] name = "sc-informant" version = "0.8.1" @@ -7126,7 +7179,7 @@ dependencies = [ "parity-scale-codec", "parking_lot 0.11.1", "pin-project 0.4.27", - "prost", + "prost 0.7.0", "prost-build", "quickcheck", "rand 0.7.3", @@ -10480,6 +10533,6 @@ checksum = "b89249644df056b522696b1bb9e7c18c87e8ffa3e2f0dc3b0155875d6498f01b" dependencies = [ "cc", "glob", - "itertools", + "itertools 0.9.0", "libc", ] diff --git a/Cargo.toml b/Cargo.toml index 2a0db9b385a6b..fc22f440ca7fd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -37,6 +37,7 @@ members = [ "client/executor/wasmi", "client/executor/wasmtime", "client/finality-grandpa", + "client/finality-grandpa-warp-sync", "client/informant", "client/keystore", "client/light", diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index aaee373119592..4c245dcf629fb 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -74,6 +74,7 @@ sc-service = { version = "0.8.0", default-features = false, path = "../../../cli sc-tracing = { version = "2.0.0", path = "../../../client/tracing" } sc-telemetry = { version = "2.0.0", path = "../../../client/telemetry" } sc-authority-discovery = { version = "0.8.0", path = "../../../client/authority-discovery" } +sc-finality-grandpa-warp-sync = { version = "0.8.0", path = "../../../client/finality-grandpa-warp-sync", optional = true } # frame dependencies pallet-indices = { version = "2.0.0", path = "../../../frame/indices" } @@ -151,6 +152,7 @@ cli = [ "frame-benchmarking-cli", "substrate-frame-cli", "sc-service/db", + "sc-finality-grandpa-warp-sync", "structopt", "substrate-build-script-utils", ] diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index c3061b88709ab..217914d2b3b09 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -194,6 +194,11 @@ pub fn new_full_base( config.network.extra_sets.push(grandpa::grandpa_peers_set_config()); + #[cfg(feature = "cli")] + config.network.request_response_protocols.push(sc_finality_grandpa_warp_sync::request_response_config_for_chain( + &config, task_manager.spawn_handle(), backend.clone(), + )); + let (network, network_status_sinks, system_rpc_tx, network_starter) = sc_service::build_network(sc_service::BuildNetworkParams { config: &config, diff --git a/client/finality-grandpa-warp-sync/Cargo.toml b/client/finality-grandpa-warp-sync/Cargo.toml new file mode 100644 index 0000000000000..4f7ee0301f417 --- 
/dev/null
+++ b/client/finality-grandpa-warp-sync/Cargo.toml
@@ -0,0 +1,28 @@
+[package]
+description = "A request-response protocol for handling grandpa warp sync requests"
+name = "sc-finality-grandpa-warp-sync"
+version = "0.8.0"
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0"
+authors = ["Parity Technologies <admin@parity.io>"]
+edition = "2018"
+publish = false
+homepage = "https://substrate.dev"
+repository = "https://github.com/paritytech/substrate/"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
+
+[dependencies]
+sc-network = { version = "0.8.0", path = "../network" }
+sc-finality-grandpa = { version = "0.8.0", path = "../finality-grandpa" }
+sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" }
+sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" }
+sc-client-api = { version = "2.0.0", path = "../api" }
+sc-service = { version = "0.8.0", path = "../service" }
+futures = "0.3.8"
+log = "0.4.11"
+derive_more = "0.99.11"
+codec = { package = "parity-scale-codec", version = "1.3.5" }
+prost = "0.6.1"
+num-traits = "0.2.14"
+parking_lot = "0.11.1"
diff --git a/client/finality-grandpa-warp-sync/src/lib.rs b/client/finality-grandpa-warp-sync/src/lib.rs
new file mode 100644
index 0000000000000..d22d74c2faeed
--- /dev/null
+++ b/client/finality-grandpa-warp-sync/src/lib.rs
@@ -0,0 +1,161 @@
+// Copyright 2021 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+
+//! Helper for handling (i.e. answering) grandpa warp sync requests from a remote peer via the
+//! [`crate::request_responses::RequestResponsesBehaviour`].
+
+use codec::Decode;
+use sc_network::config::{ProtocolId, IncomingRequest, RequestResponseConfig};
+use sc_client_api::Backend;
+use sp_runtime::traits::NumberFor;
+use futures::channel::{mpsc, oneshot};
+use futures::stream::StreamExt;
+use log::debug;
+use sp_runtime::traits::Block as BlockT;
+use std::time::Duration;
+use std::sync::Arc;
+use sc_service::{SpawnTaskHandle, config::{Configuration, Role}};
+use sc_finality_grandpa::WarpSyncFragmentCache;
+
+/// Generates the appropriate [`RequestResponseConfig`] for a given chain configuration.
+pub fn request_response_config_for_chain<TBlock: BlockT, TBackend: Backend<TBlock> + 'static>(
+	config: &Configuration,
+	spawn_handle: SpawnTaskHandle,
+	backend: Arc<TBackend>,
+) -> RequestResponseConfig
+	where NumberFor<TBlock>: sc_finality_grandpa::BlockNumberOps,
+{
+	let protocol_id = config.protocol_id();
+
+	if matches!(config.role, Role::Light) {
+		// Allow outgoing requests but deny incoming requests.
+		generate_request_response_config(protocol_id.clone())
+	} else {
+		// Allow both outgoing and incoming requests.
+		let (handler, request_response_config) = GrandpaWarpSyncRequestHandler::new(
+			protocol_id.clone(),
+			backend.clone(),
+		);
+		spawn_handle.spawn("grandpa_warp_sync_request_handler", handler.run());
+		request_response_config
+	}
+}
+
+const LOG_TARGET: &str = "finality-grandpa-warp-sync-request-handler";
+
+/// Generates a [`RequestResponseConfig`] for the grandpa warp sync request protocol, refusing incoming requests.
+pub fn generate_request_response_config(protocol_id: ProtocolId) -> RequestResponseConfig {
+	RequestResponseConfig {
+		name: generate_protocol_name(protocol_id).into(),
+		max_request_size: 32,
+		max_response_size: 16 * 1024 * 1024,
+		request_timeout: Duration::from_secs(10),
+		inbound_queue: None,
+	}
+}
+
+/// Generate the grandpa warp sync protocol name from the chain-specific protocol identifier.
+fn generate_protocol_name(protocol_id: ProtocolId) -> String {
+	let mut s = String::new();
+	s.push_str("/");
+	s.push_str(protocol_id.as_ref());
+	s.push_str("/sync/warp");
+	s
+}
+
+#[derive(codec::Decode)]
+struct Request<B: BlockT> {
+	begin: B::Hash
+}
+
+/// Limit on the number of fragments returned in a single warp sync proof.
+/// Allowing the client to define this limit is a possible future improvement.
+const WARP_SYNC_FRAGMENTS_LIMIT: usize = 100;
+
+/// Number of items (with a justification) kept in the warp sync cache.
+/// This should be made customizable; a low number is used until then.
+const WARP_SYNC_CACHE_SIZE: usize = 20;
+
+/// Handler for incoming grandpa warp sync requests from a remote peer.
+pub struct GrandpaWarpSyncRequestHandler<TBackend, TBlock: BlockT> {
+	backend: Arc<TBackend>,
+	cache: Arc<parking_lot::RwLock<WarpSyncFragmentCache<TBlock::Header>>>,
+	request_receiver: mpsc::Receiver<IncomingRequest>,
+	_phantom: std::marker::PhantomData<TBlock>
+}
+
+impl<TBlock: BlockT, TBackend: Backend<TBlock>> GrandpaWarpSyncRequestHandler<TBackend, TBlock> {
+	/// Create a new [`GrandpaWarpSyncRequestHandler`].
+	pub fn new(protocol_id: ProtocolId, backend: Arc<TBackend>) -> (Self, RequestResponseConfig) {
+		let (tx, request_receiver) = mpsc::channel(20);
+
+		let mut request_response_config = generate_request_response_config(protocol_id);
+		request_response_config.inbound_queue = Some(tx);
+		let cache = Arc::new(parking_lot::RwLock::new(WarpSyncFragmentCache::new(WARP_SYNC_CACHE_SIZE)));
+
+		(Self { backend, request_receiver, cache, _phantom: std::marker::PhantomData }, request_response_config)
+	}
+
+	fn handle_request(
+		&self,
+		payload: Vec<u8>,
+		pending_response: oneshot::Sender<Vec<u8>>
+	) -> Result<(), HandleRequestError>
+		where NumberFor<TBlock>: sc_finality_grandpa::BlockNumberOps,
+	{
+		let request = Request::<TBlock>::decode(&mut &payload[..])?;
+
+		let mut cache = self.cache.write();
+		let response = sc_finality_grandpa::prove_warp_sync(
+			self.backend.blockchain(), request.begin, Some(WARP_SYNC_FRAGMENTS_LIMIT), Some(&mut cache)
+		)?;
+
+		pending_response.send(response)
+			.map_err(|_| HandleRequestError::SendResponse)
+	}
+
+	/// Run [`GrandpaWarpSyncRequestHandler`].
+	pub async fn run(mut self)
+		where NumberFor<TBlock>: sc_finality_grandpa::BlockNumberOps,
+	{
+		while let Some(request) = self.request_receiver.next().await {
+			let IncomingRequest { peer, payload, pending_response } = request;
+
+			match self.handle_request(payload, pending_response) {
+				Ok(()) => debug!(target: LOG_TARGET, "Handled grandpa warp sync request from {}.", peer),
+				Err(e) => debug!(
+					target: LOG_TARGET,
+					"Failed to handle grandpa warp sync request from {}: {}",
+					peer, e,
+				),
+			}
+		}
+	}
+}
+
+#[derive(derive_more::Display, derive_more::From)]
+enum HandleRequestError {
+	#[display(fmt = "Failed to decode request: {}.", _0)]
+	DecodeProto(prost::DecodeError),
+	#[display(fmt = "Failed to encode response: {}.", _0)]
+	EncodeProto(prost::EncodeError),
+	#[display(fmt = "Failed to decode block hash: {}.", _0)]
+	DecodeScale(codec::Error),
+	Client(sp_blockchain::Error),
+	#[display(fmt = "Failed to send response.")]
+	SendResponse,
+}
diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml
index 1309cbb316b67..1b410b32013a3 100644
--- a/client/finality-grandpa/Cargo.toml
+++ b/client/finality-grandpa/Cargo.toml
@@ -45,6 +45,7 @@ prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../..
 sc-block-builder = { version = "0.8.0", path = "../block-builder" }
 finality-grandpa = { version = "0.12.3", features = ["derive-codec"] }
 pin-project = "0.4.6"
+linked-hash-map = "0.5.2"
 
 [dev-dependencies]
 assert_matches = "1.3.0"
diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs
index bd29b18bae12a..a0fad2f92c881 100644
--- a/client/finality-grandpa/src/finality_proof.rs
+++ b/client/finality-grandpa/src/finality_proof.rs
@@ -52,7 +52,7 @@ use parity_scale_codec::{Encode, Decode};
 use finality_grandpa::BlockNumberOps;
 use sp_runtime::{
 	Justification, generic::BlockId,
-	traits::{NumberFor, Block as BlockT, Header as HeaderT, One},
+	traits::{NumberFor, Block as BlockT, Header as HeaderT, Zero, One},
 };
 use sp_core::storage::StorageKey;
 use sc_telemetry::{telemetry, CONSENSUS_INFO};
@@ -247,6 +247,23 @@ pub struct FinalityProofFragment {
 /// - all other fragments provide justifications for GRANDPA authorities set changes within requested range.
 type FinalityProof<Header> = Vec<FinalityProofFragment<Header>>;
+/// Single fragment of an authority set proof.
+///
+/// Finality for block B is proved by providing:
+/// 1) the header of this block;
+/// 2) the justification for the block containing an authority set change digest;
+#[derive(Debug, PartialEq, Clone, Encode, Decode)]
+pub(crate) struct AuthoritySetProofFragment<Header: HeaderT> {
+	/// The header of the given block.
+	pub header: Header,
+	/// Justification of the given block.
+	pub justification: Vec<u8>,
+}
+
+/// An authority set proof is the ordered set of authority set fragments, where:
+/// - the last fragment matches the target block.
+type AuthoritySetProof<Header> = Vec<AuthoritySetProofFragment<Header>>;
+
 /// Finality proof request data.
 #[derive(Debug, Encode, Decode)]
 enum FinalityProofRequest {
@@ -425,6 +442,133 @@ pub(crate) fn prove_finality<Block: BlockT, B: BlockchainBackend<Block>, J>(
 	}
 }
 
+/// Prepare an authority set proof for the best possible block, starting at a given trusted block.
+///
+/// The starting block should be within the bonding duration.
+/// We only return proofs for finalized blocks (with a justification).
+///
+/// It is assumed that the caller already has a proof-of-finality for the block 'begin'.
+pub fn prove_warp_sync<Block: BlockT, B: BlockchainBackend<Block>>(
+	blockchain: &B,
+	begin: Block::Hash,
+	max_fragment_limit: Option<usize>,
+	mut cache: Option<&mut WarpSyncFragmentCache<Block::Header>>,
+) -> ::sp_blockchain::Result<Vec<u8>> {
+
+	let begin = BlockId::Hash(begin);
+	let begin_number = blockchain.block_number_from_id(&begin)?
+		.ok_or_else(|| ClientError::Backend("Missing start block".to_string()))?;
+	let end = BlockId::Hash(blockchain.last_finalized()?);
+	let end_number = blockchain.block_number_from_id(&end)?
+		// This error should not happen, we could also panic.
+		.ok_or_else(|| ClientError::Backend("Missing last finalized block".to_string()))?;
+
+	if begin_number > end_number {
+		return Err(ClientError::Backend("Unfinalized start for authority proof".to_string()));
+	}
+
+	let mut result = Vec::new();
+	let mut last_apply = None;
+
+	let header = blockchain.expect_header(begin)?;
+	let mut index = *header.number();
+
+	// Find a previous change in case there is a delay.
+	// This operation is costly and only needed for the delay corner case.
+	while index > Zero::zero() {
+		index = index - One::one();
+		if let Some((fragment, apply_block)) = get_warp_sync_proof_fragment(blockchain, index, &mut cache)? {
+			if last_apply.map(|next| &next > header.number()).unwrap_or(false) {
+				result.push(fragment);
+				last_apply = Some(apply_block);
+			} else {
+				break;
+			}
+		}
+	}
+
+	let mut index = *header.number();
+	while index <= end_number {
+		if max_fragment_limit.map(|limit| result.len() >= limit).unwrap_or(false) {
+			break;
+		}
+
+		if let Some((fragment, apply_block)) = get_warp_sync_proof_fragment(blockchain, index, &mut cache)? {
+			if last_apply.map(|next| apply_block < next).unwrap_or(false) {
+				// The previous delayed change will not apply, do not include it.
+				result.pop();
+			}
+			result.push(fragment);
+			last_apply = Some(apply_block);
+		}
+
+		index = index + One::one();
+	}
+
+	if result.last().as_ref().map(|head| head.header.number()) != Some(&end_number) {
+		let header = blockchain.expect_header(end)?;
+		if let Some(justification) = blockchain.justification(BlockId::Number(end_number.clone()))? {
+			result.push(AuthoritySetProofFragment {
+				header: header.clone(),
+				justification,
+			});
+		} else {
+			// No justification, do not include it.
+		}
+	}
+
+	Ok(result.encode())
+}
+
+/// Try to get a warp sync proof fragment at a given finalized block.
+fn get_warp_sync_proof_fragment<Block: BlockT, B: BlockchainBackend<Block>>(
+	blockchain: &B,
+	index: NumberFor<Block>,
+	cache: &mut Option<&mut WarpSyncFragmentCache<Block::Header>>,
+) -> sp_blockchain::Result<Option<(AuthoritySetProofFragment<Block::Header>, NumberFor<Block>)>> {
+	if let Some(cache) = cache.as_mut() {
+		if let Some(result) = cache.get_item(index) {
+			return Ok(result.clone());
+		}
+	}
+
+	let mut result = None;
+	let header = blockchain.expect_header(BlockId::Number(index))?;
+
+	if let Some((block_number, sp_finality_grandpa::ScheduledChange {
+		next_authorities: _,
+		delay,
+	})) = crate::import::find_forced_change::<Block>(&header) {
+		let dest = block_number + delay;
+		if let Some(justification) = blockchain.justification(BlockId::Number(index.clone()))? {
+			result = Some((AuthoritySetProofFragment {
+				header: header.clone(),
+				justification,
+			}, dest));
+		} else {
+			return Err(ClientError::Backend("Unjustified block with authority set change".to_string()));
+		}
+	}
+
+	if let Some(sp_finality_grandpa::ScheduledChange {
+		next_authorities: _,
+		delay,
+	}) = crate::import::find_scheduled_change::<Block>(&header) {
+		let dest = index + delay;
+		if let Some(justification) = blockchain.justification(BlockId::Number(index.clone()))? {
+			result = Some((AuthoritySetProofFragment {
+				header: header.clone(),
+				justification,
+			}, dest));
+		} else {
+			return Err(ClientError::Backend("Unjustified block with authority set change".to_string()));
+		}
+	}
+
+	cache.as_mut().map(|cache| cache.new_item(index, result.clone()));
+	Ok(result)
+}
+
 /// Check GRANDPA proof-of-finality for the given block.
 ///
 /// Returns the vector of headers that MUST be validated + imported
@@ -483,6 +627,98 @@ pub(crate) fn check_finality_proof(
 	Ok(effects)
 }
 
+/// Check a GRANDPA authority change sequence to assert finality of a target block.
+///
+/// Returns the header of the target block, together with the resulting set id and authority list.
+pub(crate) fn check_warp_sync_proof<Block: BlockT, J>(
+	current_set_id: u64,
+	current_authorities: AuthorityList,
+	remote_proof: Vec<u8>,
+) -> ClientResult<(Block::Header, u64, AuthorityList)>
+where
+	NumberFor<Block>: BlockNumberOps,
+	J: Decode + ProvableJustification<Block::Header> + BlockJustification<Block::Header>,
+{
+	// Decode the authority set proof.
+	let proof = AuthoritySetProof::<Block::Header>::decode(&mut &remote_proof[..])
+		.map_err(|_| ClientError::BadJustification("failed to decode authority proof".into()))?;
+
+	let last = proof.len().saturating_sub(1); // An empty proof is rejected below.
+
+	let mut result = (current_set_id, current_authorities, NumberFor::<Block>::zero());
+
+	for (ix, fragment) in proof.into_iter().enumerate() {
+		let is_last = ix == last;
+		result = check_warp_sync_proof_fragment::<Block, J>(
+			result.0,
+			&result.1,
+			&result.2,
+			is_last,
+			&fragment,
+		)?;
+
+		if is_last {
+			return Ok((fragment.header, result.0, result.1))
+		}
+	}
+
+	// An empty proof can't prove anything.
+	return Err(ClientError::BadJustification("empty proof of authority".into()));
+}
+
+/// Check a single fragment of a finality authority set sequence.
+fn check_warp_sync_proof_fragment<Block: BlockT, J>(
+	current_set_id: u64,
+	current_authorities: &AuthorityList,
+	previous_checked_block: &NumberFor<Block>,
+	is_last: bool,
+	authorities_proof: &AuthoritySetProofFragment<Block::Header>,
+) -> ClientResult<(u64, AuthorityList, NumberFor<Block>)>
+where
+	NumberFor<Block>: BlockNumberOps,
+	J: Decode + ProvableJustification<Block::Header> + BlockJustification<Block::Header>,
+{
+	let justification: J = Decode::decode(&mut authorities_proof.justification.as_slice())
+		.map_err(|_| ClientError::JustificationDecode)?;
+	justification.verify(current_set_id, &current_authorities)?;
+
+	// Assert that the justification is for this header.
+	if &justification.number() != authorities_proof.header.number()
+		|| justification.hash().as_ref() != authorities_proof.header.hash().as_ref() {
+		return Err(ClientError::Backend("Invalid authority warp proof, justification does not match header".to_string()));
+	}
+
+	if authorities_proof.header.number() <= previous_checked_block {
+		return Err(ClientError::Backend("Invalid authority warp proof".to_string()));
+	}
+	let current_block = authorities_proof.header.number();
+	let mut at_block = None;
+	if let Some(sp_finality_grandpa::ScheduledChange {
+		next_authorities,
+		delay,
+	}) = crate::import::find_scheduled_change::<Block>(&authorities_proof.header) {
+		let dest = *current_block + delay;
+		at_block = Some((dest, next_authorities));
+	}
+	if let Some((block_number, sp_finality_grandpa::ScheduledChange {
+		next_authorities,
+		delay,
+	})) = crate::import::find_forced_change::<Block>(&authorities_proof.header) {
+		let dest = block_number + delay;
+		at_block = Some((dest, next_authorities));
+	}
+
+	// A fragment without an authority set change is only allowed for the last block of the proof.
+	if at_block.is_none() && !is_last {
+		return Err(ClientError::Backend("Invalid authority warp proof".to_string()));
+	}
+	if let Some((at_block, next_authorities)) = at_block {
+		Ok((current_set_id + 1, next_authorities, at_block))
+	} else {
+		Ok((current_set_id, current_authorities.clone(), current_block.clone()))
+	}
+}
+
 /// Check finality proof for the single block.
 fn check_finality_proof_fragment(
 	blockchain: &B,
@@ -551,6 +787,15 @@ impl AuthoritiesOrEffects {
 	}
 }
 
+/// Block info extracted from the justification.
+pub(crate) trait BlockJustification<Header: HeaderT> {
+	/// Block number justified.
+	fn number(&self) -> Header::Number;
+
+	/// Block hash justified.
+	fn hash(&self) -> Header::Hash;
+}
+
 /// Justification used to prove block finality.
 pub(crate) trait ProvableJustification<Header: HeaderT>: Encode + Decode {
 	/// Verify justification with respect to authorities set and authorities set id.
@@ -582,6 +827,68 @@ impl ProvableJustification for GrandpaJustificatio
 	}
 }
 
+impl<Block: BlockT> BlockJustification<Block::Header> for GrandpaJustification<Block> {
+	fn number(&self) -> NumberFor<Block> {
+		self.commit.target_number.clone()
+	}
+	fn hash(&self) -> Block::Hash {
+		self.commit.target_hash.clone()
+	}
+}
+
+/// Simple cache for warp sync queries.
+pub struct WarpSyncFragmentCache<Header: HeaderT> {
+	cache: linked_hash_map::LinkedHashMap<
+		Header::Number,
+		Option<(AuthoritySetProofFragment<Header>, Header::Number)>,
+	>,
+	headers_with_justification: usize,
+	limit: usize,
+}
+
+impl<Header: HeaderT> WarpSyncFragmentCache<Header> {
+	/// Instantiate a new cache for the warp sync prover.
+	pub fn new(size: usize) -> Self {
+		WarpSyncFragmentCache {
+			cache: Default::default(),
+			headers_with_justification: 0,
+			limit: size,
+		}
+	}
+
+	fn new_item(
+		&mut self,
+		at: Header::Number,
+		item: Option<(AuthoritySetProofFragment<Header>, Header::Number)>,
+	) {
+		if self.cache.len() == self.limit {
+			self.pop_one();
+		}
+		if item.is_some() {
+			// We do not check the previous value, as the cached value is always
+			// supposed to be queried before calling `new_item`.
+			self.headers_with_justification += 1;
+		}
+		self.cache.insert(at, item);
+	}
+
+	fn pop_one(&mut self) {
+		while let Some(v) = self.cache.pop_front() {
+			if v.1.is_some() {
+				self.headers_with_justification -= 1;
+				break;
+			}
+		}
+	}
+
+	fn get_item(
+		&mut self,
+		block: Header::Number,
+	) -> Option<&mut Option<(AuthoritySetProofFragment<Header>, Header::Number)>> {
+		self.cache.get_refresh(&block)
+	}
+}
+
 #[cfg(test)]
 pub(crate) mod tests {
 	use substrate_test_runtime_client::runtime::{Block, Header, H256};
@@ -635,6 +942,24 @@ pub(crate) mod tests {
 		}
 	}
 
+	#[derive(Debug, PartialEq, Encode, Decode)]
+	pub struct TestBlockJustification(TestJustification, u64, H256);
+
+	impl BlockJustification<Header> for TestBlockJustification {
+		fn number(&self) -> <Header as HeaderT>::Number {
+			self.1
+		}
+		fn hash(&self) -> <Header as HeaderT>::Hash {
+			self.2.clone()
+		}
+	}
+
+	impl ProvableJustification<Header>
for TestBlockJustification { + fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()> { + self.0.verify(set_id, authorities) + } + } + fn header(number: u64) -> Header { let parent_hash = match number { 0 => Default::default(), @@ -1027,4 +1352,161 @@ pub(crate) mod tests { ).unwrap(); assert!(proof_of_4.is_none()); } + + #[test] + fn warp_sync_proof_encoding_decoding() { + fn test_blockchain( + nb_blocks: u64, + mut set_change: &[(u64, Vec)], + mut justifications: &[(u64, Vec)], + ) -> (InMemoryBlockchain, Vec) { + let blockchain = InMemoryBlockchain::::new(); + let mut hashes = Vec::::new(); + let mut set_id = 0; + for i in 0..nb_blocks { + let mut set_id_next = set_id; + let mut header = header(i); + set_change.first() + .map(|j| if i == j.0 { + set_change = &set_change[1..]; + let next_authorities: Vec<_> = j.1.iter().map(|i| (AuthorityId::from_slice(&[*i; 32]), 1u64)).collect(); + set_id_next += 1; + header.digest_mut().logs.push( + sp_runtime::generic::DigestItem::Consensus( + sp_finality_grandpa::GRANDPA_ENGINE_ID, + sp_finality_grandpa::ConsensusLog::ScheduledChange( + sp_finality_grandpa::ScheduledChange { delay: 0u64, next_authorities } + ).encode(), + )); + }); + + if let Some(parent) = hashes.last() { + header.set_parent_hash(parent.clone()); + } + let header_hash = header.hash(); + + let justification = justifications.first() + .and_then(|j| if i == j.0 { + justifications = &justifications[1..]; + + let authority = j.1.iter().map(|j| + (AuthorityId::from_slice(&[*j; 32]), 1u64) + ).collect(); + let justification = TestBlockJustification( + TestJustification((set_id, authority), vec![i as u8]), + i, + header_hash, + ); + Some(justification.encode()) + } else { + None + }); + hashes.push(header_hash.clone()); + set_id = set_id_next; + + blockchain.insert(header_hash, header, justification, None, NewBlockState::Final) + .unwrap(); + } + (blockchain, hashes) + } + + let (blockchain, hashes) = test_blockchain( + 7, + vec![(3, vec![9])].as_slice(), + vec![ + (1, vec![1, 2, 3]), + (2, vec![1, 2, 3]), + (3, vec![1, 2, 3]), + (4, vec![9]), + (6, vec![9]), + ].as_slice(), + ); + + // proof after set change + let mut cache = WarpSyncFragmentCache::new(5); + let proof_no_cache = prove_warp_sync(&blockchain, hashes[6], None, Some(&mut cache)).unwrap(); + let proof = prove_warp_sync(&blockchain, hashes[6], None, Some(&mut cache)).unwrap(); + assert_eq!(proof_no_cache, proof); + + let initial_authorities: Vec<_> = [1u8, 2, 3].iter().map(|i| + (AuthorityId::from_slice(&[*i; 32]), 1u64) + ).collect(); + + let authorities_next: Vec<_> = [9u8].iter().map(|i| + (AuthorityId::from_slice(&[*i; 32]), 1u64) + ).collect(); + + assert!(check_warp_sync_proof::( + 0, + initial_authorities.clone(), + proof.clone(), + ).is_err()); + assert!(check_warp_sync_proof::( + 0, + authorities_next.clone(), + proof.clone(), + ).is_err()); + assert!(check_warp_sync_proof::( + 1, + initial_authorities.clone(), + proof.clone(), + ).is_err()); + let ( + _header, + current_set_id, + current_set, + ) = check_warp_sync_proof::( + 1, + authorities_next.clone(), + proof.clone(), + ).unwrap(); + + assert_eq!(current_set_id, 1); + assert_eq!(current_set, authorities_next); + + // proof before set change + let proof = prove_warp_sync(&blockchain, hashes[1], None, None).unwrap(); + let ( + _header, + current_set_id, + current_set, + ) = check_warp_sync_proof::( + 0, + initial_authorities.clone(), + proof.clone(), + ).unwrap(); + + assert_eq!(current_set_id, 1); + assert_eq!(current_set, 
authorities_next); + + // two changes + let (blockchain, hashes) = test_blockchain( + 13, + vec![(3, vec![7]), (8, vec![9])].as_slice(), + vec![ + (1, vec![1, 2, 3]), + (2, vec![1, 2, 3]), + (3, vec![1, 2, 3]), + (4, vec![7]), + (6, vec![7]), + (8, vec![7]), // warning, requires a justification on change set + (10, vec![9]), + ].as_slice(), + ); + + // proof before set change + let proof = prove_warp_sync(&blockchain, hashes[1], None, None).unwrap(); + let ( + _header, + current_set_id, + current_set, + ) = check_warp_sync_proof::( + 0, + initial_authorities.clone(), + proof.clone(), + ).unwrap(); + + assert_eq!(current_set_id, 2); + assert_eq!(current_set, authorities_next); + } } diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index d9630e272ef9c..2eef13d583600 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -182,7 +182,7 @@ impl<'a, Block: 'a + BlockT> Drop for PendingSetChanges<'a, Block> { } } -fn find_scheduled_change(header: &B::Header) +pub(crate) fn find_scheduled_change(header: &B::Header) -> Option>> { let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); @@ -197,7 +197,7 @@ fn find_scheduled_change(header: &B::Header) header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) } -fn find_forced_change(header: &B::Header) +pub(crate) fn find_forced_change(header: &B::Header) -> Option<(NumberFor, ScheduledChange>)> { let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 040748448de6f..d556ae089b61a 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -130,6 +130,7 @@ pub use voting_rule::{ BeforeBestBlockBy, ThreeQuartersOfTheUnfinalizedChain, VotingRule, VotingRulesBuilder }; pub use finality_grandpa::voter::report; +pub use finality_proof::{prove_warp_sync, WarpSyncFragmentCache}; use aux_schema::PersistentData; use environment::{Environment, VoterSetState}; diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index a155899fbd99c..3dc716b4e1c9e 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -17,7 +17,7 @@ // along with this program. If not, see . use crate::{ - error::Error, DEFAULT_PROTOCOL_ID, MallocSizeOfWasm, RpcHandlers, NetworkStatusSinks, + error::Error, MallocSizeOfWasm, RpcHandlers, NetworkStatusSinks, start_rpc_servers, build_network_future, TransactionPoolAdapter, TaskManager, SpawnTaskHandle, metrics::MetricsService, client::{light, Client, ClientConfig}, @@ -877,18 +877,7 @@ pub fn build_network( client: client.clone(), }); - let protocol_id = { - let protocol_id_full = match config.chain_spec.protocol_id() { - Some(pid) => pid, - None => { - warn!("Using default protocol ID {:?} because none is configured in the \ - chain specs", DEFAULT_PROTOCOL_ID - ); - DEFAULT_PROTOCOL_ID - } - }; - sc_network::config::ProtocolId::from(protocol_id_full) - }; + let protocol_id = config.protocol_id(); let block_announce_validator = if let Some(f) = block_announce_validator_builder { f(client.clone()) diff --git a/client/service/src/config.rs b/client/service/src/config.rs index c3be40e08397c..74d15cb3fb922 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -219,6 +219,20 @@ impl Configuration { _ => None, } } + + /// Returns the network protocol id from the chain spec, or the default. 
+ pub fn protocol_id(&self) -> sc_network::config::ProtocolId { + let protocol_id_full = match self.chain_spec.protocol_id() { + Some(pid) => pid, + None => { + log::warn!("Using default protocol ID {:?} because none is configured in the \ + chain specs", crate::DEFAULT_PROTOCOL_ID + ); + crate::DEFAULT_PROTOCOL_ID + } + }; + sc_network::config::ProtocolId::from(protocol_id_full) + } } /// Available RPC methods.
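
Usage note (not part of the patch): a minimal sketch of how a node service wires in the new warp sync request-response protocol, mirroring the `bin/node/cli/src/service.rs` hunk above. The `config`, `task_manager`, and `backend` bindings are assumed to be the usual sc-service objects already in scope at that point of service construction.

```rust
// Keep the grandpa notification peer set registered as before.
config.network.extra_sets.push(grandpa::grandpa_peers_set_config());

// Register the grandpa warp sync request-response protocol before the network is built.
// For light clients the helper only registers the protocol for outgoing requests;
// full nodes additionally spawn a handler task that answers incoming requests.
config.network.request_response_protocols.push(
	sc_finality_grandpa_warp_sync::request_response_config_for_chain(
		&config,
		task_manager.spawn_handle(),
		backend.clone(),
	),
);
```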