diff --git a/core/client/db/src/cache/list_cache.rs b/core/client/db/src/cache/list_cache.rs
index fcce71a538f4a..4d7b4e2e51f8c 100644
--- a/core/client/db/src/cache/list_cache.rs
+++ b/core/client/db/src/cache/list_cache.rs
@@ -46,7 +46,7 @@ use log::warn;
 use client::error::{Error as ClientError, Result as ClientResult};
 use runtime_primitives::traits::{Block as BlockT, NumberFor, As, Zero};
 
-use crate::cache::{CacheItemT, ComplexBlockId};
+use crate::cache::{CacheItemT, ComplexBlockId, EntryType};
 use crate::cache::list_entry::{Entry, StorageEntry};
 use crate::cache::list_storage::{Storage, StorageTransaction, Metadata};
 
@@ -174,10 +174,10 @@ impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S>
 		parent: ComplexBlockId<Block>,
 		block: ComplexBlockId<Block>,
 		value: Option<T>,
-		is_final: bool,
+		entry_type: EntryType,
 	) -> ClientResult<Option<CommitOperation<Block, T>>> {
 		// this guarantee is currently provided by LightStorage && we're relying on it here
-		debug_assert!(!is_final || self.best_finalized_block.hash == parent.hash);
+		debug_assert!(entry_type != EntryType::Final || self.best_finalized_block.hash == parent.hash);
 
 		// we do not store any values behind finalized
 		if block.number != Zero::zero() && self.best_finalized_block.number >= block.number {
@@ -185,6 +185,7 @@ impl<Block: BlockT, T: CacheItemT, S: Storage<Block, T>> ListCache<Block, T, S>
 		}
 
 		// if the block is not final, it is possibly appended to/forking from existing unfinalized fork
+		let is_final = entry_type == EntryType::Final || entry_type == EntryType::Genesis;
 		if !is_final {
 			let mut fork_and_action = None;
 
@@ -831,12 +832,27 @@ pub mod tests {
 
 	#[test]
 	fn list_on_block_insert_works() {
+		let nfin = EntryType::NonFinal;
+		let fin = EntryType::Final;
+
 		// when trying to insert block < finalized number
 		assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100))
-			.on_block_insert(&mut DummyTransaction::new(), test_id(49), test_id(50), Some(50), false).unwrap().is_none());
+			.on_block_insert(
+				&mut DummyTransaction::new(),
+				test_id(49),
+				test_id(50),
+				Some(50),
+				nfin,
+			).unwrap().is_none());
 		// when trying to insert block @ finalized number
 		assert!(ListCache::new(DummyStorage::new(), 1024, test_id(100))
-			.on_block_insert(&mut DummyTransaction::new(), test_id(99), test_id(100), Some(100), false).unwrap().is_none());
+			.on_block_insert(
+				&mut DummyTransaction::new(),
+				test_id(99),
+				test_id(100),
+				Some(100),
+				nfin,
+			).unwrap().is_none());
 
 		// when trying to insert non-final block AND it appends to the best block of unfinalized fork
 		// AND new value is the same as in the fork's best block
@@ -848,7 +864,7 @@ pub mod tests {
 		);
 		cache.unfinalized[0].best_block = Some(test_id(4));
 		let mut tx = DummyTransaction::new();
-		assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), false).unwrap(),
+		assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), nfin).unwrap(),
			Some(CommitOperation::AppendNewBlock(0, test_id(5))));
 		assert!(tx.inserted_entries().is_empty());
 		assert!(tx.removed_entries().is_empty());
 		// when trying to insert non-final block AND it appends to the best block of unfinalized fork
 		// AND new value differs from the value at the fork's best block
 		let mut tx = DummyTransaction::new();
-		assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), false).unwrap(),
+		assert_eq!(cache.on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), nfin).unwrap(),
			Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: Some(5) })));
 		assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect());
 		assert!(tx.removed_entries().is_empty());
@@ -872,7 +888,7 @@ pub mod tests {
 			1024, test_id(2)
 		);
 		let mut tx = DummyTransaction::new();
-		assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), false).unwrap(),
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(4), nfin).unwrap(),
			Some(CommitOperation::AppendNewBlock(0, correct_id(5))));
 		assert!(tx.inserted_entries().is_empty());
 		assert!(tx.removed_entries().is_empty());
@@ -880,7 +896,7 @@ pub mod tests {
 		// when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork
 		// AND new value differs from the value at the fork's best block
 		let mut tx = DummyTransaction::new();
-		assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), false).unwrap(),
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(4), correct_id(5), Some(5), nfin).unwrap(),
			Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: Some(5) })));
 		assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect());
 		assert!(tx.removed_entries().is_empty());
@@ -898,7 +914,7 @@ pub mod tests {
 			1024, correct_id(2)
 		);
 		let mut tx = DummyTransaction::new();
-		assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), false).unwrap(),
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), nfin).unwrap(),
			Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: Some(14) })));
 		assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect());
 		assert!(tx.removed_entries().is_empty());
@@ -913,7 +929,7 @@ pub mod tests {
 			1024, correct_id(2)
 		);
 		let mut tx = DummyTransaction::new();
-		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), false).unwrap(), None);
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), nfin).unwrap(), None);
 		assert!(tx.inserted_entries().is_empty());
 		assert!(tx.removed_entries().is_empty());
 		assert!(tx.updated_meta().is_none());
@@ -926,7 +942,7 @@ pub mod tests {
 			1024, correct_id(2)
 		);
 		let mut tx = DummyTransaction::new();
-		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), false).unwrap(),
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), nfin).unwrap(),
			Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: Some(3) })));
 		assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
 		assert!(tx.removed_entries().is_empty());
@@ -935,7 +951,7 @@ pub mod tests {
 		// when inserting finalized entry AND there are no previous finalized entries
 		let cache = ListCache::new(DummyStorage::new(), 1024, correct_id(2));
 		let mut tx = DummyTransaction::new();
-		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), true).unwrap(),
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(),
			Some(CommitOperation::BlockFinalized(correct_id(3), Some(Entry { valid_from: correct_id(3), value: Some(3) }), Default::default())));
 		assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
 		assert!(tx.removed_entries().is_empty());
@@ -948,14 +964,14 @@ pub mod tests {
 			1024, correct_id(2)
 		);
 		let mut tx = DummyTransaction::new();
-		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), true).unwrap(),
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin).unwrap(),
			Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())));
 		assert!(tx.inserted_entries().is_empty());
 		assert!(tx.removed_entries().is_empty());
 		assert!(tx.updated_meta().is_none());
 		// when inserting finalized entry AND value differs from previous finalized
 		let mut tx = DummyTransaction::new();
-		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), true).unwrap(),
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin).unwrap(),
			Some(CommitOperation::BlockFinalized(correct_id(3), Some(Entry { valid_from: correct_id(3), value: Some(3) }), Default::default())));
 		assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect());
 		assert!(tx.removed_entries().is_empty());
@@ -970,7 +986,7 @@ pub mod tests {
 			1024, correct_id(2)
 		);
 		let mut tx = DummyTransaction::new();
-		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), true).unwrap(),
+		assert_eq!(cache.on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin).unwrap(),
			Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())));
 	}
diff --git a/core/client/db/src/cache/mod.rs b/core/client/db/src/cache/mod.rs
index b5dd45f11dd52..e4e23a5ca1d3b 100644
--- a/core/client/db/src/cache/mod.rs
+++ b/core/client/db/src/cache/mod.rs
@@ -25,9 +25,9 @@ use client::blockchain::Cache as BlockchainCache;
 use client::error::Result as ClientResult;
 use parity_codec::{Encode, Decode};
 use runtime_primitives::generic::BlockId;
-use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, As};
+use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, NumberFor, As, Zero};
 use consensus_common::well_known_cache_keys::Id as CacheKeyId;
-use crate::utils::{self, COLUMN_META};
+use crate::utils::{self, COLUMN_META, db_err};
 
 use self::list_cache::ListCache;
 
@@ -38,6 +38,17 @@ mod list_storage;
 /// Minimal post-finalization age of finalized blocks before they're pruned.
 const PRUNE_DEPTH: u64 = 1024;
 
+/// The type of entry that is inserted into the cache.
+#[derive(Clone, Copy, Debug, PartialEq)]
+pub enum EntryType {
+	/// Non-final entry.
+	NonFinal,
+	/// Final entry.
+	Final,
+	/// Genesis entry (inserted during cache initialization).
+	Genesis,
+}
+
 /// Block identifier that holds both hash and number.
 #[derive(Clone, Debug, Encode, Decode, PartialEq)]
 pub struct ComplexBlockId<Block: BlockT> {
@@ -70,6 +81,7 @@ pub struct DbCache<Block: BlockT> {
 	key_lookup_column: Option<u32>,
 	header_column: Option<u32>,
 	authorities_column: Option<u32>,
+	genesis_hash: Block::Hash,
 	best_finalized_block: ComplexBlockId<Block>,
 }
 
@@ -80,6 +92,7 @@ impl<Block: BlockT> DbCache<Block> {
 		key_lookup_column: Option<u32>,
 		header_column: Option<u32>,
 		authorities_column: Option<u32>,
+		genesis_hash: Block::Hash,
 		best_finalized_block: ComplexBlockId<Block>,
 	) -> Self {
 		Self {
@@ -88,10 +101,16 @@ impl<Block: BlockT> DbCache<Block> {
 			key_lookup_column,
 			header_column,
 			authorities_column,
+			genesis_hash,
 			best_finalized_block,
 		}
 	}
 
+	/// Set genesis block hash.
+	pub fn set_genesis_hash(&mut self, genesis_hash: Block::Hash) {
+		self.genesis_hash = genesis_hash;
+	}
+
 	/// Begin cache transaction.
pub fn transaction<'a>(&'a mut self, tx: &'a mut DBTransaction) -> DbCacheTransaction<'a, Block> { DbCacheTransaction { @@ -182,7 +201,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { parent: ComplexBlockId, block: ComplexBlockId, data_at: HashMap>, - is_final: bool, + entry_type: EntryType, ) -> ClientResult { assert!(self.cache_at_op.is_empty()); @@ -203,7 +222,7 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { parent.clone(), block.clone(), value.or(cache.value_at_block(&parent)?), - is_final, + entry_type, )?; if let Some(op) = op { self.cache_at_op.insert(name, op); @@ -214,8 +233,10 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { data_at.into_iter().try_for_each(|(name, data)| insert_op(name, Some(data)))?; missed_caches.into_iter().try_for_each(|name| insert_op(name, None))?; - if is_final { - self.best_finalized_block = Some(block); + match entry_type { + EntryType::Final | EntryType::Genesis => + self.best_finalized_block = Some(block), + EntryType::NonFinal => (), } Ok(self) @@ -254,6 +275,25 @@ impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { pub struct DbCacheSync(pub RwLock>); impl BlockchainCache for DbCacheSync { + fn initialize(&self, key: &CacheKeyId, data: Vec) -> ClientResult<()> { + let mut cache = self.0.write(); + let genesis_hash = cache.genesis_hash; + let cache_contents = vec![(*key, data)].into_iter().collect(); + let db = cache.db.clone(); + let mut dbtx = DBTransaction::new(); + let tx = cache.transaction(&mut dbtx); + let tx = tx.on_block_insert( + ComplexBlockId::new(Default::default(), Zero::zero()), + ComplexBlockId::new(genesis_hash, Zero::zero()), + cache_contents, + EntryType::Genesis, + )?; + let tx_ops = tx.into_ops(); + db.write(dbtx).map_err(db_err)?; + cache.commit(tx_ops); + Ok(()) + } + fn get_at(&self, key: &CacheKeyId, at: &BlockId) -> Option> { let cache = self.0.read(); let storage = cache.cache_at.get(key)?.storage(); diff --git a/core/client/db/src/light.rs b/core/client/db/src/light.rs index a571ffe142920..39817140c6613 100644 --- a/core/client/db/src/light.rs +++ b/core/client/db/src/light.rs @@ -34,7 +34,7 @@ use runtime_primitives::generic::BlockId; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, Zero, One, As, NumberFor, Digest, DigestItem}; use consensus_common::well_known_cache_keys; -use crate::cache::{DbCacheSync, DbCache, ComplexBlockId}; +use crate::cache::{DbCacheSync, DbCache, ComplexBlockId, EntryType as CacheEntryType}; use crate::utils::{self, meta_keys, Meta, db_err, open_database, read_db, block_id_to_lookup_key, read_meta}; use crate::DatabaseSettings; @@ -91,6 +91,7 @@ impl LightStorage columns::KEY_LOOKUP, columns::HEADER, columns::CACHE, + meta.genesis_hash, ComplexBlockId::new(meta.finalized_hash, meta.finalized_number), ); @@ -406,6 +407,7 @@ impl LightBlockchainStorage for LightStorage let is_genesis = number.is_zero(); if is_genesis { + self.cache.0.write().set_genesis_hash(hash); transaction.put(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); } @@ -434,7 +436,7 @@ impl LightBlockchainStorage for LightStorage ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }), ComplexBlockId::new(hash, number), cache_at, - finalized, + if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, )? .into_ops(); @@ -1040,4 +1042,24 @@ pub(crate) mod tests { // leaves at same height stay. Leaves at lower heights pruned. 
assert_eq!(db.leaves.read().hashes(), vec![block2_a, block2_b, block2_c]); } + + #[test] + fn cache_can_be_initialized_after_genesis_inserted() { + let db = LightStorage::::new_test(); + + // before cache is initialized => None + assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), None); + + // insert genesis block (no value for cache is provided) + insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); + + // after genesis is inserted => None + assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), None); + + // initialize cache + db.cache().initialize(b"test", vec![42]).unwrap(); + + // after genesis is inserted + cache is initialized => Some + assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)), Some(vec![42])); + } } diff --git a/core/client/src/blockchain.rs b/core/client/src/blockchain.rs index b0e7c2943ac86..d168ecda197c6 100644 --- a/core/client/src/blockchain.rs +++ b/core/client/src/blockchain.rs @@ -100,6 +100,11 @@ pub trait ProvideCache { /// Blockchain optional data cache. pub trait Cache: Send + Sync { + /// Initialize genesis value for the given cache. + /// + /// The operation should be performed once before anything else is inserted in the cache. + /// Otherwise cache may end up in inconsistent state. + fn initialize(&self, key: &well_known_cache_keys::Id, value_at_genesis: Vec) -> Result<()>; /// Returns cached value by the given key. fn get_at(&self, key: &well_known_cache_keys::Id, block: &BlockId) -> Option>; } diff --git a/core/client/src/error.rs b/core/client/src/error.rs index 050f867dfcb86..b201c092e0ed1 100644 --- a/core/client/src/error.rs +++ b/core/client/src/error.rs @@ -55,7 +55,10 @@ pub enum Error { /// Genesis config is invalid. #[display(fmt = "Genesis config provided is invalid")] GenesisInvalid, - /// Bad justification for header. + /// Error decoding header justification. + #[display(fmt = "error decoding justification for header")] + JustificationDecode, + /// Justification for header is correctly encoded, but invalid. #[display(fmt = "bad justification for header: {}", _0)] BadJustification(String), /// Not available on light client. 
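The new `JustificationDecode` variant above separates "could not decode" from "decoded but invalid" (`BadJustification`). Below is a minimal, self-contained sketch of that split; the toy two-byte "justification" format and the `check_justification` helper are illustrative only, not the real SCALE-encoded GRANDPA justification from this PR.

#[derive(Debug, PartialEq)]
enum Error {
    /// The justification bytes could not be decoded at all.
    JustificationDecode,
    /// The justification decoded fine but failed verification.
    BadJustification(String),
}

// Toy format: exactly two bytes, [expected_round, signed_round].
fn check_justification(raw: &[u8]) -> Result<(), Error> {
    let (expected, signed) = match raw {
        [expected, signed] => (*expected, *signed),
        _ => return Err(Error::JustificationDecode), // malformed encoding
    };
    if expected != signed {
        // Correctly encoded, but invalid.
        return Err(Error::BadJustification(
            format!("round mismatch: {} != {}", expected, signed),
        ));
    }
    Ok(())
}

fn main() {
    assert_eq!(check_justification(&[1, 1]), Ok(()));
    assert_eq!(check_justification(&[1]), Err(Error::JustificationDecode));
    assert!(matches!(check_justification(&[1, 2]), Err(Error::BadJustification(_))));
}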
diff --git a/core/client/src/lib.rs b/core/client/src/lib.rs index fe33c56262b3c..574ec95dc003f 100644 --- a/core/client/src/lib.rs +++ b/core/client/src/lib.rs @@ -64,7 +64,7 @@ pub use crate::client::{ #[cfg(feature = "std")] pub use crate::notifications::{StorageEventStream, StorageChangeSet}; #[cfg(feature = "std")] -pub use state_machine::ExecutionStrategy; +pub use state_machine::{ExecutionStrategy, NeverOffchainExt}; #[cfg(feature = "std")] pub use crate::leaves::LeafSet; diff --git a/core/consensus/aura/src/lib.rs b/core/consensus/aura/src/lib.rs index d5123feb4f17e..b5122372ba248 100644 --- a/core/consensus/aura/src/lib.rs +++ b/core/consensus/aura/src/lib.rs @@ -33,7 +33,10 @@ use consensus_common::{self, Authorities, BlockImport, Environment, Proposer, ForkChoiceStrategy, ImportBlock, BlockOrigin, Error as ConsensusError, SelectChain, well_known_cache_keys }; -use consensus_common::import_queue::{Verifier, BasicQueue, SharedBlockImport, SharedJustificationImport}; +use consensus_common::import_queue::{ + Verifier, BasicQueue, SharedBlockImport, SharedJustificationImport, SharedFinalityProofImport, + SharedFinalityProofRequestBuilder, +}; use client::{ block_builder::api::BlockBuilder as BlockBuilderApi, blockchain::ProvideCache, @@ -44,7 +47,7 @@ use client::{ use aura_primitives::AURA_ENGINE_ID; use runtime_primitives::{generic, generic::BlockId, Justification}; use runtime_primitives::traits::{ - Block, Header, Digest, DigestItemFor, DigestItem, ProvideRuntimeApi, AuthorityIdFor, + Block, Header, Digest, DigestItemFor, DigestItem, ProvideRuntimeApi, AuthorityIdFor, Zero, }; use primitives::Pair; use inherents::{InherentDataProviders, InherentData, RuntimeString}; @@ -653,6 +656,10 @@ impl Verifier for AuraVerifier where extra_verification.into_future().wait()?; + let new_authorities = pre_header.digest() + .log(DigestItem::as_authorities_change) + .map(|digest| digest.to_vec()); + let import_block = ImportBlock { origin, header: pre_header, @@ -664,8 +671,7 @@ impl Verifier for AuraVerifier where fork_choice: ForkChoiceStrategy::LongestChain, }; - // FIXME #1019 extract authorities - Ok((import_block, None)) + Ok((import_block, new_authorities)) } CheckedHeader::Deferred(a, b) => { debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); @@ -690,6 +696,38 @@ impl Authorities for AuraVerifier where } } +fn initialize_authorities_cache(client: &C) -> Result<(), ConsensusError> where + B: Block, + C: ProvideRuntimeApi + ProvideCache, + C::Api: AuthoritiesApi, +{ + // no cache => no initialization + let cache = match client.cache() { + Some(cache) => cache, + None => return Ok(()), + }; + + // check if we already have initialized the cache + let genesis_id = BlockId::Number(Zero::zero()); + let genesis_authorities: Option>> = cache + .get_at(&well_known_cache_keys::AUTHORITIES, &genesis_id) + .and_then(|v| Decode::decode(&mut &v[..])); + if genesis_authorities.is_some() { + return Ok(()); + } + + let map_err = |error| consensus_common::Error::from(consensus_common::ErrorKind::ClientImport( + format!( + "Error initializing authorities cache: {}", + error, + ))); + let genesis_authorities = authorities(client, &genesis_id)?; + cache.initialize(&well_known_cache_keys::AUTHORITIES, genesis_authorities.encode()) + .map_err(map_err)?; + + Ok(()) +} + #[allow(deprecated)] fn authorities(client: &C, at: &BlockId) -> Result>, ConsensusError> where B: Block, @@ -731,6 +769,8 @@ pub fn import_queue( slot_duration: SlotDuration, block_import: SharedBlockImport, 
justification_import: Option>, + finality_proof_import: Option>, + finality_proof_request_builder: Option>, client: Arc, extra: E, inherent_data_providers: InherentDataProviders, @@ -745,6 +785,7 @@ pub fn import_queue( P::Signature: Encode + Decode, { register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())?; + initialize_authorities_cache(&*client)?; let verifier = Arc::new( AuraVerifier { @@ -755,7 +796,13 @@ pub fn import_queue( allow_old_seals: false, } ); - Ok(BasicQueue::new(verifier, block_import, justification_import)) + Ok(BasicQueue::new( + verifier, + block_import, + justification_import, + finality_proof_import, + finality_proof_request_builder, + )) } /// Start an import queue for the Aura consensus algorithm with backwards compatibility. @@ -767,6 +814,8 @@ pub fn import_queue_accept_old_seals( slot_duration: SlotDuration, block_import: SharedBlockImport, justification_import: Option>, + finality_proof_import: Option>, + finality_proof_request_builder: Option>, client: Arc, extra: E, inherent_data_providers: InherentDataProviders, @@ -781,6 +830,7 @@ pub fn import_queue_accept_old_seals( P::Signature: Encode + Decode, { register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())?; + initialize_authorities_cache(&*client)?; let verifier = Arc::new( AuraVerifier { @@ -791,7 +841,13 @@ pub fn import_queue_accept_old_seals( allow_old_seals: true, } ); - Ok(BasicQueue::new(verifier, block_import, justification_import)) + Ok(BasicQueue::new( + verifier, + block_import, + justification_import, + finality_proof_import, + finality_proof_request_builder, + )) } #[cfg(test)] @@ -799,7 +855,7 @@ mod tests { use super::*; use consensus_common::NoNetwork as DummyOracle; use network::test::*; - use network::test::{Block as TestBlock, PeersClient}; + use network::test::{Block as TestBlock, PeersClient, PeersFullClient}; use runtime_primitives::traits::Block as BlockT; use network::config::ProtocolConfig; use parking_lot::Mutex; @@ -846,7 +902,7 @@ mod tests { impl TestNetFactory for AuraTestNet { type Specialization = DummySpecialization; - type Verifier = AuraVerifier; + type Verifier = AuraVerifier; type PeerData = (); /// Create new test network with peers and given config. 
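Both `import_queue` constructors now call `initialize_authorities_cache` before building the verifier, and that call is a no-op once a genesis value is cached. A minimal, self-contained sketch of the check-then-initialize pattern follows; `GenesisCache` and `initialize_once` are hypothetical stand-ins for the blockchain cache and for `initialize_authorities_cache`, not Substrate APIs.

use std::collections::HashMap;
use std::sync::Mutex;

// Hypothetical stand-in for the blockchain cache keyed by well-known ids.
struct GenesisCache(Mutex<HashMap<&'static str, Vec<u8>>>);

impl GenesisCache {
    fn get(&self, key: &'static str) -> Option<Vec<u8>> {
        self.0.lock().unwrap().get(key).cloned()
    }

    fn initialize(&self, key: &'static str, value: Vec<u8>) {
        self.0.lock().unwrap().insert(key, value);
    }
}

// Idempotent initialization: a second call is a no-op, so constructing the
// import queue twice does not overwrite the cached genesis authorities.
fn initialize_once(cache: &GenesisCache, compute: impl Fn() -> Vec<u8>) {
    if cache.get("authorities").is_some() {
        return; // the cache was already initialized by an earlier run
    }
    cache.initialize("authorities", compute());
}

fn main() {
    let cache = GenesisCache(Mutex::new(HashMap::new()));
    initialize_once(&cache, || vec![42]);
    initialize_once(&cache, || vec![7]); // no-op: the value stays [42]
    assert_eq!(cache.get("authorities"), Some(vec![42]));
}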
@@ -857,25 +913,30 @@ mod tests { } } - fn make_verifier(&self, client: Arc, _cfg: &ProtocolConfig) + fn make_verifier(&self, client: PeersClient, _cfg: &ProtocolConfig) -> Arc { - let slot_duration = SlotDuration::get_or_compute(&*client) - .expect("slot duration available"); - let inherent_data_providers = InherentDataProviders::new(); - register_aura_inherent_data_provider( - &inherent_data_providers, - slot_duration.get() - ).expect("Registers aura inherent data provider"); - - assert_eq!(slot_duration.get(), SLOT_DURATION); - Arc::new(AuraVerifier { - client, - extra: NothingExtra, - inherent_data_providers, - phantom: Default::default(), - allow_old_seals: false, - }) + match client { + PeersClient::Full(client) => { + let slot_duration = SlotDuration::get_or_compute(&*client) + .expect("slot duration available"); + let inherent_data_providers = InherentDataProviders::new(); + register_aura_inherent_data_provider( + &inherent_data_providers, + slot_duration.get() + ).expect("Registers aura inherent data provider"); + + assert_eq!(slot_duration.get(), SLOT_DURATION); + Arc::new(AuraVerifier { + client, + extra: NothingExtra, + inherent_data_providers, + phantom: Default::default(), + allow_old_seals: false, + }) + }, + PeersClient::Light(_) => unreachable!("No (yet) tests for light client + Aura"), + } } fn peer(&self, i: usize) -> &Peer { @@ -917,7 +978,7 @@ mod tests { let mut runtime = current_thread::Runtime::new().unwrap(); for (peer_id, key) in peers { - let client = net.lock().peer(*peer_id).client().clone(); + let client = net.lock().peer(*peer_id).client().as_full().expect("full clients are created").clone(); let select_chain = LongestChain::new( client.backend().clone(), client.import_lock().clone(), diff --git a/core/consensus/babe/src/lib.rs b/core/consensus/babe/src/lib.rs index a016e835c5fd8..fa40fe64bad5c 100644 --- a/core/consensus/babe/src/lib.rs +++ b/core/consensus/babe/src/lib.rs @@ -914,7 +914,7 @@ mod tests { impl TestNetFactory for BabeTestNet { type Specialization = DummySpecialization; - type Verifier = BabeVerifier; + type Verifier = BabeVerifier; type PeerData = (); /// Create new test network with peers and given config. @@ -926,9 +926,10 @@ mod tests { } } - fn make_verifier(&self, client: Arc, _cfg: &ProtocolConfig) + fn make_verifier(&self, client: PeersClient, _cfg: &ProtocolConfig) -> Arc { + let client = client.as_full().expect("only full clients are used in test"); trace!(target: "babe", "Creating a verifier"); let config = Config::get_or_compute(&*client) .expect("slot duration available"); @@ -1001,7 +1002,7 @@ mod tests { debug!(target: "babe", "checkpoint 4"); let mut runtime = current_thread::Runtime::new().unwrap(); for (peer_id, key) in peers { - let client = net.lock().peer(*peer_id).client().clone(); + let client = net.lock().peer(*peer_id).client().as_full().unwrap(); let environ = Arc::new(DummyFactory(client.clone())); import_notifications.push( client.import_notification_stream() diff --git a/core/consensus/common/src/block_import.rs b/core/consensus/common/src/block_import.rs index 7debe1acfec7f..1a6a8d1f5078d 100644 --- a/core/consensus/common/src/block_import.rs +++ b/core/consensus/common/src/block_import.rs @@ -22,6 +22,8 @@ use std::borrow::Cow; use std::collections::HashMap; use crate::well_known_cache_keys; +use crate::import_queue::Verifier; + /// Block import result. #[derive(Debug, PartialEq, Eq)] pub enum ImportResult { @@ -44,6 +46,8 @@ pub struct ImportedAux { pub needs_justification: bool, /// Received a bad justification. 
 	pub bad_justification: bool,
+	/// Request a finality proof for the given block.
+	pub needs_finality_proof: bool,
 }
 
 impl Default for ImportedAux {
@@ -52,6 +56,7 @@ impl Default for ImportedAux {
 			clear_justification_requests: false,
 			needs_justification: false,
 			bad_justification: false,
+			needs_finality_proof: false,
 		}
 	}
 }
@@ -202,3 +207,26 @@ pub trait JustificationImport<B: BlockT> {
 		justification: Justification,
 	) -> Result<(), Self::Error>;
 }
+
+/// Finality proof import trait.
+pub trait FinalityProofImport<B: BlockT> {
+	type Error: ::std::error::Error + Send + 'static;
+
+	/// Called by the import queue when it is started.
+	fn on_start(&self, _link: &crate::import_queue::Link<B>) { }
+
+	/// Import a finality proof and finalize the given block. Returns the finalized block or an error.
+	fn import_finality_proof(
+		&self,
+		hash: B::Hash,
+		number: NumberFor<B>,
+		finality_proof: Vec<u8>,
+		verifier: &Verifier<B>,
+	) -> Result<(B::Hash, NumberFor<B>), Self::Error>;
+}
+
+/// Finality proof request builder.
+pub trait FinalityProofRequestBuilder<B: BlockT>: Send {
+	/// Build the data blob associated with the request.
+	fn build_request_data(&self, hash: &B::Hash) -> Vec<u8>;
+}
diff --git a/core/consensus/common/src/import_queue.rs b/core/consensus/common/src/import_queue.rs
index a2da3ed2e55ad..4d64d799b18b7 100644
--- a/core/consensus/common/src/import_queue.rs
+++ b/core/consensus/common/src/import_queue.rs
@@ -27,6 +27,7 @@ use crate::block_import::{
 	BlockImport, BlockOrigin, ImportBlock, ImportedAux, ImportResult, JustificationImport,
+	FinalityProofImport, FinalityProofRequestBuilder,
 };
 use crossbeam_channel::{self as channel, Receiver, Sender};
 use parity_codec::Encode;
@@ -57,6 +58,12 @@ pub type SharedBlockImport<B> = Arc<BlockImport<B, Error=ConsensusError> + Send + Sync>;
 /// Shared justification import struct used by the queue.
 pub type SharedJustificationImport<B> = Arc<JustificationImport<B, Error=ConsensusError> + Send + Sync>;
 
+/// Shared finality proof import struct used by the queue.
+pub type SharedFinalityProofImport<B> = Arc<FinalityProofImport<B, Error=ConsensusError> + Send + Sync>;
+
+/// Shared finality proof request builder struct used by the queue.
+pub type SharedFinalityProofRequestBuilder<B> = Arc<FinalityProofRequestBuilder<B> + Send + Sync>;
+
 /// Maps to the Origin used by the network.
 pub type Origin = libp2p::PeerId;
 
@@ -76,7 +83,7 @@ pub struct IncomingBlock<B: BlockT> {
 }
 
 /// Verify a justification of a block
-pub trait Verifier<B: BlockT>: Send + Sync + Sized {
+pub trait Verifier<B: BlockT>: Send + Sync {
 	/// Verify the given data and return the ImportBlock and an optional
 	/// new set of validators to import. If not, err with an Error-Message
 	/// presented to the User in the logs.
@@ -104,6 +111,8 @@ pub trait ImportQueue<B: BlockT>: Send + Sync + ImportQueueClone<B> {
 	fn import_blocks(&self, origin: BlockOrigin, blocks: Vec<IncomingBlock<B>>);
 	/// Import a block justification.
 	fn import_justification(&self, who: Origin, hash: B::Hash, number: NumberFor<B>, justification: Justification);
+	/// Import a block finality proof.
+	fn import_finality_proof(&self, who: Origin, hash: B::Hash, number: NumberFor<B>, finality_proof: Vec<u8>);
 }
 
 pub trait ImportQueueClone<B: BlockT> {
@@ -129,6 +138,7 @@ impl<B: BlockT> ImportQueueClone<B> for BasicQueue<B> {
 	}
 }
 
+
 /// "BasicQueue" is a wrapper around a channel sender to the "BlockImporter".
 /// "BasicQueue" itself does not keep any state or do any importing work, and
 /// can therefore be sent to other threads.
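A minimal sketch of implementing the `FinalityProofRequestBuilder` trait declared above, with the block-type generics reduced to a fixed 32-byte hash. `SetIdAwareBuilder` and the byte layout are illustrative; the real GRANDPA builder SCALE-encodes a `FinalityProofRequest`.

trait FinalityProofRequestBuilder: Send {
    // Build the opaque data blob that accompanies a finality proof request.
    fn build_request_data(&self, hash: &[u8; 32]) -> Vec<u8>;
}

struct SetIdAwareBuilder {
    // The authority set id the requester expects the first justification
    // in the returned proof to be signed by.
    authorities_set_id: u64,
}

impl FinalityProofRequestBuilder for SetIdAwareBuilder {
    fn build_request_data(&self, hash: &[u8; 32]) -> Vec<u8> {
        // Toy encoding: set id (little endian) followed by the block hash.
        let mut data = self.authorities_set_id.to_le_bytes().to_vec();
        data.extend_from_slice(hash);
        data
    }
}

fn main() {
    let builder = SetIdAwareBuilder { authorities_set_id: 3 };
    let request = builder.build_request_data(&[0u8; 32]);
    assert_eq!(request.len(), 8 + 32);
}

GRANDPA's own request (see `make_finality_proof_request` later in this diff) likewise carries the last finalized hash together with the expected `authorities_set_id`.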
@@ -153,11 +163,25 @@ impl BasicQueue { pub fn new>( verifier: Arc, block_import: SharedBlockImport, - justification_import: Option> + justification_import: Option>, + finality_proof_import: Option>, + finality_proof_request_builder: Option>, ) -> Self { let (result_sender, result_port) = channel::unbounded(); - let worker_sender = BlockImportWorker::new(result_sender, verifier, block_import); - let importer_sender = BlockImporter::new(result_port, worker_sender, justification_import); + let worker_sender = BlockImportWorker::new( + result_sender, + verifier.clone(), + block_import, + finality_proof_import.clone(), + ); + let importer_sender = BlockImporter::new( + result_port, + worker_sender, + verifier, + justification_import, + finality_proof_import, + finality_proof_request_builder, + ); Self { sender: importer_sender, @@ -210,25 +234,36 @@ impl ImportQueue for BasicQueue { .send(BlockImportMsg::ImportJustification(who.clone(), hash, number, justification)) .expect("1. self is holding a sender to the Importer, 2. Importer should handle messages while there are senders around; qed"); } + + fn import_finality_proof(&self, who: Origin, hash: B::Hash, number: NumberFor, finality_proof: Vec) { + let _ = self + .sender + .send(BlockImportMsg::ImportFinalityProof(who, hash, number, finality_proof)) + .expect("1. self is holding a sender to the Importer, 2. Importer should handle messages while there are senders around; qed"); + } } pub enum BlockImportMsg { ImportBlocks(BlockOrigin, Vec>), ImportJustification(Origin, B::Hash, NumberFor, Justification), + ImportFinalityProof(Origin, B::Hash, NumberFor, Vec), Start(Box>, Sender>), Stop, #[cfg(any(test, feature = "test-helpers"))] Synchronize, } +#[cfg_attr(test, derive(Debug, PartialEq))] pub enum BlockImportWorkerMsg { ImportBlocks(BlockOrigin, Vec>), - Imported( + ImportedBlocks( Vec<( Result>, BlockImportError>, B::Hash, )>, ), + ImportFinalityProof(Origin, B::Hash, NumberFor, Vec), + ImportedFinalityProof(Origin, (B::Hash, NumberFor), Result<(B::Hash, NumberFor), ()>), #[cfg(any(test, feature = "test-helpers"))] Synchronize, } @@ -243,14 +278,20 @@ struct BlockImporter { result_port: Receiver>, worker_sender: Sender>, link: Option>>, + verifier: Arc>, justification_import: Option>, + finality_proof_import: Option>, + finality_proof_request_builder: Option>, } impl BlockImporter { fn new( result_port: Receiver>, worker_sender: Sender>, + verifier: Arc>, justification_import: Option>, + finality_proof_import: Option>, + finality_proof_request_builder: Option>, ) -> Sender> { trace!(target: "block_import", "Creating new Block Importer!"); let (sender, port) = channel::bounded(4); @@ -262,7 +303,10 @@ impl BlockImporter { result_port, worker_sender, link: None, + verifier, justification_import, + finality_proof_import, + finality_proof_request_builder, }; while importer.run() { // Importing until all senders have been dropped... 
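`BasicQueue::new` above wires two channels: the importer thread forwards work to the worker thread, and the worker reports results back. A self-contained sketch of that topology, using `std::sync::mpsc` in place of `crossbeam_channel` and plain strings in place of the real block and proof messages:

use std::sync::mpsc::{channel, Sender};
use std::thread;

enum WorkerMsg {
    ImportBlocks(&'static str),
    ImportFinalityProof(&'static str),
}

// Spawns the worker thread and returns the sender used to feed it work;
// results flow back through `result_sender`, mirroring the real queue.
fn spawn_worker(result_sender: Sender<String>) -> Sender<WorkerMsg> {
    let (sender, port) = channel();
    thread::spawn(move || {
        // Working until all senders have been dropped...
        for msg in port {
            let result = match msg {
                WorkerMsg::ImportBlocks(blocks) => format!("imported blocks {}", blocks),
                WorkerMsg::ImportFinalityProof(proof) => format!("imported proof {}", proof),
            };
            let _ = result_sender.send(result);
        }
    });
    sender
}

fn main() {
    let (result_sender, result_port) = channel();
    let worker = spawn_worker(result_sender);
    worker.send(WorkerMsg::ImportBlocks("#1..#3")).unwrap();
    worker.send(WorkerMsg::ImportFinalityProof("for #3")).unwrap();
    drop(worker); // close the work channel so the worker thread exits
    for result in result_port {
        println!("{}", result);
    }
}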
@@ -303,10 +347,19 @@ impl BlockImporter { BlockImportMsg::ImportJustification(who, hash, number, justification) => { self.handle_import_justification(who, hash, number, justification) }, + BlockImportMsg::ImportFinalityProof(who, hash, number, finality_proof) => { + self.handle_import_finality_proof(who, hash, number, finality_proof) + }, BlockImportMsg::Start(link, sender) => { + if let Some(finality_proof_request_builder) = self.finality_proof_request_builder.take() { + link.set_finality_proof_request_builder(finality_proof_request_builder); + } if let Some(justification_import) = self.justification_import.as_ref() { justification_import.on_start(&*link); } + if let Some(finality_proof_import) = self.finality_proof_import.as_ref() { + finality_proof_import.on_start(&*link); + } self.link = Some(link); let _ = sender.send(Ok(())); }, @@ -332,14 +385,20 @@ impl BlockImporter { }; let results = match msg { - BlockImportWorkerMsg::Imported(results) => (results), + BlockImportWorkerMsg::ImportedBlocks(results) => (results), + BlockImportWorkerMsg::ImportedFinalityProof(who, request_block, finalization_result) => { + link.finality_proof_imported(who, request_block, finalization_result); + return true; + }, #[cfg(any(test, feature = "test-helpers"))] BlockImportWorkerMsg::Synchronize => { trace!(target: "sync", "Synchronizing link"); link.synchronized(); return true; }, - _ => unreachable!("Import Worker does not send ImportBlocks message; qed"), + BlockImportWorkerMsg::ImportBlocks(_, _) + | BlockImportWorkerMsg::ImportFinalityProof(_, _, _, _) + => unreachable!("Import Worker does not send Import* message; qed"), }; let mut has_error = false; let mut hashes = vec![]; @@ -375,6 +434,11 @@ impl BlockImporter { link.report_peer(peer, BAD_JUSTIFICATION_REPUTATION_CHANGE); } } + + if aux.needs_finality_proof { + trace!(target: "sync", "Block imported but requires finality proof {}: {:?}", number, hash); + link.request_finality_proof(&hash, number); + } }, Err(BlockImportError::IncompleteHeader(who)) => { if let Some(peer) = who { @@ -422,6 +486,13 @@ impl BlockImporter { } } + fn handle_import_finality_proof(&self, who: Origin, hash: B::Hash, number: NumberFor, finality_proof: Vec) { + trace!(target: "sync", "Scheduling finality proof of {}/{} for import", number, hash); + self.worker_sender + .send(BlockImportWorkerMsg::ImportFinalityProof(who, hash, number, finality_proof)) + .expect("1. This is holding a sender to the worker, 2. the worker should not quit while a sender is still held; qed"); + } + fn handle_import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { trace!(target: "sync", "Scheduling {} blocks for import", blocks.len()); self.worker_sender @@ -433,6 +504,7 @@ impl BlockImporter { struct BlockImportWorker> { result_sender: Sender>, block_import: SharedBlockImport, + finality_proof_import: Option>, verifier: Arc, } @@ -441,6 +513,7 @@ impl> BlockImportWorker { result_sender: Sender>, verifier: Arc, block_import: SharedBlockImport, + finality_proof_import: Option>, ) -> Sender> { let (sender, port) = channel::bounded(4); let _ = thread::Builder::new() @@ -450,6 +523,7 @@ impl> BlockImportWorker { result_sender, verifier, block_import, + finality_proof_import, }; for msg in port.iter() { // Working until all senders have been dropped... 
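The rewritten match arms above replace a catch-all `_ => unreachable!(...)` with an explicit list of the message variants that cannot occur. A small, self-contained sketch of why that is preferable (`Msg` is a simplified stand-in for `BlockImportWorkerMsg`):

enum Msg {
    ImportBlocks,
    ImportedBlocks,
    ImportFinalityProof,
    ImportedFinalityProof,
}

fn handle(msg: Msg) -> &'static str {
    match msg {
        Msg::ImportedBlocks => "dispatch block import results",
        Msg::ImportedFinalityProof => "dispatch finality proof result",
        // Listed explicitly instead of `_`, so adding a variant to `Msg`
        // becomes a compile error here rather than a runtime panic.
        Msg::ImportBlocks | Msg::ImportFinalityProof =>
            unreachable!("the worker does not send Import* messages; qed"),
    }
}

fn main() {
    assert_eq!(handle(Msg::ImportedBlocks), "dispatch block import results");
    assert_eq!(handle(Msg::ImportedFinalityProof), "dispatch finality proof result");
}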
@@ -457,12 +531,17 @@ impl> BlockImportWorker { BlockImportWorkerMsg::ImportBlocks(origin, blocks) => { worker.import_a_batch_of_blocks(origin, blocks); }, + BlockImportWorkerMsg::ImportFinalityProof(who, hash, number, proof) => { + worker.import_finality_proof(who, hash, number, proof); + }, #[cfg(any(test, feature = "test-helpers"))] BlockImportWorkerMsg::Synchronize => { trace!(target: "sync", "Sending sync message"); let _ = worker.result_sender.send(BlockImportWorkerMsg::Synchronize); }, - _ => unreachable!("Import Worker does not receive the Imported message; qed"), + BlockImportWorkerMsg::ImportedBlocks(_) + | BlockImportWorkerMsg::ImportedFinalityProof(_, _, _) + => unreachable!("Import Worker does not receive the Imported* messages; qed"), } } }) @@ -512,10 +591,31 @@ impl> BlockImportWorker { let _ = self .result_sender - .send(BlockImportWorkerMsg::Imported(results)); + .send(BlockImportWorkerMsg::ImportedBlocks(results)); trace!(target: "sync", "Imported {} of {}", imported, count); } + + fn import_finality_proof(&self, who: Origin, hash: B::Hash, number: NumberFor, finality_proof: Vec) { + let result = self.finality_proof_import.as_ref().map(|finality_proof_import| { + finality_proof_import.import_finality_proof(hash, number, finality_proof, &*self.verifier) + .map_err(|e| { + debug!( + "Finality proof import failed with {:?} for hash: {:?} number: {:?} coming from node: {:?}", + e, + hash, + number, + who, + ); + }) + }).unwrap_or(Err(())); + + let _ = self + .result_sender + .send(BlockImportWorkerMsg::ImportedFinalityProof(who, (hash, number), result)); + + trace!(target: "sync", "Imported finality proof for {}/{}", number, hash); + } } /// Hooks that the verification queue can use to influence the synchronization @@ -531,6 +631,21 @@ pub trait Link: Send { fn clear_justification_requests(&self) {} /// Request a justification for the given block. fn request_justification(&self, _hash: &B::Hash, _number: NumberFor) {} + /// Finality proof import result. + /// + /// Even though we have asked for finality proof of block A, provider could return proof of + /// some earlier block B, if the proof for A was too large. The sync module should continue + /// asking for proof of A in this case. + fn finality_proof_imported( + &self, + _who: Origin, + _request_block: (B::Hash, NumberFor), + _finalization_result: Result<(B::Hash, NumberFor), ()>, + ) {} + /// Request a finality proof for the given block. + fn request_finality_proof(&self, _hash: &B::Hash, _number: NumberFor) {} + /// Remember finality proof request builder on start. + fn set_finality_proof_request_builder(&self, _request_builder: SharedFinalityProofRequestBuilder) {} /// Adjusts the reputation of the given peer. fn report_peer(&self, _who: Origin, _reputation_change: i32) {} /// Restart sync. 
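In `import_finality_proof` above, a missing `finality_proof_import` and a failed import both collapse to `Err(())` before the result is sent back over the channel, so the link only learns success or failure. A self-contained sketch of that `Option`-to-`Result` collapse; the `import_with` helper is illustrative, not part of the PR.

fn import_with(
    importer: Option<&dyn Fn(u64) -> Result<u64, String>>,
    number: u64,
) -> Result<u64, ()> {
    importer
        .map(|import| import(number).map_err(|error| {
            // Log and discard the concrete error, as the worker does.
            eprintln!("finality proof import failed: {}", error);
        }))
        .unwrap_or(Err(()))
}

fn main() {
    let ok = |n: u64| -> Result<u64, String> { Ok(n) };
    assert_eq!(import_with(Some(&ok), 7), Ok(7));
    assert_eq!(import_with(None, 7), Err(())); // no importer configured
}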
@@ -637,12 +752,14 @@ pub fn import_single_block>( #[cfg(test)] mod tests { use super::*; + use crate::block_import::ForkChoiceStrategy; use libp2p::PeerId; use test_client::runtime::{Block, Hash}; #[derive(Debug, PartialEq)] enum LinkMsg { BlockImported, + FinalityProofImported, Disconnected, Restarted, } @@ -664,6 +781,14 @@ mod tests { fn block_imported(&self, _hash: &Hash, _number: NumberFor) { let _ = self.sender.send(LinkMsg::BlockImported); } + fn finality_proof_imported( + &self, + _: Origin, + _: (Hash, NumberFor), + _: Result<(Hash, NumberFor), ()>, + ) { + let _ = self.sender.send(LinkMsg::FinalityProofImported); + } fn report_peer(&self, _: Origin, _: i32) { let _ = self.sender.send(LinkMsg::Disconnected); } @@ -672,12 +797,33 @@ mod tests { } } + impl Verifier for () { + fn verify( + &self, + origin: BlockOrigin, + header: B::Header, + justification: Option, + body: Option>, + ) -> Result<(ImportBlock, Option>>), String> { + Ok((ImportBlock { + origin, + header, + body, + finalized: false, + justification, + post_digests: vec![], + auxiliary: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + }, None)) + } + } + #[test] fn process_import_result_works() { let (result_sender, result_port) = channel::unbounded(); let (worker_sender, _) = channel::unbounded(); let (link_sender, link_port) = channel::unbounded(); - let importer_sender = BlockImporter::::new(result_port, worker_sender, None); + let importer_sender = BlockImporter::::new(result_port, worker_sender, Arc::new(()), None, None, None); let link = TestLink::new(link_sender); let (ack_sender, start_ack_port) = channel::bounded(4); let _ = importer_sender.send(BlockImportMsg::Start(Box::new(link.clone()), ack_sender)); @@ -687,52 +833,101 @@ mod tests { // Send a known let results = vec![(Ok(BlockImportResult::ImportedKnown(Default::default())), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); + let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap(); assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported)); // Send a second known let results = vec![(Ok(BlockImportResult::ImportedKnown(Default::default())), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); + let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap(); assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported)); // Send an unknown let results = vec![(Ok(BlockImportResult::ImportedUnknown(Default::default(), Default::default(), None)), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); + let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap(); assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported)); // Send an unknown with peer and bad justification let peer_id = PeerId::random(); let results = vec![(Ok(BlockImportResult::ImportedUnknown(Default::default(), - ImportedAux { needs_justification: true, clear_justification_requests: false, bad_justification: true }, + ImportedAux { + needs_justification: true, + clear_justification_requests: false, + bad_justification: true, + needs_finality_proof: false, + }, Some(peer_id.clone()))), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); + let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap(); assert_eq!(link_port.recv(), Ok(LinkMsg::BlockImported)); 
assert_eq!(link_port.recv(), Ok(LinkMsg::Disconnected)); // Send an incomplete header let results = vec![(Err(BlockImportError::IncompleteHeader(Some(peer_id.clone()))), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); + let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap(); assert_eq!(link_port.recv(), Ok(LinkMsg::Disconnected)); assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted)); // Send an unknown parent let results = vec![(Err(BlockImportError::UnknownParent), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); + let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap(); assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted)); // Send a verification failed let results = vec![(Err(BlockImportError::VerificationFailed(Some(peer_id.clone()), String::new())), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); + let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap(); assert_eq!(link_port.recv(), Ok(LinkMsg::Disconnected)); assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted)); // Send an error let results = vec![(Err(BlockImportError::Error), Default::default())]; - let _ = result_sender.send(BlockImportWorkerMsg::Imported(results)).ok().unwrap(); + let _ = result_sender.send(BlockImportWorkerMsg::ImportedBlocks(results)).ok().unwrap(); assert_eq!(link_port.recv(), Ok(LinkMsg::Restarted)); // Drop the importer sender first, ensuring graceful shutdown. drop(importer_sender); } + + #[test] + fn process_finality_proof_import_result_works() { + let (result_sender, result_port) = channel::unbounded(); + let (worker_sender, worker_receiver) = channel::unbounded(); + let (link_sender, link_port) = channel::unbounded(); + let importer_sender = BlockImporter::::new(result_port, worker_sender, Arc::new(()), None, None, None); + let link = TestLink::new(link_sender); + let (ack_sender, start_ack_port) = channel::bounded(4); + let _ = importer_sender.send(BlockImportMsg::Start(Box::new(link.clone()), ack_sender)); + let who = Origin::random(); + + // Ensure the importer handles Start before any result messages. + start_ack_port.recv().unwrap().unwrap(); + + // Send finality proof import request to BlockImporter + importer_sender.send(BlockImportMsg::ImportFinalityProof( + who.clone(), + Default::default(), + 1, + vec![42], + )).unwrap(); + + // Wait until this request is redirected to the BlockImportWorker + assert_eq!(worker_receiver.recv(), Ok(BlockImportWorkerMsg::ImportFinalityProof( + who.clone(), + Default::default(), + 1, + vec![42], + ))); + + // Send ack of proof import from BlockImportWorker to BlockImporter + result_sender.send(BlockImportWorkerMsg::ImportedFinalityProof( + who.clone(), + (Default::default(), 0), + Ok((Default::default(), 0)), + )).unwrap(); + + // Wait for finality proof import result + assert_eq!(link_port.recv(), Ok(LinkMsg::FinalityProofImported)); + + // Drop the importer sender first, ensuring graceful shutdown. 
+		drop(importer_sender);
+	}
 }
diff --git a/core/consensus/common/src/lib.rs b/core/consensus/common/src/lib.rs
index be7900d853a94..5e308ba8fe904 100644
--- a/core/consensus/common/src/lib.rs
+++ b/core/consensus/common/src/lib.rs
@@ -49,7 +49,8 @@ const MAX_BLOCK_SIZE: usize = 4 * 1024 * 1024 + 512;
 pub use self::error::{Error, ErrorKind};
 pub use block_import::{
-	BlockImport, BlockOrigin, ForkChoiceStrategy, ImportedAux, ImportBlock, ImportResult, JustificationImport,
+	BlockImport, BlockOrigin, ForkChoiceStrategy, ImportedAux, ImportBlock, ImportResult,
+	JustificationImport, FinalityProofImport, FinalityProofRequestBuilder,
 };
 pub use select_chain::SelectChain;
diff --git a/core/finality-grandpa/src/aux_schema.rs b/core/finality-grandpa/src/aux_schema.rs
index 9e981cb903b14..4dd6e75dd946f 100644
--- a/core/finality-grandpa/src/aux_schema.rs
+++ b/core/finality-grandpa/src/aux_schema.rs
@@ -105,7 +105,7 @@ where H: Clone + Debug + PartialEq,
 	}
 }
 
-fn load_decode<B: AuxStore, T: Decode>(backend: &B, key: &[u8]) -> ClientResult<Option<T>> {
+pub(crate) fn load_decode<B: AuxStore, T: Decode>(backend: &B, key: &[u8]) -> ClientResult<Option<T>> {
 	match backend.get_aux(key)? {
 		None => Ok(None),
 		Some(t) => T::decode(&mut &t[..])
diff --git a/core/finality-grandpa/src/consensus_changes.rs b/core/finality-grandpa/src/consensus_changes.rs
index cbd7b30f8e7a5..02ac95124151d 100644
--- a/core/finality-grandpa/src/consensus_changes.rs
+++ b/core/finality-grandpa/src/consensus_changes.rs
@@ -32,6 +32,11 @@ impl ConsensusChanges {
 
 impl<H: Copy + PartialEq, N: Copy + Ord> ConsensusChanges<H, N> {
 
+	/// Returns reference to all pending changes.
+	pub fn pending_changes(&self) -> &[(N, H)] {
+		&self.pending_changes
+	}
+
 	/// Note unfinalized change of consensus-related data.
 	pub(crate) fn note_change(&mut self, at: (N, H)) {
 		let idx = self.pending_changes
diff --git a/core/finality-grandpa/src/finality_proof.rs b/core/finality-grandpa/src/finality_proof.rs
index a3147ce338103..4cffe0dd981c1 100644
--- a/core/finality-grandpa/src/finality_proof.rs
+++ b/core/finality-grandpa/src/finality_proof.rs
@@ -17,254 +17,612 @@
 //! GRANDPA block finality proof generation and check.
 //!
 //! Finality of block B is proved by providing:
-//! 1) valid headers sub-chain from the block B to the block F;
-//! 2) valid (with respect to proved authorities) GRANDPA justification of the block F;
-//! 3) proof-of-execution of the `grandpa_authorities` call at the block F.
+//! 1) the justification for the descendant block F;
+//! 2) the headers sub-chain (B; F] if B != F;
+//! 3) a proof of GRANDPA::authorities() if the set changes at block F.
 //!
 //! Since earliest possible justification is returned, the GRANDPA authorities set
 //! at the block F is guaranteed to be the same as in the block B (this is because block
 //! that enacts new GRANDPA authorities set always comes with justification). It also
 //! means that the `set_id` is the same at blocks B and F.
 //!
-//! The caller should track the `set_id`. The most straightforward way is to fetch finality
-//! proofs ONLY for blocks on the tip of the chain and track the latest known `set_id`.
+//! Let U be the last finalized block known to the caller. If the authorities set has changed
+//! several times within the (U; F] interval, multiple finality proof fragments are returned
+//! (one for each authority set change) and they must be verified in order.
+//!
+//! The finality proof provider can choose how to serve a finality proof on its own. It may
+//! return an incomplete proof: one that finalizes some block C that is an ancestor of B and
+//! a descendant of U.
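The fragment list described above is consumed in order: each fragment's justification must be signed by the authority set the verifier currently trusts, and a fragment carrying an authorities proof switches that set for the fragments that follow. A self-contained sketch of the idea, with plain integers standing in for hashes, justifications, and authority sets (not the real GRANDPA data structures):

struct Fragment {
    block: u64,            // stand-in for the finalized block hash
    justified_by_set: u64, // set id that signed this fragment's justification
    new_set: Option<u64>,  // Some(_) when the authorities change at `block`
}

fn verify(fragments: &[Fragment], mut current_set: u64) -> Result<u64, &'static str> {
    let mut last_finalized = 0;
    for fragment in fragments {
        // Every justification must come from the set we currently trust.
        if fragment.justified_by_set != current_set {
            return Err("justification signed by unexpected authority set");
        }
        last_finalized = fragment.block;
        // An authorities proof enacts the next set for later fragments.
        if let Some(new_set) = fragment.new_set {
            current_set = new_set;
        }
    }
    Ok(last_finalized)
}

fn main() {
    let proof = vec![
        Fragment { block: 5, justified_by_set: 0, new_set: Some(1) },
        Fragment { block: 9, justified_by_set: 1, new_set: None },
    ];
    assert_eq!(verify(&proof, 0), Ok(9));
}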
-use grandpa::voter_set::VoterSet; +use std::sync::Arc; +use log::{trace, warn}; use client::{ - blockchain::Backend as BlockchainBackend, + backend::Backend, blockchain::Backend as BlockchainBackend, CallExecutor, Client, error::{Error as ClientError, Result as ClientResult}, - light::fetcher::RemoteCallRequest, + light::fetcher::{FetchChecker, RemoteCallRequest}, + ExecutionStrategy, NeverOffchainExt, }; use parity_codec::{Encode, Decode}; use grandpa::BlockNumberOps; -use runtime_primitives::generic::BlockId; +use runtime_primitives::{Justification, generic::BlockId}; use runtime_primitives::traits::{ NumberFor, Block as BlockT, Header as HeaderT, One, }; -use substrate_primitives::{ed25519, H256}; +use substrate_primitives::{ed25519, H256, Blake2Hasher}; use ed25519::Public as AuthorityId; use substrate_telemetry::{telemetry, CONSENSUS_INFO}; use crate::justification::GrandpaJustification; -/// Prepare proof-of-finality for the given block. +/// Maximum number of fragments that we want to return in a single prove_finality call. +const MAX_FRAGMENTS_IN_PROOF: usize = 8; + +/// GRANDPA authority set related methods for the finality proof provider. +pub trait AuthoritySetForFinalityProver: Send + Sync { + /// Call GrandpaApi::grandpa_authorities at given block. + fn authorities(&self, block: &BlockId) -> ClientResult>; + /// Prove call of GrandpaApi::grandpa_authorities at given block. + fn prove_authorities(&self, block: &BlockId) -> ClientResult>>; +} + +/// Client-based implementation of AuthoritySetForFinalityProver. +impl, RA> AuthoritySetForFinalityProver for Client + where + B: Backend + Send + Sync + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + RA: Send + Sync, +{ + fn authorities(&self, block: &BlockId) -> ClientResult> { + self.executor().call( + block, + "GrandpaApi_grandpa_authorities", + &[], + ExecutionStrategy::NativeElseWasm, + NeverOffchainExt::new(), + ).and_then(|call_result| Decode::decode(&mut &call_result[..]) + .ok_or_else(|| ClientError::CallResultDecode( + "failed to decode GRANDPA authorities set proof".into(), + ))) + } + + fn prove_authorities(&self, block: &BlockId) -> ClientResult>> { + self.execution_proof(block, "GrandpaApi_grandpa_authorities",&[]).map(|(_, proof)| proof) + } +} + +/// GRANDPA authority set related methods for the finality proof checker. +pub trait AuthoritySetForFinalityChecker: Send + Sync { + /// Check execution proof of Grandpa::grandpa_authorities at given block. + fn check_authorities_proof( + &self, + hash: Block::Hash, + header: Block::Header, + proof: Vec>, + ) -> ClientResult>; +} + +/// FetchChecker-based implementation of AuthoritySetForFinalityChecker. +impl AuthoritySetForFinalityChecker for Arc> { + fn check_authorities_proof( + &self, + hash: Block::Hash, + header: Block::Header, + proof: Vec>, + ) -> ClientResult> { + let request = RemoteCallRequest { + block: hash, + header, + method: "GrandpaApi_grandpa_authorities".into(), + call_data: vec![], + retry_count: None, + }; + + self.check_execution_proof(&request, proof) + .and_then(|authorities| { + let authorities: Vec<(AuthorityId, u64)> = Decode::decode(&mut &authorities[..]) + .ok_or_else(|| ClientError::CallResultDecode( + "failed to decode GRANDPA authorities set proof".into(), + ))?; + Ok(authorities.into_iter().collect()) + }) + } +} + +/// Finality proof provider for serving network requests. 
+pub struct FinalityProofProvider, RA> { + client: Arc>, + authority_provider: Arc>, +} + +impl, RA> FinalityProofProvider + where + B: Backend + Send + Sync + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + RA: Send + Sync, +{ + /// Create new finality proof provider using: + /// + /// - client for accessing blockchain data; + /// - authority_provider for calling and proving runtime methods. + pub fn new( + client: Arc>, + authority_provider: Arc>, + ) -> Self { + FinalityProofProvider { client, authority_provider } + } +} + +impl network::FinalityProofProvider for FinalityProofProvider + where + Block: BlockT, + NumberFor: BlockNumberOps, + B: Backend + Send + Sync + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + RA: Send + Sync, +{ + fn prove_finality( + &self, + for_block: Block::Hash, + request: &[u8], + ) -> Result>, ClientError> { + let request: FinalityProofRequest = Decode::decode(&mut &request[..]) + .ok_or_else(|| { + warn!(target: "finality", "Unable to decode finality proof request."); + ClientError::Backend(format!("Invalid finality proof request")) + })?; + match request { + FinalityProofRequest::Original(request) => prove_finality::<_, _, GrandpaJustification>( + &*self.client.backend().blockchain(), + &*self.authority_provider, + request.authorities_set_id, + request.last_finalized, + for_block, + ), + } + } +} + +/// The effects of block finality. +#[derive(Debug, PartialEq)] +pub struct FinalityEffects { + /// The (ordered) set of headers that could be imported. + pub headers_to_import: Vec
, + /// The hash of the block that could be finalized. + pub block: Header::Hash, + /// The justification for the block. + pub justification: Vec, + /// New authorities set id that should be applied starting from block. + pub new_set_id: u64, + /// New authorities set that should be applied starting from block. + pub new_authorities: Vec<(AuthorityId, u64)>, +} + +/// Single fragment of proof-of-finality. /// -/// The proof is the serialized `FinalityProof` constructed using earliest known -/// justification of the block. None is returned if there's no known justification atm. -pub fn prove_finality( +/// Finality for block B is proved by providing: +/// 1) the justification for the descendant block F; +/// 2) headers sub-chain (B; F] if B != F; +/// 3) proof of GRANDPA::authorities() if the set changes at block F. +#[derive(Debug, PartialEq, Encode, Decode)] +struct FinalityProofFragment { + /// The hash of block F for which justification is provided. + pub block: Header::Hash, + /// Justification of the block F. + pub justification: Vec, + /// The set of headers in the range (U; F] that we believe are unknown to the caller. Ordered. + pub unknown_headers: Vec
, + /// Optional proof of execution of GRANDPA::authorities(). + pub authorities_proof: Option>>, +} + +/// Proof of finality is the ordered set of finality fragments, where: +/// - last fragment provides justification for the best possible block from the requested range; +/// - all other fragments provide justifications for GRANDPA authorities set changes within requested range. +type FinalityProof
<Header> = Vec<FinalityProofFragment<Header>>;
+
+/// Finality proof request data.
+#[derive(Debug, Encode, Decode)]
+enum FinalityProofRequest<H> {
+	/// Original version of the request.
+	Original(OriginalFinalityProofRequest<H>),
+}
+
+/// Original version of finality proof request.
+#[derive(Debug, Encode, Decode)]
+struct OriginalFinalityProofRequest<H> {
+	/// The authorities set id we are expecting the proof from.
+	///
+	/// The first justification in the proof must be signed by this authority set.
+	pub authorities_set_id: u64,
+	/// Hash of the last known finalized block.
+	pub last_finalized: H,
+}
+
+/// Prepare the data blob associated with a finality proof request.
+pub(crate) fn make_finality_proof_request<H: Encode>(last_finalized: H, authorities_set_id: u64) -> Vec<u8> {
+	FinalityProofRequest::Original(OriginalFinalityProofRequest {
+		authorities_set_id,
+		last_finalized,
+	}).encode()
+}
+
+/// Prepare proof-of-finality for the best possible block in the range: (begin; end].
+///
+/// It is assumed that the caller already has a proof-of-finality for the block 'begin'.
+/// It is assumed that the caller already knows all blocks in the range (begin; end].
+///
+/// Returns None if there are no finalized blocks unknown to the caller.
+pub(crate) fn prove_finality<Block: BlockT<Hash=H256>, B: BlockchainBackend<Block>, J>(
 	blockchain: &B,
-	generate_execution_proof: G,
-	block: Block::Hash,
+	authorities_provider: &AuthoritySetForFinalityProver<Block>,
+	authorities_set_id: u64,
+	begin: Block::Hash,
+	end: Block::Hash,
 ) -> ::client::error::Result<Option<Vec<u8>>>
 	where
-		B: BlockchainBackend<Block>,
-		G: Fn(&BlockId<Block>, &str, &[u8]) -> ClientResult<Vec<Vec<u8>>>,
+		J: ProvableJustification<Block::Header>,
 {
-	let block_id = BlockId::Hash(block);
-	let mut block_number = blockchain.expect_block_number_from_id(&block_id)?;
+	let begin_id = BlockId::Hash(begin);
+	let begin_number = blockchain.expect_block_number_from_id(&begin_id)?;
 
-	// early-return if we are sure that the block isn't finalized yet
+	// early-return if we are sure that there are no blocks finalized AFTER begin block
 	let info = blockchain.info()?;
-	if info.finalized_number < block_number {
+	if info.finalized_number <= begin_number {
+		trace!(
+			target: "finality",
+			"Requested finality proof for descendant of #{} while we only have finalized #{}. Returning empty proof.",
+			begin_number,
+			info.finalized_number,
+		);
+
 		return Ok(None);
 	}
 
+	// check if blocks range is valid. It is the caller's responsibility to ensure
+	// that it only asks peers that know about the whole block range
+	let end_number = blockchain.expect_block_number_from_id(&BlockId::Hash(end))?;
+	if begin_number + One::one() > end_number {
+		return Err(ClientError::Backend(
+			format!("Cannot generate finality proof for invalid range: {}..{}", begin_number, end_number),
+		));
+	}
+
 	// early-return if we are sure that the block is NOT a part of canonical chain
-	let canonical_block = blockchain.expect_block_hash_from_id(&BlockId::Number(block_number))?;
-	if block != canonical_block {
+	let canonical_begin = blockchain.expect_block_hash_from_id(&BlockId::Number(begin_number))?;
+	if begin != canonical_begin {
 		return Err(ClientError::Backend(
-			"Cannot generate finality proof for non-canonical block".into()
-		).into());
-	}
-
-	// now that we know that the block is finalized, we can generate finalization proof
-
-	// we need to prove grandpa authorities set that has generated justification
-	// BUT since `GrandpaApi::grandpa_authorities` call returns the set that becames actual
-	// at the next block, the proof-of execution is generated using parent block' state
-	// (this will fail if we're trying to prove genesis finality, but such the call itself is redundant)
-	let mut current_header = blockchain.expect_header(BlockId::Hash(block))?;
-	let parent_block_id = BlockId::Hash(*current_header.parent_hash());
-	let authorities_proof = generate_execution_proof(
-		&parent_block_id,
-		"GrandpaApi_grandpa_authorities",
-		&[],
-	)?;
-
-	// search for earliest post-block (inclusive) justification
-	let mut finalization_path = Vec::new();
+			format!("Cannot generate finality proof for non-canonical block: {}", begin),
+		));
+	}
+
+	// iterate justifications && try to prove finality
+	let mut fragment_index = 0;
+	let mut current_authorities = authorities_provider.authorities(&begin_id)?;
+	let mut current_number = begin_number + One::one();
+	let mut finality_proof = Vec::new();
+	let mut unknown_headers = Vec::new();
+	let mut latest_proof_fragment = None;
 	loop {
-		finalization_path.push(current_header);
+		let current_id = BlockId::Number(current_number);
 
-		match blockchain.justification(BlockId::Number(block_number))? {
-			Some(justification) => return Ok(Some(FinalityProof {
-				finalization_path,
+		// check if header is unknown to the caller
+		if current_number > end_number {
+			let unknown_header = blockchain.expect_header(current_id)?;
+			unknown_headers.push(unknown_header);
+		}
+
+		if let Some(justification) = blockchain.justification(current_id)? {
+			// check if the current block enacts new GRANDPA authorities set
+			let parent_id = BlockId::Number(current_number - One::one());
+			let new_authorities = authorities_provider.authorities(&parent_id)?;
+			let new_authorities_proof = if current_authorities != new_authorities {
+				current_authorities = new_authorities;
+				Some(authorities_provider.prove_authorities(&parent_id)?)
+ } else { + None + }; + + // prepare finality proof for the current block + let current = blockchain.expect_block_hash_from_id(&BlockId::Number(current_number))?; + let proof_fragment = FinalityProofFragment { + block: current, justification, - authorities_proof, - }.encode())), - None if block_number == info.finalized_number => break, - None => { - block_number = block_number + One::one(); - current_header = blockchain.expect_header(BlockId::Number(block_number))?; - }, + unknown_headers: ::std::mem::replace(&mut unknown_headers, Vec::new()), + authorities_proof: new_authorities_proof, + }; + + // append justification to finality proof if required + let justifies_end_block = current_number >= end_number; + let justifies_authority_set_change = proof_fragment.authorities_proof.is_some(); + if justifies_end_block || justifies_authority_set_change { + // check if the proof is generated by the requested authority set + if finality_proof.is_empty() { + let justification_check_result = J::decode_and_verify( + &proof_fragment.justification, + authorities_set_id, + &current_authorities, + ); + if justification_check_result.is_err() { + trace!( + target: "finality", + "Cannot provide finality proof with requested set id #{}\ + (possible forced change?). Returning empty proof.", + authorities_set_id, + ); + + return Ok(None); + } + } + + finality_proof.push(proof_fragment); + latest_proof_fragment = None; + } else { + latest_proof_fragment = Some(proof_fragment); + } + + // we don't need to provide more justifications + if justifies_end_block { + break; + } } + + // we can't provide more justifications + if current_number == info.finalized_number { + // append last justification - even if we can't generate finality proof for + // the end block, we try to generate it for the latest possible block + if let Some(latest_proof_fragment) = latest_proof_fragment.take() { + finality_proof.push(latest_proof_fragment); + + fragment_index += 1; + if fragment_index == MAX_FRAGMENTS_IN_PROOF { + break; + } + } + break; + } + + // else search for the next justification + current_number = current_number + One::one(); } - Err(ClientError::Backend( - "cannot find justification for finalized block".into() - ).into()) + if finality_proof.is_empty() { + trace!( + target: "finality", + "No justifications found when making finality proof for {}. Returning empty proof.", + end, + ); + + Ok(None) + } else { + trace!( + target: "finality", + "Built finality proof for {} of {} fragments. Last fragment for {}.", + end, + finality_proof.len(), + finality_proof.last().expect("checked that !finality_proof.is_empty(); qed").block, + ); + + Ok(Some(finality_proof.encode())) + } } -/// Check proof-of-finality for the given block. +/// Check GRANDPA proof-of-finality for the given block. /// -/// Returns the vector of headers (including `block` header, ordered by ASC block number) that MUST be -/// validated + imported at once (i.e. within single db transaction). If at least one of those headers -/// is invalid, all other MUST be considered invalid. -pub fn check_finality_proof, C>( - check_execution_proof: C, - parent_header: Block::Header, - block: (NumberFor, Block::Hash), - set_id: u64, +/// Returns the vector of headers that MUST be validated + imported +/// AND if at least one of those headers is invalid, all others MUST be considered invalid.
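+///
+/// As a rough sketch of the expected shape (illustrative only - it reuses the `header` and
+/// `TestJustification` helpers from the tests module below), a well-formed two-fragment
+/// proof could be assembled as:
+///
+/// ```ignore
+/// let remote_proof = vec![
+/// 	FinalityProofFragment {
+/// 		block: header(2).hash(),
+/// 		justification: TestJustification(true, vec![7]).encode(),
+/// 		unknown_headers: Vec::new(),
+/// 		// every fragment except the last must prove an authorities change
+/// 		authorities_proof: Some(vec![vec![42]]),
+/// 	},
+/// 	FinalityProofFragment {
+/// 		block: header(4).hash(),
+/// 		justification: TestJustification(true, vec![8]).encode(),
+/// 		// only the last fragment may carry headers unknown to the requester
+/// 		unknown_headers: vec![header(4)],
+/// 		authorities_proof: None,
+/// 	},
+/// ].encode();
+/// ```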
+pub(crate) fn check_finality_proof, B>( + blockchain: &B, + current_set_id: u64, + current_authorities: Vec<(AuthorityId, u64)>, + authorities_provider: &AuthoritySetForFinalityChecker, remote_proof: Vec, -) -> ClientResult> +) -> ClientResult> where - NumberFor: grandpa::BlockNumberOps, - C: Fn(&RemoteCallRequest) -> ClientResult>, + NumberFor: BlockNumberOps, + B: BlockchainBackend, { - do_check_finality_proof::>( - check_execution_proof, - parent_header, - block, - set_id, - remote_proof, - ) + do_check_finality_proof::<_, _, GrandpaJustification>( + blockchain, + current_set_id, + current_authorities, + authorities_provider, + remote_proof) } -/// Check proof-of-finality using given justification type. -fn do_check_finality_proof, C, J>( - check_execution_proof: C, - parent_header: Block::Header, - block: (NumberFor, Block::Hash), - set_id: u64, +fn do_check_finality_proof, B, J>( + blockchain: &B, + current_set_id: u64, + current_authorities: Vec<(AuthorityId, u64)>, + authorities_provider: &AuthoritySetForFinalityChecker, remote_proof: Vec, -) -> ClientResult> +) -> ClientResult> where - NumberFor: grandpa::BlockNumberOps, - C: Fn(&RemoteCallRequest) -> ClientResult>, + NumberFor: BlockNumberOps, + B: BlockchainBackend, J: ProvableJustification, { // decode finality proof - let proof = FinalityProof::::decode(&mut &remote_proof[..]) + let proof = FinalityProof::::decode(&mut &remote_proof[..]) .ok_or_else(|| ClientError::BadJustification("failed to decode finality proof".into()))?; - // check that the first header in finalization path is the block itself - { - let finalized_header = proof.finalization_path.first() - .ok_or_else(|| ClientError::from(ClientError::BadJustification( - "finality proof: finalized path is empty".into() - )))?; - if *finalized_header.number() != block.0 || finalized_header.hash() != block.1 { - return Err(ClientError::BadJustification( - "finality proof: block is not a part of finalized path".into() - ).into()); - } + // empty proof can't prove anything + if proof.is_empty() { + return Err(ClientError::BadJustification("empty proof of finality".into())); } - // check that the last header in finalization path is the justification target block - let just_block = proof.justification.target_block(); - { - let finalized_header = proof.finalization_path.last() - .expect("checked above that proof.finalization_path is not empty; qed"); - if *finalized_header.number() != just_block.0 || finalized_header.hash() != just_block.1 { - return Err(ClientError::BadJustification( - "finality proof: target justification block is not a part of finalized path".into() - ).into()); + // iterate and verify proof fragments + let last_fragment_index = proof.len() - 1; + let mut authorities = AuthoritiesOrEffects::Authorities(current_set_id, current_authorities); + for (proof_fragment_index, proof_fragment) in proof.into_iter().enumerate() { + // check that proof is non-redundant. 
The proof still can be valid, but + // we do not want peers to spam us with redundant data + if proof_fragment_index != last_fragment_index { + let has_unknown_headers = !proof_fragment.unknown_headers.is_empty(); + let has_new_authorities = proof_fragment.authorities_proof.is_some(); + if has_unknown_headers || !has_new_authorities { + return Err(ClientError::BadJustification("redundant proof of finality".into())); + } } - } - // check authorities set proof && get grandpa authorities that should have signed justification - let grandpa_authorities = check_execution_proof(&RemoteCallRequest { - block: just_block.1, - header: parent_header, - method: "GrandpaApi_grandpa_authorities".into(), - call_data: vec![], - retry_count: None, - })?; - let grandpa_authorities: Vec<(AuthorityId, u64)> = Decode::decode(&mut &grandpa_authorities[..]) - .ok_or_else(|| ClientError::BadJustification("failed to decode GRANDPA authorities set proof".into()))?; + authorities = check_finality_proof_fragment::<_, _, J>( + blockchain, + authorities, + authorities_provider, + proof_fragment)?; + } - // and now check justification - proof.justification.verify(set_id, &grandpa_authorities.into_iter().collect())?; + let effects = authorities.extract_effects().expect("at least one loop iteration is guaranteed + because proof is not empty;\ + check_finality_proof_fragment is called on every iteration;\ + check_finality_proof_fragment always returns FinalityEffects;\ + qed"); telemetry!(CONSENSUS_INFO; "afg.finality_proof_ok"; - "set_id" => ?set_id, "finalized_header_hash" => ?block.1); - Ok(proof.finalization_path) + "set_id" => ?effects.new_set_id, "finalized_header_hash" => ?effects.block); + + Ok(effects) } -/// Proof of finality. -/// -/// Finality of block B is proved by providing: -/// 1) valid headers sub-chain from the block B to the block F; -/// 2) proof of `GrandpaApi::grandpa_authorities()` call at the block F; -/// 3) valid (with respect to proved authorities) GRANDPA justification of the block F. -#[derive(Debug, PartialEq, Encode, Decode)] -struct FinalityProof { - /// Headers-path (ordered by block number, ascending) from the block we're gathering proof for - /// (inclusive) to the target block of the justification (inclusive). - pub finalization_path: Vec
, - /// Justification (finalization) of the last block from the `finalization_path`. - pub justification: Justification, - /// Proof of `GrandpaApi::grandpa_authorities` call execution at the - /// justification' target block. - pub authorities_proof: Vec>, +/// Check finality proof for the single block. +fn check_finality_proof_fragment, B, J>( + blockchain: &B, + authority_set: AuthoritiesOrEffects, + authorities_provider: &AuthoritySetForFinalityChecker, + proof_fragment: FinalityProofFragment, +) -> ClientResult> + where + NumberFor: BlockNumberOps, + B: BlockchainBackend, + J: Decode + ProvableJustification, +{ + // verify justification using previous authorities set + let (mut current_set_id, mut current_authorities) = authority_set.extract_authorities(); + let justification: J = Decode::decode(&mut &proof_fragment.justification[..]) + .ok_or_else(|| ClientError::JustificationDecode)?; + justification.verify(current_set_id, &current_authorities)?; + + // and now verify new authorities proof (if provided) + if let Some(new_authorities_proof) = proof_fragment.authorities_proof { + // it is safe to query header here, because its non-finality proves that it can't be pruned + let header = blockchain.expect_header(BlockId::Hash(proof_fragment.block))?; + let parent_hash = *header.parent_hash(); + let parent_header = blockchain.expect_header(BlockId::Hash(parent_hash))?; + current_authorities = authorities_provider.check_authorities_proof( + parent_hash, + parent_header, + new_authorities_proof, + )?; + + current_set_id = current_set_id + 1; + } + + Ok(AuthoritiesOrEffects::Effects(FinalityEffects { + headers_to_import: proof_fragment.unknown_headers, + block: proof_fragment.block, + justification: proof_fragment.justification, + new_set_id: current_set_id, + new_authorities: current_authorities, + })) } -/// Justification used to prove block finality. -trait ProvableJustification: Encode + Decode { - /// Get target block of this justification. - fn target_block(&self) -> (Header::Number, Header::Hash); +/// Authorities set from initial authorities set or finality effects. +enum AuthoritiesOrEffects { + Authorities(u64, Vec<(AuthorityId, u64)>), + Effects(FinalityEffects
), +} +impl AuthoritiesOrEffects
{ + pub fn extract_authorities(self) -> (u64, Vec<(AuthorityId, u64)>) { + match self { + AuthoritiesOrEffects::Authorities(set_id, authorities) => (set_id, authorities), + AuthoritiesOrEffects::Effects(effects) => (effects.new_set_id, effects.new_authorities), + } + } + + pub fn extract_effects(self) -> Option> { + match self { + AuthoritiesOrEffects::Authorities(_, _) => None, + AuthoritiesOrEffects::Effects(effects) => Some(effects), + } + } +} + +/// Justification used to prove block finality. +pub(crate) trait ProvableJustification: Encode + Decode { /// Verify justification with respect to authorities set and authorities set id. - fn verify(&self, set_id: u64, authorities: &VoterSet) -> ClientResult<()>; + fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()>; + + /// Decode and verify justification. + fn decode_and_verify( + justification: &Justification, + set_id: u64, + authorities: &[(AuthorityId, u64)], + ) -> ClientResult { + let justification = Self::decode(&mut &**justification).ok_or(ClientError::JustificationDecode)?; + justification.verify(set_id, authorities)?; + Ok(justification) + } } impl> ProvableJustification for GrandpaJustification where NumberFor: BlockNumberOps, { - fn target_block(&self) -> (NumberFor, Block::Hash) { - (self.commit.target_number, self.commit.target_hash) - } - - fn verify(&self, set_id: u64, authorities: &VoterSet) -> ClientResult<()> { - GrandpaJustification::verify(self, set_id, authorities) + fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()> { + GrandpaJustification::verify(self, set_id, &authorities.iter().cloned().collect()) } } #[cfg(test)] -mod tests { - use test_client::runtime::{Block, Header}; - use test_client::client::backend::NewBlockState; +pub(crate) mod tests { + use test_client::runtime::{Block, Header, H256}; + use test_client::client::{backend::NewBlockState}; use test_client::client::in_mem::Blockchain as InMemoryBlockchain; use super::*; - type FinalityProof = super::FinalityProof>; + type FinalityProof = super::FinalityProof
; + + impl AuthoritySetForFinalityProver for (GetAuthorities, ProveAuthorities) + where + GetAuthorities: Send + Sync + Fn(BlockId) -> ClientResult>, + ProveAuthorities: Send + Sync + Fn(BlockId) -> ClientResult>>, + { + fn authorities(&self, block: &BlockId) -> ClientResult> { + self.0(*block) + } - #[derive(Encode, Decode)] - struct ValidFinalityProof(Vec); + fn prove_authorities(&self, block: &BlockId) -> ClientResult>> { + self.1(*block) + } + } - impl ProvableJustification
for ValidFinalityProof { - fn target_block(&self) -> (u64, H256) { (3, header(3).hash()) } + struct ClosureAuthoritySetForFinalityChecker(pub Closure); - fn verify(&self, set_id: u64, authorities: &VoterSet) -> ClientResult<()> { - assert_eq!(set_id, 1); - assert_eq!(authorities, &vec![ - (AuthorityId([1u8; 32]), 1), - (AuthorityId([2u8; 32]), 2), - (AuthorityId([3u8; 32]), 3), - ].into_iter().collect()); - Ok(()) + impl AuthoritySetForFinalityChecker for ClosureAuthoritySetForFinalityChecker + where + Closure: Send + Sync + Fn(H256, Header, Vec>) -> ClientResult>, + { + fn check_authorities_proof( + &self, + hash: H256, + header: Header, + proof: Vec>, + ) -> ClientResult> { + self.0(hash, header, proof) + } + } + + #[derive(Debug, PartialEq, Encode, Decode)] + pub struct TestJustification(pub bool, pub Vec); + + impl ProvableJustification
for TestJustification { + fn verify(&self, _set_id: u64, _authorities: &[(AuthorityId, u64)]) -> ClientResult<()> { + if self.0 { + Ok(()) + } else { + Err(ClientError::BadJustification("test".into())) + } } } @@ -277,7 +635,23 @@ mod tests { } fn side_header(number: u64) -> Header { - Header::new(number, H256::from_low_u64_be(0), H256::from_low_u64_be(1), header(number - 1).hash(), Default::default()) + Header::new( + number, + H256::from_low_u64_be(0), + H256::from_low_u64_be(1), + header(number - 1).hash(), + Default::default(), + ) + } + + fn second_side_header(number: u64) -> Header { + Header::new( + number, + H256::from_low_u64_be(0), + H256::from_low_u64_be(1), + side_header(number - 1).hash(), + Default::default(), + ) } fn test_blockchain() -> InMemoryBlockchain { @@ -290,13 +664,42 @@ mod tests { } #[test] - fn finality_proof_is_not_generated_for_non_final_block() { + fn finality_prove_fails_with_invalid_range() { + let blockchain = test_blockchain(); + + // their last finalized is: 2 + // they request for proof-of-finality of: 2 + // => range is invalid + prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |_| unreachable!("should return before calling GetAuthorities"), + |_| unreachable!("should return before calling ProveAuthorities"), + ), + 0, + header(2).hash(), + header(2).hash(), + ).unwrap_err(); + } + + #[test] + fn finality_proof_is_none_if_no_more_last_finalized_blocks() { let blockchain = test_blockchain(); blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Best).unwrap(); - // when asking for finality of block 4, None is returned - let proof_of_4 = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), header(4).hash()) - .unwrap(); + // our last finalized is: 3 + // their last finalized is: 3 + // => we can't provide any additional justifications + let proof_of_4 = prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |_| unreachable!("should return before calling GetAuthorities"), + |_| unreachable!("should return before calling ProveAuthorities"), + ), + 0, + header(3).hash(), + header(4).hash(), + ).unwrap(); assert_eq!(proof_of_4, None); } @@ -305,128 +708,279 @@ mod tests { let blockchain = test_blockchain(); blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Best).unwrap(); blockchain.insert(side_header(4).hash(), side_header(4), None, None, NewBlockState::Best).unwrap(); + blockchain.insert(second_side_header(5).hash(), second_side_header(5), None, None, NewBlockState::Best) + .unwrap(); blockchain.insert(header(5).hash(), header(5), Some(vec![5]), None, NewBlockState::Final).unwrap(); - // when asking for finality of side-block 42, None is returned - let proof_of_side_4_fails = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), H256::from_low_u64_be(42)).is_err(); - assert_eq!(proof_of_side_4_fails, true); + // chain is 1 -> 2 -> 3 -> 4 -> 5 + // \> 4' -> 5' + // and the best finalized is 5 + // => when requesting for (4'; 5'], error is returned + prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |_| unreachable!("should return before calling GetAuthorities"), + |_| unreachable!("should return before calling ProveAuthorities"), + ), + 0, + side_header(4).hash(), + second_side_header(5).hash(), + ).unwrap_err(); } #[test] - fn finality_proof_fails_if_no_justification_known() { + fn finality_proof_is_none_if_no_justification_known() { let blockchain = test_blockchain(); blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Final).unwrap(); - // 
when asking for finality of block 4, search for justification failing - let proof_of_4_fails = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), H256::from_low_u64_be(42)).is_err(); - assert_eq!(proof_of_4_fails, true); + // block 4 is finalized without justification + // => we can't prove finality + let proof_of_4 = prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |_| Ok(vec![(AuthorityId::from_raw([1u8; 32]), 1u64)]), + |_| unreachable!("authorities didn't change => ProveAuthorities won't be called"), + ), + 0, + header(3).hash(), + header(4).hash(), + ).unwrap(); + assert_eq!(proof_of_4, None); } #[test] - fn prove_finality_is_generated() { + fn finality_proof_works_without_authorities_change() { let blockchain = test_blockchain(); + let just4 = TestJustification(true, vec![4]).encode(); + let just5 = TestJustification(true, vec![5]).encode(); + blockchain.insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final).unwrap(); + blockchain.insert(header(5).hash(), header(5), Some(just5.clone()), None, NewBlockState::Final).unwrap(); + + // blocks 4 && 5 are finalized with justification + // => since authorities are the same, we only need justification for 5 + let proof_of_5: FinalityProof = Decode::decode(&mut &prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |_| Ok(vec![(AuthorityId::from_raw([1u8; 32]), 1u64)]), + |_| unreachable!("should return before calling ProveAuthorities"), + ), + 0, + header(3).hash(), + header(5).hash(), + ).unwrap().unwrap()[..]).unwrap(); + assert_eq!(proof_of_5, vec![FinalityProofFragment { + block: header(5).hash(), + justification: just5, + unknown_headers: Vec::new(), + authorities_proof: None, + }]); + } - // when asking for finality of block 2, justification of 3 is returned - let proof_of_2: FinalityProof = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), header(2).hash()) - .unwrap().and_then(|p| Decode::decode(&mut &p[..])).unwrap(); - assert_eq!(proof_of_2, FinalityProof { - finalization_path: vec![header(2), header(3)], - justification: vec![3], - authorities_proof: vec![vec![42]], - }); + #[test] + fn finality_proof_finalized_earlier_block_if_no_justification_for_target_is_known() { + let blockchain = test_blockchain(); + blockchain.insert(header(4).hash(), header(4), Some(vec![4]), None, NewBlockState::Final).unwrap(); + blockchain.insert(header(5).hash(), header(5), None, None, NewBlockState::Final).unwrap(); + + // block 4 is finalized with justification + we request for finality of 5 + // => we can't prove finality of 5, but providing finality for 4 is still useful for requester + let proof_of_5: FinalityProof = Decode::decode(&mut &prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |_| Ok(vec![(AuthorityId::from_raw([1u8; 32]), 1u64)]), + |_| unreachable!("should return before calling ProveAuthorities"), + ), + 0, + header(3).hash(), + header(5).hash(), + ).unwrap().unwrap()[..]).unwrap(); + assert_eq!(proof_of_5, vec![FinalityProofFragment { + block: header(4).hash(), + justification: vec![4], + unknown_headers: Vec::new(), + authorities_proof: None, + }]); + } - // when asking for finality of block 3, justification of 3 is returned - let proof_of_3: FinalityProof = prove_finality(&blockchain, |_, _, _| Ok(vec![vec![42]]), header(3).hash()) - .unwrap().and_then(|p| Decode::decode(&mut &p[..])).unwrap(); - assert_eq!(proof_of_3, FinalityProof { - finalization_path: vec![header(3)], - justification: vec![3], - authorities_proof: vec![vec![42]], - }); + #[test] + fn 
finality_proof_works_with_authorities_change() { + let blockchain = test_blockchain(); + let just4 = TestJustification(true, vec![4]).encode(); + let just5 = TestJustification(true, vec![5]).encode(); + let just7 = TestJustification(true, vec![7]).encode(); + blockchain.insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final).unwrap(); + blockchain.insert(header(5).hash(), header(5), Some(just5.clone()), None, NewBlockState::Final).unwrap(); + blockchain.insert(header(6).hash(), header(6), None, None, NewBlockState::Final).unwrap(); + blockchain.insert(header(7).hash(), header(7), Some(just7.clone()), None, NewBlockState::Final).unwrap(); + + // when querying for finality of 6, we assume that #6 is the last block known to the requester + // => since we only have justification for #7, we provide #7 + let proof_of_6: FinalityProof = Decode::decode(&mut &prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |block_id| match block_id { + BlockId::Hash(h) if h == header(3).hash() => Ok(vec![(AuthorityId::from_raw([3u8; 32]), 1u64)]), + BlockId::Number(3) => Ok(vec![(AuthorityId::from_raw([3u8; 32]), 1u64)]), + BlockId::Number(4) => Ok(vec![(AuthorityId::from_raw([4u8; 32]), 1u64)]), + BlockId::Number(6) => Ok(vec![(AuthorityId::from_raw([6u8; 32]), 1u64)]), + _ => unreachable!("no other authorities should be fetched: {:?}", block_id), + }, + |block_id| match block_id { + BlockId::Number(4) => Ok(vec![vec![40]]), + BlockId::Number(6) => Ok(vec![vec![60]]), + _ => unreachable!("no other authorities should be proved: {:?}", block_id), + }, + ), + 0, + header(3).hash(), + header(6).hash(), + ).unwrap().unwrap()[..]).unwrap(); + // initial authorities set (which starts acting from #4) is [3; 32] + assert_eq!(proof_of_6, vec![ + // new authorities set starts acting from #5 => we do not provide fragment for #4 + // first fragment provides justification for #5 && authorities set that starts acting from #5 + FinalityProofFragment { + block: header(5).hash(), + justification: just5, + unknown_headers: Vec::new(), + authorities_proof: Some(vec![vec![40]]), + }, + // last fragment provides justification for #7 && the unknown header #7 + FinalityProofFragment { + block: header(7).hash(), + justification: just7, + unknown_headers: vec![header(7)], + authorities_proof: Some(vec![vec![60]]), + }, + ]); } #[test] - fn finality_proof_check_fails_when_block_is_not_included() { - let mut proof_of_2: FinalityProof = prove_finality( - &test_blockchain(), - |_, _, _| Ok(vec![vec![42]]), - header(2).hash(), - ).unwrap().and_then(|p| Decode::decode(&mut &p[..])).unwrap(); - proof_of_2.finalization_path.remove(0); - - // block for which we're trying to request finality proof is missing from finalization_path - assert_eq!(do_check_finality_proof::( - |_| Ok(Vec::::new().encode()), - header(1), - (2, header(2).hash()), + fn finality_proof_check_fails_when_proof_decode_fails() { + let blockchain = test_blockchain(); + + // when we can't decode proof from Vec + do_check_finality_proof::<_, _, TestJustification>( + &blockchain, 1, - proof_of_2.encode(), - ).is_err(), true); + vec![(AuthorityId::from_raw([3u8; 32]), 1u64)], + &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), + vec![42], + ).unwrap_err(); } #[test] - fn finality_proof_check_fails_when_justified_block_is_not_included() { - let mut proof_of_2: FinalityProof = prove_finality( - &test_blockchain(), - |_, _, _| Ok(vec![vec![42]]), - header(2).hash(), - ).unwrap().and_then(|p|
Decode::decode(&mut &p[..])).unwrap(); - proof_of_2.finalization_path.remove(1); - - // justified block is missing from finalization_path - assert_eq!(do_check_finality_proof::( - |_| Ok(Vec::::new().encode()), - header(1), - (2, header(2).hash()), + fn finality_proof_check_fails_when_proof_is_empty() { + let blockchain = test_blockchain(); + + // when decoded proof has zero length + do_check_finality_proof::<_, _, TestJustification>( + &blockchain, 1, - proof_of_2.encode(), - ).is_err(), true); + vec![(AuthorityId::from_raw([3u8; 32]), 1u64)], + &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), + Vec::::new().encode(), + ).unwrap_err(); } #[test] - fn finality_proof_check_fails_when_justification_verification_fails() { - #[derive(Encode, Decode)] - struct InvalidFinalityProof(Vec); + fn finality_proof_check_fails_when_intermediate_fragment_has_unknown_headers() { + let blockchain = test_blockchain(); - impl ProvableJustification
for InvalidFinalityProof { - fn target_block(&self) -> (u64, H256) { (3, header(3).hash()) } + // when intermediate (#0) fragment has non-empty unknown headers + do_check_finality_proof::<_, _, TestJustification>( + &blockchain, + 1, + vec![(AuthorityId::from_raw([3u8; 32]), 1u64)], + &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), + vec![FinalityProofFragment { + block: header(4).hash(), + justification: TestJustification(true, vec![7]).encode(), + unknown_headers: vec![header(4)], + authorities_proof: Some(vec![vec![42]]), + }, FinalityProofFragment { + block: header(5).hash(), + justification: TestJustification(true, vec![8]).encode(), + unknown_headers: vec![header(5)], + authorities_proof: None, + }].encode(), + ).unwrap_err(); + } - fn verify(&self, _set_id: u64, _authorities: &VoterSet) -> ClientResult<()> { - Err(ClientError::Backend("test error".into())) - } - } + #[test] + fn finality_proof_check_fails_when_intermediate_fragment_has_no_authorities_proof() { + let blockchain = test_blockchain(); - let mut proof_of_2: FinalityProof = prove_finality( - &test_blockchain(), - |_, _, _| Ok(vec![vec![42]]), - header(2).hash(), - ).unwrap().and_then(|p| Decode::decode(&mut &p[..])).unwrap(); - proof_of_2.finalization_path.remove(1); - - // justification is not valid - assert_eq!(do_check_finality_proof::( - |_| Ok(Vec::::new().encode()), - header(1), - (2, header(2).hash()), + // when intermediate (#0) fragment has empty authorities proof + do_check_finality_proof::<_, _, TestJustification>( + &blockchain, 1, - proof_of_2.encode(), - ).is_err(), true); + vec![(AuthorityId::from_raw([3u8; 32]), 1u64)], + &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), + vec![FinalityProofFragment { + block: header(4).hash(), + justification: TestJustification(true, vec![7]).encode(), + unknown_headers: Vec::new(), + authorities_proof: None, + }, FinalityProofFragment { + block: header(5).hash(), + justification: TestJustification(true, vec![8]).encode(), + unknown_headers: vec![header(5)], + authorities_proof: None, + }].encode(), + ).unwrap_err(); } #[test] fn finality_proof_check_works() { - let proof_of_2 = prove_finality(&test_blockchain(), |_, _, _| Ok(vec![vec![42]]), header(2).hash()) - .unwrap().unwrap(); - assert_eq!(do_check_finality_proof::( - |_| Ok(vec![ - (AuthorityId([1u8; 32]), 1u64), - (AuthorityId([2u8; 32]), 2u64), - (AuthorityId([3u8; 32]), 3u64), - ].encode()), - header(1), - (2, header(2).hash()), + let blockchain = test_blockchain(); + + let effects = do_check_finality_proof::<_, _, TestJustification>( + &blockchain, 1, - proof_of_2, - ).unwrap(), vec![header(2), header(3)]); + vec![(AuthorityId::from_raw([3u8; 32]), 1u64)], + &ClosureAuthoritySetForFinalityChecker(|_, _, _| Ok(vec![(AuthorityId::from_raw([4u8; 32]), 1u64)])), + vec![FinalityProofFragment { + block: header(2).hash(), + justification: TestJustification(true, vec![7]).encode(), + unknown_headers: Vec::new(), + authorities_proof: Some(vec![vec![42]]), + }, FinalityProofFragment { + block: header(4).hash(), + justification: TestJustification(true, vec![8]).encode(), + unknown_headers: vec![header(4)], + authorities_proof: None, + }].encode(), + ).unwrap(); + assert_eq!(effects, FinalityEffects { + headers_to_import: vec![header(4)], + block: header(4).hash(), + justification: TestJustification(true, vec![8]).encode(), + new_set_id: 2, + new_authorities: vec![(AuthorityId::from_raw([4u8; 32]), 1u64)], + }); + }
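+
+	// A round-trip sketch complementing the cases above (it relies only on the
+	// `Encode`/`Decode`/`PartialEq` derives that the existing assertions already use):
+	// `prove_finality` returns the encoded `Vec` of fragments, so a hand-built proof
+	// must decode back to the same fragments.
+	#[test]
+	fn finality_proof_fragments_encoding_roundtrip() {
+		let encoded = vec![FinalityProofFragment {
+			block: header(2).hash(),
+			justification: TestJustification(true, vec![7]).encode(),
+			unknown_headers: vec![header(2)],
+			authorities_proof: None,
+		}].encode();
+		let decoded: FinalityProof = Decode::decode(&mut &encoded[..]).unwrap();
+		assert_eq!(decoded, vec![FinalityProofFragment {
+			block: header(2).hash(),
+			justification: TestJustification(true, vec![7]).encode(),
+			unknown_headers: vec![header(2)],
+			authorities_proof: None,
+		}]);
+	}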
+ + #[test] + fn finality_proof_is_none_if_first_justification_is_generated_by_unknown_set() { + // this is the case for a forced change: set_id has been forcibly increased on the full node + // and the light node missed that + // => justification verification will fail on the light node anyway, so we do not return + // a finality proof at all + let blockchain = test_blockchain(); + let just4 = TestJustification(false, vec![4]).encode(); // false makes verification fail + blockchain.insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final).unwrap(); + + let proof_of_4 = prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |_| Ok(vec![(AuthorityId::from_raw([1u8; 32]), 1u64)]), + |_| unreachable!("should return before calling ProveAuthorities"), + ), + 0, + header(3).hash(), + header(4).hash(), + ).unwrap(); + assert!(proof_of_4.is_none()); } } diff --git a/core/finality-grandpa/src/import.rs b/core/finality-grandpa/src/import.rs index 542617fbcf0b7..cd5ca02a5b367 100644 --- a/core/finality-grandpa/src/import.rs +++ b/core/finality-grandpa/src/import.rs @@ -552,7 +552,7 @@ where enacts_change: bool, ) -> Result<(), ConsensusError> { let justification = GrandpaJustification::decode_and_verify_finalizes( - justification, + &justification, (hash, number), self.authority_set.set_id(), &self.authority_set.current_authorities(), diff --git a/core/finality-grandpa/src/justification.rs b/core/finality-grandpa/src/justification.rs index 5b55acec8524f..f16824f924680 100644 --- a/core/finality-grandpa/src/justification.rs +++ b/core/finality-grandpa/src/justification.rs @@ -95,17 +95,16 @@ impl> GrandpaJustification { /// Decode a GRANDPA justification and validate the commit and the votes' /// ancestry proofs finalize the given block. pub(crate) fn decode_and_verify_finalizes( - encoded: Vec, + encoded: &[u8], finalized_target: (Block::Hash, NumberFor), set_id: u64, voters: &VoterSet, ) -> Result, ClientError> where NumberFor: grandpa::BlockNumberOps, { - let justification = GrandpaJustification::::decode(&mut &*encoded).ok_or_else(|| { - let msg = "failed to decode grandpa justification".to_string(); - ClientError::from(ClientError::BadJustification(msg)) - })?; + + let justification = GrandpaJustification::::decode(&mut &*encoded) + .ok_or(ClientError::JustificationDecode)?; if (justification.commit.target_hash, justification.commit.target_number) != finalized_target { let msg = "invalid commit target in grandpa justification".to_string(); diff --git a/core/finality-grandpa/src/lib.rs b/core/finality-grandpa/src/lib.rs index bd7bc5f9e3223..9d808bf4d5120 100644 --- a/core/finality-grandpa/src/lib.rs +++ b/core/finality-grandpa/src/lib.rs @@ -93,15 +93,17 @@ mod environment; mod finality_proof; mod import; mod justification; +mod light_import; mod observer; mod until_imported; #[cfg(feature="service-integration")] mod service_integration; #[cfg(feature="service-integration")] -pub use service_integration::{LinkHalfForService, BlockImportForService}; +pub use service_integration::{LinkHalfForService, BlockImportForService, BlockImportForLightService}; pub use communication::Network; -pub use finality_proof::{prove_finality, check_finality_proof}; +pub use finality_proof::FinalityProofProvider; +pub use light_import::light_block_import; pub use observer::run_grandpa_observer; use aux_schema::PersistentData; @@ -300,7 +302,7 @@ pub struct LinkHalf, RA, SC> { pub fn block_import, RA, PRA, SC>( client: Arc>, api: Arc, - select_chain: SC + select_chain: SC, ) -> Result<( GrandpaBlockImport,
LinkHalf diff --git a/core/finality-grandpa/src/light_import.rs b/core/finality-grandpa/src/light_import.rs new file mode 100644 index 0000000000000..f73b41f0ae02e --- /dev/null +++ b/core/finality-grandpa/src/light_import.rs @@ -0,0 +1,728 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use std::collections::HashMap; +use std::sync::Arc; +use log::{info, trace, warn}; +use parking_lot::RwLock; + +use client::{ + CallExecutor, Client, + backend::{AuxStore, Backend}, + blockchain::HeaderBackend, + error::Error as ClientError, +}; +use parity_codec::{Encode, Decode}; +use consensus_common::{ + import_queue::{Verifier, SharedFinalityProofRequestBuilder}, well_known_cache_keys, + BlockOrigin, BlockImport, FinalityProofImport, ImportBlock, ImportResult, ImportedAux, + Error as ConsensusError, ErrorKind as ConsensusErrorKind, FinalityProofRequestBuilder, +}; +use runtime_primitives::Justification; +use runtime_primitives::traits::{ + NumberFor, Block as BlockT, Header as HeaderT, ProvideRuntimeApi, DigestFor, +}; +use fg_primitives::GrandpaApi; +use runtime_primitives::generic::BlockId; +use substrate_primitives::{H256, Blake2Hasher, ed25519::Public as AuthorityId}; + +use crate::aux_schema::load_decode; +use crate::consensus_changes::ConsensusChanges; +use crate::environment::canonical_at_height; +use crate::finality_proof::{AuthoritySetForFinalityChecker, ProvableJustification, make_finality_proof_request}; +use crate::justification::GrandpaJustification; + +/// LightAuthoritySet is saved under this key in aux storage. +const LIGHT_AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters"; +/// ConsensusChanges is saved under this key in aux storage. +const LIGHT_CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes"; + +/// Create light block importer. +pub fn light_block_import, RA, PRA>( + client: Arc>, + authority_set_provider: Arc>, + api: Arc, +) -> Result, ClientError> + where + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + RA: Send + Sync, + PRA: ProvideRuntimeApi, + PRA::Api: GrandpaApi, +{ + let info = client.info()?; + let import_data = load_aux_import_data(info.chain.finalized_hash, &**client.backend(), api)?; + Ok(GrandpaLightBlockImport { + client, + authority_set_provider, + data: Arc::new(RwLock::new(import_data)), + }) +} + +/// A light block-import handler for GRANDPA. +/// +/// It is responsible for: +/// - checking GRANDPA justifications; +/// - fetching finality proofs for blocks that are enacting consensus changes. +pub struct GrandpaLightBlockImport, RA> { + client: Arc>, + authority_set_provider: Arc>, + data: Arc>>, +} + +/// Mutable data of light block importer. +struct LightImportData> { + last_finalized: Block::Hash, + authority_set: LightAuthoritySet, + consensus_changes: ConsensusChanges>, +} + +/// Latest authority set tracker. 
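+///
+/// For example (sketch only; this mirrors how the importer below drives it):
+///
+/// ```ignore
+/// let mut set = LightAuthoritySet::genesis(vec![(AuthorityId([1; 32]), 1)]);
+/// // an enacted change bumps the set id and replaces the authorities
+/// set.update(set.set_id() + 1, vec![(AuthorityId([2; 32]), 1)]);
+/// assert_eq!(set.set_id(), 1);
+/// ```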
+#[derive(Debug, Encode, Decode)] +struct LightAuthoritySet { + set_id: u64, + authorities: Vec<(AuthorityId, u64)>, +} + +impl, RA> GrandpaLightBlockImport { + /// Create finality proof request builder. + pub fn create_finality_proof_request_builder(&self) -> SharedFinalityProofRequestBuilder { + Arc::new(GrandpaFinalityProofRequestBuilder(self.data.clone())) as _ + } +} + +impl, RA> BlockImport + for GrandpaLightBlockImport where + NumberFor: grandpa::BlockNumberOps, + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + DigestFor: Encode, + RA: Send + Sync, +{ + type Error = ConsensusError; + + fn import_block( + &self, + block: ImportBlock, + new_cache: HashMap>, + ) -> Result { + do_import_block::<_, _, _, _, GrandpaJustification>(&*self.client, &mut *self.data.write(), block, new_cache) + } + + fn check_block( + &self, + hash: Block::Hash, + parent_hash: Block::Hash, + ) -> Result { + self.client.check_block(hash, parent_hash) + } +} + +impl, RA> FinalityProofImport + for GrandpaLightBlockImport where + NumberFor: grandpa::BlockNumberOps, + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + DigestFor: Encode, + RA: Send + Sync, +{ + type Error = ConsensusError; + + fn on_start(&self, link: &::consensus_common::import_queue::Link) { + let chain_info = match self.client.info() { + Ok(info) => info.chain, + _ => return, + }; + + let data = self.data.read(); + for (pending_number, pending_hash) in data.consensus_changes.pending_changes() { + if *pending_number > chain_info.finalized_number && *pending_number <= chain_info.best_number { + link.request_finality_proof(pending_hash, *pending_number); + } + } + } + + fn import_finality_proof( + &self, + hash: Block::Hash, + number: NumberFor, + finality_proof: Vec, + verifier: &Verifier, + ) -> Result<(Block::Hash, NumberFor), Self::Error> { + do_import_finality_proof::<_, _, _, _, GrandpaJustification>( + &*self.client, + &*self.authority_set_provider, + &mut *self.data.write(), + hash, + number, + finality_proof, + verifier, + ) + } +} + +impl LightAuthoritySet { + /// Get a genesis set with given authorities. + pub fn genesis(initial: Vec<(AuthorityId, u64)>) -> Self { + LightAuthoritySet { + set_id: 0, + authorities: initial, + } + } + + /// Get latest set id. + pub fn set_id(&self) -> u64 { + self.set_id + } + + /// Get latest authorities set. + pub fn authorities(&self) -> Vec<(AuthorityId, u64)> { + self.authorities.clone() + } + + /// Set new authorities set. + pub fn update(&mut self, set_id: u64, authorities: Vec<(AuthorityId, u64)>) { + self.set_id = set_id; + std::mem::replace(&mut self.authorities, authorities); + } +} + +struct GrandpaFinalityProofRequestBuilder>(Arc>>); + +impl> FinalityProofRequestBuilder for GrandpaFinalityProofRequestBuilder { + fn build_request_data(&self, _hash: &B::Hash) -> Vec { + let data = self.0.read(); + make_finality_proof_request( + data.last_finalized, + data.authority_set.set_id(), + ) + } +} + +/// Try to import new block. 
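+///
+/// There are three outcomes, mirroring the `match` at the end of the function: a block that
+/// comes with a justification is finalized immediately via `do_import_justification`; a block
+/// that enacts a consensus change (i.e. arrives with a non-empty `new_cache`) is imported with
+/// `needs_finality_proof` set, so that a finality proof is requested later; any other block is
+/// imported as-is.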
+fn do_import_block, RA, J>( + client: &Client, + data: &mut LightImportData, + mut block: ImportBlock, + new_cache: HashMap>, +) -> Result + where + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + RA: Send + Sync, + NumberFor: grandpa::BlockNumberOps, + DigestFor: Encode, + J: ProvableJustification, +{ + let hash = block.post_header().hash(); + let number = block.header.number().clone(); + + // we don't want to finalize on `inner.import_block` + let justification = block.justification.take(); + let enacts_consensus_change = !new_cache.is_empty(); + let import_result = client.import_block(block, new_cache); + + let mut imported_aux = match import_result { + Ok(ImportResult::Imported(aux)) => aux, + Ok(r) => return Ok(r), + Err(e) => return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()), + }; + + match justification { + Some(justification) => { + trace!( + target: "finality", + "Imported block {}{}. Importing justification.", + hash, + if enacts_consensus_change { " which enacts consensus changes" } else { "" }, + ); + + do_import_justification::<_, _, _, _, J>(client, data, hash, number, justification) + }, + None if enacts_consensus_change => { + trace!( + target: "finality", + "Imported block {} which enacts consensus changes. Requesting finality proof.", + hash, + ); + + // remember that we need finality proof for this block + imported_aux.needs_finality_proof = true; + data.consensus_changes.note_change((number, hash)); + Ok(ImportResult::Imported(imported_aux)) + }, + None => Ok(ImportResult::Imported(imported_aux)), + } +} + +/// Try to import finality proof. +fn do_import_finality_proof, RA, J>( + client: &Client, + authority_set_provider: &AuthoritySetForFinalityChecker, + data: &mut LightImportData, + _hash: Block::Hash, + _number: NumberFor, + finality_proof: Vec, + verifier: &Verifier, +) -> Result<(Block::Hash, NumberFor), ConsensusError> + where + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + RA: Send + Sync, + DigestFor: Encode, + NumberFor: grandpa::BlockNumberOps, + J: ProvableJustification, +{ + let authority_set_id = data.authority_set.set_id(); + let authorities = data.authority_set.authorities(); + let finality_effects = crate::finality_proof::check_finality_proof( + &*client.backend().blockchain(), + authority_set_id, + authorities, + authority_set_provider, + finality_proof, + ).map_err(|e| ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())))?; + + // try to import all new headers + let block_origin = BlockOrigin::NetworkBroadcast; + for header_to_import in finality_effects.headers_to_import { + let (block_to_import, new_authorities) = verifier.verify(block_origin, header_to_import, None, None)?; + assert!(block_to_import.justification.is_none(), "We have passed None as justification to verifier.verify"); + + let mut cache = HashMap::new(); + if let Some(authorities) = new_authorities { + cache.insert(well_known_cache_keys::AUTHORITIES, authorities.encode()); + } + do_import_block::<_, _, _, _, J>(client, data, block_to_import, cache)?; + } + + // try to import latest justification + let finalized_block_hash = finality_effects.block; + let finalized_block_number = client.backend().blockchain() + .expect_block_number_from_id(&BlockId::Hash(finality_effects.block)) + .map_err(|e| ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())))?; + do_finalize_block( + client, + data, + finalized_block_hash, + finalized_block_number, + finality_effects.justification.encode(), 
)?; + + // apply new authorities set + data.authority_set.update( + finality_effects.new_set_id, + finality_effects.new_authorities, + ); + + Ok((finalized_block_hash, finalized_block_number)) +} + +/// Try to import justification. +fn do_import_justification, RA, J>( + client: &Client, + data: &mut LightImportData, + hash: Block::Hash, + number: NumberFor, + justification: Justification, +) -> Result + where + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + RA: Send + Sync, + NumberFor: grandpa::BlockNumberOps, + J: ProvableJustification, +{ + // with justification, we have two cases + // + // optimistic: the same GRANDPA authorities set has generated intermediate justification + // => justification is verified using current authorities set + we could proceed further + // + // pessimistic scenario: the GRANDPA authorities set has changed + // => we need to fetch new authorities set (i.e. finality proof) from remote node + + // first, try to behave optimistically + let authority_set_id = data.authority_set.set_id(); + let justification = J::decode_and_verify( + &justification, + authority_set_id, + &data.authority_set.authorities(), + ); + + // BadJustification error means that justification has been successfully decoded, but + // it isn't valid within current authority set + let justification = match justification { + Err(ClientError::BadJustification(_)) => { + trace!( + target: "finality", + "Justification for {} is not valid within current authorities set. Requesting finality proof.", + hash, + ); + + let mut imported_aux = ImportedAux::default(); + imported_aux.needs_finality_proof = true; + return Ok(ImportResult::Imported(imported_aux)); + }, + Err(e) => { + trace!( + target: "finality", + "Justification for {} is not valid. Bailing.", + hash, + ); + + return Err(ConsensusErrorKind::ClientImport(e.to_string()).into()); + }, + Ok(justification) => { + trace!( + target: "finality", + "Justification for {} is valid. Finalizing the block.", + hash, + ); + + justification + }, + }; + + // finalize the block + do_finalize_block(client, data, hash, number, justification.encode()) +} + +/// Finalize the block. +fn do_finalize_block, RA>( + client: &Client, + data: &mut LightImportData, + hash: Block::Hash, + number: NumberFor, + justification: Justification, +) -> Result + where + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + RA: Send + Sync, + NumberFor: grandpa::BlockNumberOps, +{ + // finalize the block + client.finalize_block(BlockId::Hash(hash), Some(justification), true).map_err(|e| { + warn!(target: "finality", "Error applying finality to block {:?}: {:?}", (hash, number), e); + ConsensusError::from(ConsensusErrorKind::ClientImport(e.to_string())) + })?; + + // forget obsoleted consensus changes + let consensus_finalization_res = data.consensus_changes + .finalize((number, hash), |at_height| canonical_at_height(&client, (hash, number), true, at_height)); + match consensus_finalization_res { + Ok((true, _)) => require_insert_aux( + &client, + LIGHT_CONSENSUS_CHANGES_KEY, + &data.consensus_changes, + "consensus changes", + )?, + Ok(_) => (), + Err(error) => return Err(on_post_finalization_error(error, "consensus changes")), + } + + // update last finalized block reference + data.last_finalized = hash; + + Ok(ImportResult::imported()) +} + +/// Load light import aux data from the store. 
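+///
+/// Both values are stored parity-codec encoded: `LIGHT_AUTHORITY_SET_KEY` holds the
+/// `LightAuthoritySet` and `LIGHT_CONSENSUS_CHANGES_KEY` holds the pending `ConsensusChanges`.
+/// On first startup neither key exists, so the genesis authorities are fetched from the
+/// runtime and the freshly-built values are written back (see `aux_data_updated_on_start`
+/// in the tests below).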
+fn load_aux_import_data, PRA>( + last_finalized: Block::Hash, + aux_store: &B, + api: Arc, +) -> Result, ClientError> + where + B: AuxStore, + PRA: ProvideRuntimeApi, + PRA::Api: GrandpaApi, +{ + use runtime_primitives::traits::Zero; + let authority_set = match load_decode(aux_store, LIGHT_AUTHORITY_SET_KEY)? { + Some(authority_set) => authority_set, + None => { + info!(target: "afg", "Loading GRANDPA authorities \ + from genesis on what appears to be first startup."); + + // no authority set on disk: fetch authorities from genesis state + let genesis_authorities = api.runtime_api().grandpa_authorities(&BlockId::number(Zero::zero()))?; + + let authority_set = LightAuthoritySet::genesis(genesis_authorities); + let encoded = authority_set.encode(); + aux_store.insert_aux(&[(LIGHT_AUTHORITY_SET_KEY, &encoded[..])], &[])?; + + authority_set + }, + }; + + let consensus_changes = match load_decode(aux_store, LIGHT_CONSENSUS_CHANGES_KEY)? { + Some(consensus_changes) => consensus_changes, + None => { + let consensus_changes = ConsensusChanges::>::empty(); + + let encoded = consensus_changes.encode(); + aux_store.insert_aux(&[(LIGHT_CONSENSUS_CHANGES_KEY, &encoded[..])], &[])?; + + consensus_changes + }, + }; + + Ok(LightImportData { + last_finalized, + authority_set, + consensus_changes, + }) +} + +/// Insert into aux store. If failed, return error && show inconsistency warning. +fn require_insert_aux, RA>( + client: &Client, + key: &[u8], + value: &T, + value_type: &str, +) -> Result<(), ConsensusError> + where + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, +{ + let backend = &**client.backend(); + let encoded = value.encode(); + let update_res = Backend::insert_aux(backend, &[(key, &encoded[..])], &[]); + if let Err(error) = update_res { + return Err(on_post_finalization_error(error, value_type)); + } + + Ok(()) +} + +/// Display inconsistency warning. +fn on_post_finalization_error(error: ClientError, value_type: &str) -> ConsensusError { + warn!(target: "finality", "Failed to write updated {} to disk. 
Bailing.", value_type); + warn!(target: "finality", "Node is in a potentially inconsistent state."); + ConsensusError::from(ConsensusErrorKind::ClientImport(error.to_string())) +} + +#[cfg(test)] +pub mod tests { + use super::*; + use consensus_common::ForkChoiceStrategy; + use substrate_primitives::H256; + use test_client::client::in_mem::Blockchain as InMemoryAuxStore; + use test_client::runtime::{Block, Header}; + use crate::tests::TestApi; + use crate::finality_proof::tests::TestJustification; + + pub struct NoJustificationsImport, RA>( + pub GrandpaLightBlockImport + ); + + impl, RA> BlockImport + for NoJustificationsImport where + NumberFor: grandpa::BlockNumberOps, + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + DigestFor: Encode, + RA: Send + Sync, + { + type Error = ConsensusError; + + fn import_block( + &self, + mut block: ImportBlock, + new_cache: HashMap>, + ) -> Result { + block.justification.take(); + self.0.import_block(block, new_cache) + } + + fn check_block( + &self, + hash: Block::Hash, + parent_hash: Block::Hash, + ) -> Result { + self.0.check_block(hash, parent_hash) + } + } + + impl, RA> FinalityProofImport + for NoJustificationsImport where + NumberFor: grandpa::BlockNumberOps, + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + DigestFor: Encode, + RA: Send + Sync, + { + type Error = ConsensusError; + + fn on_start(&self, link: &::consensus_common::import_queue::Link) { + self.0.on_start(link) + } + + fn import_finality_proof( + &self, + hash: Block::Hash, + number: NumberFor, + finality_proof: Vec, + verifier: &Verifier, + ) -> Result<(Block::Hash, NumberFor), Self::Error> { + self.0.import_finality_proof(hash, number, finality_proof, verifier) + } + } + + /// Creates light block import that ignores justifications that came outside of finality proofs. 
+ pub fn light_block_import_without_justifications, RA, PRA>( + client: Arc>, + authority_set_provider: Arc>, + api: Arc, + ) -> Result, ClientError> + where + B: Backend + 'static, + E: CallExecutor + 'static + Clone + Send + Sync, + RA: Send + Sync, + PRA: ProvideRuntimeApi, + PRA::Api: GrandpaApi, + { + light_block_import(client, authority_set_provider, api).map(NoJustificationsImport) + } + + fn import_block( + new_cache: HashMap>, + justification: Option, + ) -> ImportResult { + let client = test_client::new_light(); + let mut import_data = LightImportData { + last_finalized: Default::default(), + authority_set: LightAuthoritySet::genesis(vec![(AuthorityId([1; 32]), 1)]), + consensus_changes: ConsensusChanges::empty(), + }; + let block = ImportBlock { + origin: BlockOrigin::Own, + header: Header { + number: 1, + parent_hash: client.info().unwrap().chain.best_hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }, + justification, + post_digests: Vec::new(), + body: None, + finalized: false, + auxiliary: Vec::new(), + fork_choice: ForkChoiceStrategy::LongestChain, + }; + do_import_block::<_, _, _, _, TestJustification>( + &client, + &mut import_data, + block, + new_cache, + ).unwrap() + } + + #[test] + fn finality_proof_not_required_when_consensus_data_does_not_change_and_no_justification_provided() { + assert_eq!(import_block(HashMap::new(), None), ImportResult::Imported(ImportedAux { + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + needs_finality_proof: false, + })); + } + + #[test] + fn finality_proof_not_required_when_consensus_data_does_not_change_and_correct_justification_provided() { + let justification = TestJustification(true, Vec::new()).encode(); + assert_eq!(import_block(HashMap::new(), Some(justification)), ImportResult::Imported(ImportedAux { + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + needs_finality_proof: false, + })); + } + + #[test] + fn finality_proof_required_when_consensus_data_changes_and_no_justification_provided() { + let mut cache = HashMap::new(); + cache.insert(well_known_cache_keys::AUTHORITIES, vec![AuthorityId([2; 32])].encode()); + assert_eq!(import_block(cache, None), ImportResult::Imported(ImportedAux { + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + needs_finality_proof: true, + })); + } + + #[test] + fn finality_proof_required_when_consensus_data_changes_and_incorrect_justification_provided() { + let justification = TestJustification(false, Vec::new()).encode(); + let mut cache = HashMap::new(); + cache.insert(well_known_cache_keys::AUTHORITIES, vec![AuthorityId([2; 32])].encode()); + assert_eq!( + import_block(cache, Some(justification)), + ImportResult::Imported(ImportedAux { + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + needs_finality_proof: true, + }, + )); + } + + #[test] + fn aux_data_updated_on_start() { + let aux_store = InMemoryAuxStore::::new(); + let api = Arc::new(TestApi::new(vec![(AuthorityId([1; 32]), 1)])); + + // when aux store is empty initially + assert!(aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_none()); + assert!(aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_none()); + + // it is updated on importer start + load_aux_import_data(Default::default(), &aux_store, api).unwrap(); 
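+		// loading from an empty store must have derived the genesis values and persisted
+		// them under both keys: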
assert!(aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_some()); + assert!(aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_some()); + } + + #[test] + fn aux_data_loaded_on_restart() { + let aux_store = InMemoryAuxStore::::new(); + let api = Arc::new(TestApi::new(vec![(AuthorityId([1; 32]), 1)])); + + // when aux store is non-empty initially + let mut consensus_changes = ConsensusChanges::::empty(); + consensus_changes.note_change((42, Default::default())); + aux_store.insert_aux( + &[ + ( + LIGHT_AUTHORITY_SET_KEY, + LightAuthoritySet::genesis(vec![(AuthorityId([42; 32]), 2)]).encode().as_slice(), + ), + ( + LIGHT_CONSENSUS_CHANGES_KEY, + consensus_changes.encode().as_slice(), + ), + ], + &[], + ).unwrap(); + + // importer uses it on start + let data = load_aux_import_data(Default::default(), &aux_store, api).unwrap(); + assert_eq!(data.authority_set.authorities(), vec![(AuthorityId([42; 32]), 2)]); + assert_eq!(data.consensus_changes.pending_changes(), &[(42, Default::default())]); + } +} diff --git a/core/finality-grandpa/src/service_integration.rs b/core/finality-grandpa/src/service_integration.rs index 168e64183782e..9f19b9204190b 100644 --- a/core/finality-grandpa/src/service_integration.rs +++ b/core/finality-grandpa/src/service_integration.rs @@ -17,7 +17,7 @@ /// Integrate grandpa finality with substrate service use client; -use service::{FullBackend, FullExecutor, ServiceFactory}; +use service::{FullBackend, FullExecutor, LightBackend, LightExecutor, ServiceFactory}; pub type BlockImportForService = crate::GrandpaBlockImport< FullBackend, @@ -25,12 +25,12 @@ pub type BlockImportForService = crate::GrandpaBlockImport< ::Block, ::RuntimeApi, client::Client< - FullBackend, - FullExecutor, - ::Block, - ::RuntimeApi - >, - ::SelectChain + FullBackend, + FullExecutor, + ::Block, + ::RuntimeApi + >, + ::SelectChain, >; pub type LinkHalfForService = crate::LinkHalf< @@ -40,3 +40,10 @@ pub type LinkHalfForService = crate::LinkHalf< ::RuntimeApi, ::SelectChain >; + +pub type BlockImportForLightService = crate::light_import::GrandpaLightBlockImport< + LightBackend, + LightExecutor, + ::Block, + ::RuntimeApi, +>; diff --git a/core/finality-grandpa/src/tests.rs b/core/finality-grandpa/src/tests.rs index 52b23bfcb79f2..98e4b6d85c08e 100644 --- a/core/finality-grandpa/src/tests.rs +++ b/core/finality-grandpa/src/tests.rs @@ -25,21 +25,24 @@ use parking_lot::Mutex; use tokio::runtime::current_thread; use keyring::ed25519::{Keyring as AuthorityKeyring}; use client::{ - BlockchainEvents, error::Result, - blockchain::Backend as BlockchainBackend, + error::Result, runtime_api::{Core, RuntimeVersion, ApiExt}, LongestChain, }; use test_client::{self, runtime::BlockNumber}; use consensus_common::{BlockOrigin, ForkChoiceStrategy, ImportedAux, ImportBlock, ImportResult}; -use consensus_common::import_queue::{SharedBlockImport, SharedJustificationImport}; +use consensus_common::import_queue::{SharedBlockImport, SharedJustificationImport, SharedFinalityProofImport, + SharedFinalityProofRequestBuilder, +}; use std::collections::{HashMap, HashSet}; use std::result; +use parity_codec::Decode; use runtime_primitives::traits::{ApiRef, ProvideRuntimeApi, Header as HeaderT}; use runtime_primitives::generic::BlockId; use substrate_primitives::{NativeOrEncoded, ExecutionContext, ed25519::Public as AuthorityId}; use authorities::AuthoritySet; +use finality_proof::{FinalityProofProvider, AuthoritySetForFinalityProver, AuthoritySetForFinalityChecker}; use communication::GRANDPA_ENGINE_ID; use 
consensus_changes::ConsensusChanges; @@ -72,7 +75,7 @@ impl GrandpaTestNet { }; let config = Self::default_config(); for _ in 0..n_peers { - net.add_peer(&config); + net.add_full_peer(&config); } net } @@ -99,27 +102,61 @@ impl TestNetFactory for GrandpaTestNet { } } - fn make_verifier(&self, _client: Arc, _cfg: &ProtocolConfig) + fn make_verifier(&self, _client: PeersClient, _cfg: &ProtocolConfig) -> Arc { Arc::new(PassThroughVerifier(false)) // use non-instant finality. } - fn make_block_import(&self, client: Arc) - -> (SharedBlockImport, Option>, PeerData) + fn make_block_import(&self, client: PeersClient) + -> ( + SharedBlockImport, + Option>, + Option>, + Option>, + PeerData, + ) { - - let select_chain = LongestChain::new( - client.backend().clone(), - client.import_lock().clone() - ); - let (import, link) = block_import( - client, - Arc::new(self.test_config.clone()), - select_chain, - ).expect("Could not create block import for fresh peer."); - let shared_import = Arc::new(import); - (shared_import.clone(), Some(shared_import), Mutex::new(Some(link))) + match client { + PeersClient::Full(ref client) => { + let select_chain = LongestChain::new( + client.backend().clone(), + client.import_lock().clone() + ); + let (import, link) = block_import( + client.clone(), + Arc::new(self.test_config.clone()), + select_chain, + ).expect("Could not create block import for fresh peer."); + let shared_import = Arc::new(import); + (shared_import.clone(), Some(shared_import), None, None, Mutex::new(Some(link))) + }, + PeersClient::Light(ref client) => { + use crate::light_import::tests::light_block_import_without_justifications; + + let authorities_provider = Arc::new(self.test_config.clone()); + // forbid direct finalization using the justification that comes with the block + // => light clients will try to fetch finality proofs + let import = light_block_import_without_justifications( + client.clone(), + authorities_provider, + Arc::new(self.test_config.clone()) + ).expect("Could not create block import for fresh peer."); + let finality_proof_req_builder = import.0.create_finality_proof_request_builder(); + let shared_import = Arc::new(import); + (shared_import.clone(), None, Some(shared_import), Some(finality_proof_req_builder), Mutex::new(None)) + }, + } + } + + fn make_finality_proof_provider(&self, client: PeersClient) -> Option>> { + match client { + PeersClient::Full(ref client) => { + let authorities_provider = Arc::new(self.test_config.clone()); + Some(Arc::new(FinalityProofProvider::new(client.clone(), authorities_provider))) + }, + PeersClient::Light(_) => None, + } } fn peer(&self, i: usize) -> &GrandpaPeer { @@ -234,14 +271,14 @@ impl Future for Exit { } #[derive(Default, Clone)] -struct TestApi { +pub(crate) struct TestApi { genesis_authorities: Vec<(AuthorityId, u64)>, scheduled_changes: Arc>>>, forced_changes: Arc)>>>, } impl TestApi { - fn new(genesis_authorities: Vec<(AuthorityId, u64)>) -> Self { + pub fn new(genesis_authorities: Vec<(AuthorityId, u64)>) -> Self { TestApi { genesis_authorities, scheduled_changes: Arc::new(Mutex::new(HashMap::new())), @@ -250,7 +287,7 @@ impl TestApi { } } -struct RuntimeApi { +pub(crate) struct RuntimeApi { inner: TestApi, } @@ -327,16 +364,12 @@ impl ApiExt for RuntimeApi { impl GrandpaApi for RuntimeApi { fn GrandpaApi_grandpa_authorities_runtime_api_impl( &self, - at: &BlockId, + _: &BlockId, _: ExecutionContext, _: Option<()>, _: Vec, ) -> Result>> { - if at == &BlockId::Number(0) { -
Ok(self.inner.genesis_authorities.clone()).map(NativeOrEncoded::Native) - } else { - panic!("should generally only request genesis authorities") - } + Ok(self.inner.genesis_authorities.clone()).map(NativeOrEncoded::Native) } fn GrandpaApi_grandpa_pending_change_runtime_api_impl( @@ -375,6 +408,33 @@ impl GrandpaApi for RuntimeApi { } } +impl AuthoritySetForFinalityProver for TestApi { + fn authorities(&self, block: &BlockId) -> Result> { + let runtime_api = RuntimeApi { inner: self.clone() }; + runtime_api.GrandpaApi_grandpa_authorities_runtime_api_impl(block, ExecutionContext::Syncing, None, Vec::new()) + .map(|v| match v { + NativeOrEncoded::Native(value) => value, + _ => unreachable!("only providing native values"), + }) + } + + fn prove_authorities(&self, block: &BlockId) -> Result>> { + self.authorities(block).map(|auth| vec![auth.encode()]) + } +} + +impl AuthoritySetForFinalityChecker for TestApi { + fn check_authorities_proof( + &self, + _hash: ::Hash, + _header: ::Header, + proof: Vec>, + ) -> Result> { + Decode::decode(&mut &proof[0][..]) + .ok_or_else(|| unreachable!("incorrect value is passed as GRANDPA authorities proof")) + } +} + const TEST_GOSSIP_DURATION: Duration = Duration::from_millis(500); const TEST_ROUTING_INTERVAL: Duration = Duration::from_millis(50); @@ -499,7 +559,7 @@ fn finalize_3_voters_no_observers() { run_to_completion(20, net.clone(), peers); // normally there's no justification for finalized blocks - assert!(net.lock().peer(0).client().backend().blockchain().justification(BlockId::Number(20)).unwrap().is_none(), + assert!(net.lock().peer(0).client().justification(&BlockId::Number(20)).unwrap().is_none(), "Extra justification for block#1"); } @@ -602,11 +662,12 @@ fn transition_3_voters_twice_1_full_observer() { net.lock().sync(); for (i, peer) in net.lock().peers().iter().enumerate() { - assert_eq!(peer.client().info().unwrap().chain.best_number, 1, + let full_client = peer.client().as_full().expect("only full clients are used in test"); + assert_eq!(full_client.info().unwrap().chain.best_number, 1, "Peer #{} failed to sync", i); let set: AuthoritySet = crate::aux_schema::load_authorities( - &**peer.client().backend() + &**full_client.backend() ).unwrap(); assert_eq!(set.current(), (0, make_ids(peers_a).as_slice())); @@ -693,8 +754,9 @@ fn transition_3_voters_twice_1_full_observer() { .take_while(|n| Ok(n.header.number() < &30)) .for_each(move |_| Ok(())) .map(move |()| { + let full_client = client.as_full().expect("only full clients are used in test"); let set: AuthoritySet = crate::aux_schema::load_authorities( - &**client.backend() + &**full_client.backend() ).unwrap(); assert_eq!(set.current(), (2, make_ids(peers_c).as_slice())); @@ -749,8 +811,8 @@ fn justification_is_emitted_when_consensus_data_changes() { let net = Arc::new(Mutex::new(net)); run_to_completion(1, net.clone(), peers); - // ... and check that there's no justification for block#1 - assert!(net.lock().peer(0).client().backend().blockchain().justification(BlockId::Number(1)).unwrap().is_some(), + // ... 
and check that there's justification for block#1 + assert!(net.lock().peer(0).client().justification(&BlockId::Number(1)).unwrap().is_some(), "Missing justification for block#1"); } @@ -769,8 +831,7 @@ fn justification_is_generated_periodically() { // when block#32 (justification_period) is finalized, justification // is required => generated for i in 0..3 { - assert!(net.lock().peer(i).client().backend().blockchain() - .justification(BlockId::Number(32)).unwrap().is_some()); + assert!(net.lock().peer(i).client().justification(&BlockId::Number(32)).unwrap().is_some()); } } @@ -963,8 +1024,9 @@ fn force_change_to_new_set() { assert_eq!(peer.client().info().unwrap().chain.best_number, 26, "Peer #{} failed to sync", i); + let full_client = peer.client().as_full().expect("only full clients are used in test"); let set: AuthoritySet = crate::aux_schema::load_authorities( - &**peer.client().backend() + &**full_client.backend() ).unwrap(); assert_eq!(set.current(), (1, voters.as_slice())); @@ -991,7 +1053,8 @@ fn allows_reimporting_change_blocks() { let client = net.peer(0).client().clone(); let (block_import, ..) = net.make_block_import(client.clone()); - let builder = client.new_block_at(&BlockId::Number(0)).unwrap(); + let full_client = client.as_full().unwrap(); + let builder = full_client.new_block_at(&BlockId::Number(0)).unwrap(); let block = builder.bake().unwrap(); api.scheduled_changes.lock().insert(*block.header.parent_hash(), ScheduledChange { next_authorities: make_ids(peers_b), @@ -1014,7 +1077,12 @@ fn allows_reimporting_change_blocks() { assert_eq!( block_import.import_block(block(), HashMap::new()).unwrap(), - ImportResult::Imported(ImportedAux { needs_justification: true, clear_justification_requests: false, bad_justification: false }), + ImportResult::Imported(ImportedAux { + needs_justification: true, + clear_justification_requests: false, + bad_justification: false, + needs_finality_proof: false, + }), ); assert_eq!( @@ -1034,7 +1102,8 @@ fn test_bad_justification() { let client = net.peer(0).client().clone(); let (block_import, ..) 
= net.make_block_import(client.clone()); - let builder = client.new_block_at(&BlockId::Number(0)).unwrap(); + let full_client = client.as_full().expect("only full clients are used in test"); + let builder = full_client.new_block_at(&BlockId::Number(0)).unwrap(); let block = builder.bake().unwrap(); api.scheduled_changes.lock().insert(*block.header.parent_hash(), ScheduledChange { next_authorities: make_ids(peers_b), @@ -1057,7 +1126,12 @@ fn test_bad_justification() { assert_eq!( block_import.import_block(block(), HashMap::new()).unwrap(), - ImportResult::Imported(ImportedAux { needs_justification: true, clear_justification_requests: false, bad_justification: true }), + ImportResult::Imported(ImportedAux { + needs_justification: true, + clear_justification_requests: false, + bad_justification: true, + ..Default::default() + }), ); assert_eq!( @@ -1102,7 +1176,7 @@ fn voter_persists_its_votes() { let net = net.clone(); let voter = future::loop_fn(voter_rx, move |rx| { - let (_block_import, _, link) = net.lock().make_block_import(client.clone()); + let (_block_import, _, _, _, link) = net.lock().make_block_import(client.clone()); let link = link.lock().take().unwrap(); let grandpa_params = GrandpaParams { @@ -1201,7 +1275,7 @@ fn voter_persists_its_votes() { "Peer #{} failed to sync", 0); let block_30_hash = - net.lock().peer(0).client().backend().blockchain().hash(30).unwrap().unwrap(); + net.lock().peer(0).client().as_full().unwrap().backend().blockchain().hash(30).unwrap().unwrap(); // we restart alice's voter voter_tx.unbounded_send(()).unwrap(); @@ -1302,3 +1376,94 @@ fn finalize_3_voters_1_light_observer() { Some(Box::new(finality_notifications.map(|_| ()))) }); } + +#[test] +fn finality_proof_is_fetched_by_light_client_when_consensus_data_changes() { + let _ = ::env_logger::try_init(); + + let peers = &[AuthorityKeyring::Alice]; + let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 1); + net.add_light_peer(&GrandpaTestNet::default_config()); + + // import block#1 WITH consensus data change. Light client ignores justification + // && instead fetches finality proof for block #1 + net.peer(0).push_authorities_change_block(vec![substrate_primitives::sr25519::Public::from_raw([42; 32])]); + let net = Arc::new(Mutex::new(net)); + run_to_completion(1, net.clone(), peers); + net.lock().sync_without_disconnects(); + + // check that the block#1 is finalized on light client + while net.lock().peer(1).client().info().unwrap().chain.finalized_number != 1 { + net.lock().tick_peer(1); + net.lock().sync_without_disconnects(); + } +} + +#[test] +fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_different() { + // for debug: to ensure that without forced change light client will sync finality proof + const FORCE_CHANGE: bool = true; + + let _ = ::env_logger::try_init(); + + // two of these guys are offline. 
+ let genesis_authorities = if FORCE_CHANGE { + vec![ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + AuthorityKeyring::One, + AuthorityKeyring::Two, + ] + } else { + vec![ + AuthorityKeyring::Alice, + AuthorityKeyring::Bob, + AuthorityKeyring::Charlie, + ] + }; + let peers_a = &[AuthorityKeyring::Alice, AuthorityKeyring::Bob, AuthorityKeyring::Charlie]; + let api = TestApi::new(make_ids(&genesis_authorities)); + + let voters = make_ids(peers_a); + let forced_transitions = api.forced_changes.clone(); + let net = GrandpaTestNet::new(api, 3); + let net = Arc::new(Mutex::new(net)); + + let runner_net = net.clone(); + let add_blocks = move |_| { + net.lock().peer(0).push_blocks(1, false); // best is #1 + + // add a forced transition at block 5. + if FORCE_CHANGE { + let parent_hash = net.lock().peer(0).client().info().unwrap().chain.best_hash; + forced_transitions.lock().insert(parent_hash, (0, ScheduledChange { + next_authorities: voters.clone(), + delay: 3, + })); + } + + // ensure block#10 enacts an authority set change => justification is generated + // normally it will reach the light client, but because of the forced change, it will not + net.lock().peer(0).push_blocks(8, false); // best is #9 + net.lock().peer(0).push_authorities_change_block( + vec![substrate_primitives::sr25519::Public::from_raw([42; 32])] + ); // #10 + net.lock().peer(0).push_blocks(1, false); // best is #11 + net.lock().sync_without_disconnects(); + + None + }; + + // finalize block #11 on full clients + run_to_completion_with(11, runner_net.clone(), peers_a, add_blocks); + // request finalization by light client + runner_net.lock().add_light_peer(&GrandpaTestNet::default_config()); + runner_net.lock().sync_without_disconnects(); + + // check the block finalized on the light client + assert_eq!( + runner_net.lock().peer(3).client().info().unwrap().chain.finalized_number, + if FORCE_CHANGE { 0 } else { 10 }, + ); +} \ No newline at end of file diff --git a/core/network/src/chain.rs b/core/network/src/chain.rs index 92236e7c63848..9548afc9d1a23 100644 --- a/core/network/src/chain.rs +++ b/core/network/src/chain.rs @@ -68,6 +68,12 @@ pub trait Client: Send + Sync { fn is_descendent_of(&self, base: &Block::Hash, block: &Block::Hash) -> Result; } +/// Finality proof provider. +pub trait FinalityProofProvider<Block: BlockT>: Send + Sync { + /// Prove finality of the block. + fn prove_finality(&self, for_block: Block::Hash, request: &[u8]) -> Result<Option<Vec<u8>>, Error>; +} + impl Client for SubstrateClient where B: client::backend::Backend + Send + Sync + 'static, E: CallExecutor + Send + Sync + 'static, diff --git a/core/network/src/config.rs b/core/network/src/config.rs index 2491fc21c4c07..7e67d1cda8e57 100644 --- a/core/network/src/config.rs +++ b/core/network/src/config.rs @@ -19,7 +19,7 @@ pub use network_libp2p::{NonReservedPeerMode, NetworkConfiguration, NodeKeyConfig, Secret}; use bitflags::bitflags; -use crate::chain::Client; +use crate::chain::{Client, FinalityProofProvider}; use parity_codec; use crate::on_demand::OnDemandService; use runtime_primitives::traits::{Block as BlockT}; @@ -34,6 +34,8 @@ pub struct Params { pub network_config: NetworkConfiguration, /// Substrate relay chain access point. pub chain: Arc>, + /// Finality proof provider. + pub finality_proof_provider: Option<Arc<FinalityProofProvider<B>>>, /// On-demand service reference. pub on_demand: Option>>, /// Transaction pool.
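For orientation, here is a minimal sketch (not part of the diff) of what an implementation of the new `FinalityProofProvider` trait has to supply. The type name `NoopProofProvider` is hypothetical, and the sketch assumes the `Error` used in chain.rs is `client::error::Error`; the real implementation is GRANDPA's provider from core/finality-grandpa/src/finality_proof.rs, wired up via `make_finality_proof_provider` in the test code above.

use runtime_primitives::traits::Block as BlockT;
use client::error::Error;
use crate::chain::FinalityProofProvider;

// Hypothetical provider that never has a proof to offer.
struct NoopProofProvider;

impl<Block: BlockT> FinalityProofProvider<Block> for NoopProofProvider {
	fn prove_finality(
		&self,
		_for_block: Block::Hash,
		_request: &[u8],
	) -> Result<Option<Vec<u8>>, Error> {
		// Ok(None) means "no proof available"; the protocol then replies to
		// the requesting peer with a FinalityProofResponse whose proof is None.
		Ok(None)
	}
}

It would be handed to the network through the new `finality_proof_provider` field of `Params` shown above, e.g. `finality_proof_provider: Some(Arc::new(NoopProofProvider))`.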
diff --git a/core/network/src/extra_requests.rs b/core/network/src/extra_requests.rs new file mode 100644 index 0000000000000..d9d51d1e26560 --- /dev/null +++ b/core/network/src/extra_requests.rs @@ -0,0 +1,470 @@ +// Copyright 2017-2018 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use std::collections::{HashMap, HashSet, VecDeque}; +use std::time::{Duration, Instant}; +use log::{trace, warn}; +use client::error::Error as ClientError; +use consensus::import_queue::SharedFinalityProofRequestBuilder; +use fork_tree::ForkTree; +use network_libp2p::PeerId; +use runtime_primitives::Justification; +use runtime_primitives::traits::{Block as BlockT, NumberFor}; +use crate::message; +use crate::protocol::Context; +use crate::sync::{PeerSync, PeerSyncState}; + +// Time to wait before trying to get the same extra data from the same peer. +const EXTRA_RETRY_WAIT: Duration = Duration::from_secs(10); + +/// Pending extra data request for the given block (hash and number). +type ExtraRequest = (::Hash, NumberFor); + +/// Extra requests processor. +pub(crate) trait ExtraRequestsEssence { + type Response; + + /// Name of request type to display in logs. + fn type_name(&self) -> &'static str; + /// Send network message corresponding to the request. + fn send_network_request(&self, protocol: &mut Context, peer: PeerId, request: ExtraRequest); + /// Create peer state for peer that is downloading extra data. + fn peer_downloading_state(&self, block: B::Hash) -> PeerSyncState; +} + +/// Manages all extra data requests required for sync. +pub(crate) struct ExtraRequestsAggregator { + /// Manages justifications requests. + justifications: ExtraRequests, + /// Manages finality proof requests. + finality_proofs: ExtraRequests>, +} + +impl ExtraRequestsAggregator { + pub(crate) fn new() -> Self { + ExtraRequestsAggregator { + justifications: ExtraRequests::new(JustificationsRequestsEssence), + finality_proofs: ExtraRequests::new(FinalityProofRequestsEssence(None)), + } + } + + pub(crate) fn justifications(&mut self) -> &mut ExtraRequests { + &mut self.justifications + } + + pub(crate) fn finality_proofs(&mut self) -> &mut ExtraRequests> { + &mut self.finality_proofs + } + + /// Dispatches all possible pending requests to the given peers. + pub(crate) fn dispatch(&mut self, peers: &mut HashMap>, protocol: &mut Context) { + self.justifications.dispatch(peers, protocol); + self.finality_proofs.dispatch(peers, protocol); + } + + /// Removes any pending extra requests for blocks lower than the + /// given best finalized. 
+ pub(crate) fn on_block_finalized( + &mut self, + best_finalized_hash: &B::Hash, + best_finalized_number: NumberFor, + is_descendent_of: &F, + ) -> Result<(), fork_tree::Error> + where F: Fn(&B::Hash, &B::Hash) -> Result + { + self.justifications.on_block_finalized(best_finalized_hash, best_finalized_number, is_descendent_of)?; + self.finality_proofs.on_block_finalized(best_finalized_hash, best_finalized_number, is_descendent_of)?; + Ok(()) + } + + /// Retry any pending request if a peer disconnected. + pub(crate) fn peer_disconnected(&mut self, who: PeerId) { + self.justifications.peer_disconnected(&who); + self.finality_proofs.peer_disconnected(&who); + } +} + +/// Manages pending block extra data (e.g. justification) requests. +/// Multiple extras may be requested for competing forks, or for the same branch +/// at different (increasing) heights. This structure will guarantee that extras +/// are fetched in-order, and that obsolete changes are pruned (when finalizing a +/// competing fork). +pub(crate) struct ExtraRequests { + tree: ForkTree, ()>, + pending_requests: VecDeque>, + peer_requests: HashMap>, + previous_requests: HashMap, Vec<(PeerId, Instant)>>, + importing_requests: HashSet>, + essence: Essence, +} + +impl> ExtraRequests { + fn new(essence: Essence) -> Self { + ExtraRequests { + tree: ForkTree::new(), + pending_requests: VecDeque::new(), + peer_requests: HashMap::new(), + previous_requests: HashMap::new(), + importing_requests: HashSet::new(), + essence, + } + } + + /// Get mutable reference to the requests essence. + pub(crate) fn essence(&mut self) -> &mut Essence { + &mut self.essence + } + + /// Dispatches all possible pending requests to the given peers. Peers are + /// filtered according to the current known best block (i.e. we won't send an + /// extra request for block #10 to a peer at block #2), and we also + /// throttle requests to the same peer if a previous request + /// yielded no results. + pub(crate) fn dispatch(&mut self, peers: &mut HashMap>, protocol: &mut Context) { + if self.pending_requests.is_empty() { + return; + } + + let initial_pending_requests = self.pending_requests.len(); + + // clean up previous failed requests so we can retry again + for (_, requests) in self.previous_requests.iter_mut() { + requests.retain(|(_, instant)| instant.elapsed() < EXTRA_RETRY_WAIT); + } + + let mut available_peers = peers.iter().filter_map(|(peer, sync)| { + // don't request to any peers that already have pending requests or are unavailable + if sync.state != PeerSyncState::Available || self.peer_requests.contains_key(&peer) { + None + } else { + Some((peer.clone(), sync.best_number)) + } + }).collect::>(); + + let mut last_peer = available_peers.back().map(|p| p.0.clone()); + let mut unhandled_requests = VecDeque::new(); + + loop { + let (peer, peer_best_number) = match available_peers.pop_front() { + Some(p) => p, + _ => break, + }; + + // only ask peers that have synced past the block number that we're + // asking the extra for and to whom we haven't already made + // the same request recently + let peer_eligible = { + let request = match self.pending_requests.front() { + Some(r) => r.clone(), + _ => break, + }; + + peer_best_number >= request.1 && + !self.previous_requests + .get(&request) + .map(|requests| requests.iter().any(|i| i.0 == peer)) + .unwrap_or(false) + }; + + if !peer_eligible { + available_peers.push_back((peer.clone(), peer_best_number)); + + // we tried all peers and none can answer this request + if Some(peer) == last_peer { + last_peer = available_peers.back().map(|p| p.0.clone()); + + let request = self.pending_requests.pop_front() + .expect("verified to be Some in the beginning of the loop; qed"); + + unhandled_requests.push_back(request); + } + + continue; + } + + last_peer = available_peers.back().map(|p| p.0.clone()); + + let request = self.pending_requests.pop_front() + .expect("verified to be Some in the beginning of the loop; qed"); + + self.peer_requests.insert(peer.clone(), request); + + peers.get_mut(&peer) + .expect("peer was taken from available_peers; available_peers is a subset of peers; qed") + .state = self.essence.peer_downloading_state(request.0.clone()); + + trace!(target: "sync", "Requesting {} for block #{} from {}", self.essence.type_name(), request.0, peer); + self.essence.send_network_request(protocol, peer, request); + } + + self.pending_requests.append(&mut unhandled_requests); + + trace!(target: "sync", "Dispatched {} {} requests ({} pending)", + initial_pending_requests - self.pending_requests.len(), + self.essence.type_name(), + self.pending_requests.len(), + ); + } + + /// Queue an extra data request (without dispatching it). + pub(crate) fn queue_request(&mut self, request: ExtraRequest, is_descendent_of: F) + where F: Fn(&B::Hash, &B::Hash) -> Result + { + match self.tree.import(request.0.clone(), request.1.clone(), (), &is_descendent_of) { + Ok(true) => { + // this is a new root so we add it to the current `pending_requests` + self.pending_requests.push_back((request.0, request.1)); + }, + Err(err) => { + warn!(target: "sync", "Failed to insert requested {} {:?} {:?} into tree: {:?}", + self.essence.type_name(), + request.0, + request.1, + err, + ); + return; + }, + _ => {}, + } + } + + /// Retry any pending request if a peer disconnected.
+ fn peer_disconnected(&mut self, who: &PeerId) { + if let Some(request) = self.peer_requests.remove(who) { + self.pending_requests.push_front(request); + } + } + + /// Process the import result of an extra. + /// Queues a retry in case the import failed. + /// Returns true if import has been queued. + pub(crate) fn on_import_result( + &mut self, + request: (B::Hash, NumberFor), + finalization_result: Result<(B::Hash, NumberFor), ()>, + ) -> bool { + self.try_finalize_root(request, finalization_result, true) + } + + /// Processes the response for the request previously sent to the given + /// peer. Queues a retry in case the given response + /// was `None`. + pub(crate) fn on_response( + &mut self, + who: PeerId, + response: Option, + ) -> Option<(PeerId, B::Hash, NumberFor, Essence::Response)> { + // we assume that the request maps to the given response, this is + // currently enforced by the outer network protocol before passing on + // messages to chain sync. + if let Some(request) = self.peer_requests.remove(&who) { + if let Some(response) = response { + self.importing_requests.insert(request); + return Some((who, request.0, request.1, response)); + } + + self.previous_requests + .entry(request) + .or_insert(Vec::new()) + .push((who, Instant::now())); + self.pending_requests.push_front(request); + } + + None + } + + /// Removes any pending extra requests for blocks lower than the + /// given best finalized. + fn on_block_finalized( + &mut self, + best_finalized_hash: &B::Hash, + best_finalized_number: NumberFor, + is_descendent_of: F, + ) -> Result<(), fork_tree::Error> + where F: Fn(&B::Hash, &B::Hash) -> Result + { + let is_scheduled_root = self.try_finalize_root( + (*best_finalized_hash, best_finalized_number), + Ok((*best_finalized_hash, best_finalized_number)), + false, + ); + if is_scheduled_root { + return Ok(()); + } + + use std::collections::HashSet; + + self.tree.finalize(best_finalized_hash, best_finalized_number, &is_descendent_of)?; + + let roots = self.tree.roots().collect::>(); + + self.pending_requests.retain(|(h, n)| roots.contains(&(h, n, &()))); + self.peer_requests.retain(|_, (h, n)| roots.contains(&(h, n, &()))); + self.previous_requests.retain(|(h, n), _| roots.contains(&(h, n, &()))); + + Ok(()) + } + + /// Clear all data. + pub(crate) fn clear(&mut self) { + self.tree = ForkTree::new(); + self.pending_requests.clear(); + self.peer_requests.clear(); + self.previous_requests.clear(); + } + + /// Try to finalize pending root. + /// Returns true if import of this request has been scheduled.
+ fn try_finalize_root( + &mut self, + request: (B::Hash, NumberFor), + finalization_result: Result<(B::Hash, NumberFor), ()>, + reschedule_on_failure: bool, + ) -> bool { + if !self.importing_requests.remove(&request) { + return false; + } + + let (finalized_hash, finalized_number) = match finalization_result { + Ok((finalized_hash, finalized_number)) => (finalized_hash, finalized_number), + Err(_) => { + if reschedule_on_failure { + self.pending_requests.push_front(request); + } + return true; + }, + }; + + if self.tree.finalize_root(&finalized_hash).is_none() { + warn!(target: "sync", "Imported {} for {:?} {:?} which isn't a root in the tree: {:?}", + self.essence.type_name(), + finalized_hash, + finalized_number, + self.tree.roots().collect::>(), + ); + return true; + }; + + self.previous_requests.clear(); + self.peer_requests.clear(); + self.pending_requests = + self.tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect(); + + true + } +} + +pub(crate) struct JustificationsRequestsEssence; + +impl ExtraRequestsEssence for JustificationsRequestsEssence { + type Response = Justification; + + fn type_name(&self) -> &'static str { + "justification" + } + + fn send_network_request(&self, protocol: &mut Context, peer: PeerId, request: ExtraRequest) { + protocol.send_block_request(peer, message::generic::BlockRequest { + id: 0, + fields: message::BlockAttributes::JUSTIFICATION, + from: message::FromBlock::Hash(request.0), + to: None, + direction: message::Direction::Ascending, + max: Some(1), + }) + } + + fn peer_downloading_state(&self, block: B::Hash) -> PeerSyncState { + PeerSyncState::DownloadingJustification(block) + } +} + +pub(crate) struct FinalityProofRequestsEssence(pub Option>); + +impl ExtraRequestsEssence for FinalityProofRequestsEssence { + type Response = Vec; + + fn type_name(&self) -> &'static str { + "finality proof" + } + + fn send_network_request(&self, protocol: &mut Context, peer: PeerId, request: ExtraRequest) { + protocol.send_finality_proof_request(peer, message::generic::FinalityProofRequest { + id: 0, + block: request.0, + request: self.0.as_ref() + .map(|builder| builder.build_request_data(&request.0)) + .unwrap_or_default(), + }) + } + + fn peer_downloading_state(&self, block: B::Hash) -> PeerSyncState { + PeerSyncState::DownloadingFinalityProof(block) + } +} + +#[cfg(test)] +mod tests { + use client::error::Error as ClientError; + use test_client::runtime::{Block, Hash}; + use super::ExtraRequestsAggregator; + + #[test] + fn request_is_rescheduled_when_earlier_block_is_finalized() { + let _ = ::env_logger::try_init(); + + let mut extra_requests = ExtraRequestsAggregator::::new(); + + let hash4 = [4; 32].into(); + let hash5 = [5; 32].into(); + let hash6 = [6; 32].into(); + let hash7 = [7; 32].into(); + + fn is_descendent_of(base: &Hash, target: &Hash) -> Result { + Ok(target[0] >= base[0]) + } + + // make #4 last finalized block + extra_requests.finality_proofs().tree.import(hash4, 4, (), &is_descendent_of).unwrap(); + extra_requests.finality_proofs().tree.finalize_root(&hash4); + + // schedule request for #6 + extra_requests.finality_proofs().queue_request((hash6, 6), is_descendent_of); + + // receive finality proof for #5 + extra_requests.finality_proofs().importing_requests.insert((hash6, 6)); + extra_requests.finality_proofs().on_block_finalized(&hash5, 5, is_descendent_of).unwrap(); + extra_requests.finality_proofs().on_import_result((hash6, 6), Ok((hash5, 5))); + + // ensure that request for #6 is still pending + assert_eq!( + 
extra_requests.finality_proofs().pending_requests.iter().collect::>(), + vec![&(hash6, 6)], + ); + + // receive finality proof for #7 + extra_requests.finality_proofs().importing_requests.insert((hash6, 6)); + extra_requests.finality_proofs().on_block_finalized(&hash6, 6, is_descendent_of).unwrap(); + extra_requests.finality_proofs().on_block_finalized(&hash7, 7, is_descendent_of).unwrap(); + extra_requests.finality_proofs().on_import_result((hash6, 6), Ok((hash7, 7))); + + // ensure that there's no request for #6 + assert_eq!( + extra_requests.finality_proofs().pending_requests.iter().collect::>(), + Vec::<&(Hash, u64)>::new(), + ); + } +} diff --git a/core/network/src/lib.rs b/core/network/src/lib.rs index 4c538e2cc0ee1..cd5772b2ae7b5 100644 --- a/core/network/src/lib.rs +++ b/core/network/src/lib.rs @@ -30,6 +30,7 @@ mod protocol; mod chain; mod blocks; mod on_demand; +mod extra_requests; mod util; pub mod config; pub mod consensus_gossip; @@ -40,7 +41,7 @@ pub mod specialization; #[cfg(any(test, feature = "test-helpers"))] pub mod test; -pub use chain::Client as ClientHandle; +pub use chain::{Client as ClientHandle, FinalityProofProvider}; pub use service::{ Service, FetchFuture, TransactionPool, ManageNetwork, NetworkMsg, SyncProvider, ExHashT, ReportHandle, diff --git a/core/network/src/message.rs b/core/network/src/message.rs index 85a526be5341c..31667033890dd 100644 --- a/core/network/src/message.rs +++ b/core/network/src/message.rs @@ -23,6 +23,7 @@ pub use self::generic::{ BlockAnnounce, RemoteCallRequest, RemoteReadRequest, RemoteHeaderRequest, RemoteHeaderResponse, RemoteChangesRequest, RemoteChangesResponse, + FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteReadChildRequest, }; @@ -200,6 +201,10 @@ pub mod generic { RemoteChangesResponse(RemoteChangesResponse), /// Remote child storage read request. RemoteReadChildRequest(RemoteReadChildRequest), + /// Finality proof request. + FinalityProofRequest(FinalityProofRequest), + /// Finality proof response. + FinalityProofResponse(FinalityProofResponse), /// Chain-specific message #[codec(index = "255")] ChainSpecific(Vec), @@ -359,4 +364,26 @@ pub mod generic { /// Missing changes tries roots proof. pub roots_proof: Vec>, } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Finality proof request. + pub struct FinalityProofRequest { + /// Unique request id. + pub id: RequestId, + /// Hash of the block to request proof for. + pub block: H, + /// Additional data blob (that both requester and provider understand) required for proving finality. + pub request: Vec, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Finality proof response. + pub struct FinalityProofResponse { + /// Id of a request this response was made for. + pub id: RequestId, + /// Hash of the block (the same as in the FinalityProofRequest). + pub block: H, + /// Finality proof (if available). + pub proof: Option>, + } } diff --git a/core/network/src/on_demand.rs b/core/network/src/on_demand.rs index 080eb7f046386..504cf23385e78 100644 --- a/core/network/src/on_demand.rs +++ b/core/network/src/on_demand.rs @@ -155,6 +155,11 @@ impl OnDemand where } } + /// Get checker reference. + pub fn checker(&self) -> &Arc> { + &self.checker + } + /// Sets weak reference to network service.
pub fn set_network_sender(&self, network_sender: NetworkChan) { self.network_sender.lock().replace(network_sender); diff --git a/core/network/src/protocol.rs b/core/network/src/protocol.rs index c6ae27cb16c39..d70a79f8798de 100644 --- a/core/network/src/protocol.rs +++ b/core/network/src/protocol.rs @@ -20,7 +20,11 @@ use primitives::storage::StorageKey; use consensus::{import_queue::IncomingBlock, import_queue::Origin, BlockOrigin}; use runtime_primitives::{generic::BlockId, ConsensusEngineId, Justification}; use runtime_primitives::traits::{As, Block as BlockT, Header as HeaderT, NumberFor, Zero}; -use crate::message::{self, BlockRequest as BlockRequestMessage, Message}; +use consensus::import_queue::SharedFinalityProofRequestBuilder; +use crate::message::{ + self, BlockRequest as BlockRequestMessage, + FinalityProofRequest as FinalityProofRequestMessage, Message, +}; use crate::message::generic::{Message as GenericMessage, ConsensusMessage}; use crate::consensus_gossip::{ConsensusGossip, MessageRecipient as GossipMessageRecipient}; use crate::on_demand::OnDemandService; @@ -34,7 +38,7 @@ use std::collections::{BTreeMap, HashMap}; use std::sync::Arc; use std::{cmp, num::NonZeroUsize, time}; use log::{trace, debug, warn, error}; -use crate::chain::Client; +use crate::chain::{Client, FinalityProofProvider}; use client::light::fetcher::ChangesProof; use crate::{error, util::LruHashSet}; @@ -163,6 +167,9 @@ pub trait Context { /// Request a block from a peer. fn send_block_request(&mut self, who: PeerId, request: BlockRequestMessage); + /// Request a finality proof from a peer. + fn send_finality_proof_request(&mut self, who: PeerId, request: FinalityProofRequestMessage); + /// Send a consensus message to a peer. fn send_consensus(&mut self, who: PeerId, consensus: ConsensusMessage); @@ -205,6 +212,12 @@ impl<'a, B: BlockT + 'a, H: ExHashT + 'a> Context for ProtocolContext<'a, B, ) } + fn send_finality_proof_request(&mut self, who: PeerId, request: FinalityProofRequestMessage) { + send_message(&mut self.context_data.peers, &self.network_chan, who, + GenericMessage::FinalityProofRequest(request) + ) + } + fn send_consensus(&mut self, who: PeerId, consensus: ConsensusMessage) { send_message(&mut self.context_data.peers, &self.network_chan, who, GenericMessage::Consensus(consensus) @@ -223,6 +236,7 @@ struct ContextData { // All connected peers peers: HashMap>, pub chain: Arc>, + pub finality_proof_provider: Option>>, } /// A task, consisting of a user-provided closure, to be executed on the Protocol thread. @@ -263,6 +277,12 @@ pub enum ProtocolMsg> { RequestJustification(B::Hash, NumberFor), /// Inform protocol whether a justification was successfully imported. JustificationImportResult(B::Hash, NumberFor, bool), + /// Set finality proof request builder. + SetFinalityProofRequestBuilder(SharedFinalityProofRequestBuilder), + /// Tell protocol to request finality proof for a block. + RequestFinalityProof(B::Hash, NumberFor), + /// Inform protocol whether a finality proof was successfully imported. + FinalityProofImportResult((B::Hash, NumberFor), Result<(B::Hash, NumberFor), ()>), /// Propagate a block to peers. AnnounceBlock(B::Hash), /// A block has been imported (sent by the client). 
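As a sketch of how the consensus side is expected to drive the new `ProtocolMsg` variants (illustrative only; it simply mirrors the `NetworkLink` changes in core/network/src/service.rs further below, and assumes the usual `ProtocolMsg<B, S>` generic parameters):

use futures::sync::mpsc;
use runtime_primitives::traits::{Block as BlockT, NumberFor};
use crate::protocol::ProtocolMsg;
use crate::specialization::NetworkSpecialization;

// Hypothetical helper: ask the protocol thread to fetch a finality proof.
fn ask_for_finality_proof<B: BlockT, S: NetworkSpecialization<B>>(
	protocol_sender: &mpsc::UnboundedSender<ProtocolMsg<B, S>>,
	hash: B::Hash,
	number: NumberFor<B>,
) {
	// Routed to ChainSync::request_finality_proof, which queues the request
	// in the finality proofs ExtraRequests and dispatches it to an eligible peer.
	let _ = protocol_sender.unbounded_send(ProtocolMsg::RequestFinalityProof(hash, number));
}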
@@ -290,6 +310,7 @@ impl, H: ExHashT> Protocol { network_chan: NetworkChan, config: ProtocolConfig, chain: Arc>, + finality_proof_provider: Option>>, on_demand: Option>>, transaction_pool: Arc>, specialization: S, @@ -306,6 +327,7 @@ impl, H: ExHashT> Protocol { context_data: ContextData { peers: HashMap::new(), chain, + finality_proof_provider, }, on_demand, genesis_hash: info.chain.genesis_hash, @@ -408,6 +430,16 @@ impl, H: ExHashT> Protocol { self.sync.request_justification(&hash, number, &mut context); }, ProtocolMsg::JustificationImportResult(hash, number, success) => self.sync.justification_import_result(hash, number, success), + ProtocolMsg::SetFinalityProofRequestBuilder(builder) => self.sync.set_finality_proof_request_builder(builder), + ProtocolMsg::RequestFinalityProof(hash, number) => { + let mut context = + ProtocolContext::new(&mut self.context_data, &self.network_chan); + self.sync.request_finality_proof(&hash, number, &mut context); + }, + ProtocolMsg::FinalityProofImportResult( + requested_block, + finalization_result, + ) => self.sync.finality_proof_import_result(requested_block, finalization_result), ProtocolMsg::PropagateExtrinsics => self.propagate_extrinsics(), #[cfg(any(test, feature = "test-helpers"))] ProtocolMsg::Tick => self.tick(), @@ -476,6 +508,8 @@ impl, H: ExHashT> Protocol { GenericMessage::RemoteHeaderResponse(response) => self.on_remote_header_response(who, response), GenericMessage::RemoteChangesRequest(request) => self.on_remote_changes_request(who, request), GenericMessage::RemoteChangesResponse(response) => self.on_remote_changes_response(who, response), + GenericMessage::FinalityProofRequest(request) => self.on_finality_proof_request(who, request), + GenericMessage::FinalityProofResponse(response) => return self.on_finality_proof_response(who, response), GenericMessage::Consensus(msg) => { if self.context_data.peers.get(&who).map_or(false, |peer| peer.info.protocol_version > 2) { self.consensus_gossip.on_incoming( @@ -1099,6 +1133,53 @@ impl, H: ExHashT> Protocol { .as_ref() .map(|s| s.on_remote_changes_response(who, response)); } + + fn on_finality_proof_request( + &mut self, + who: PeerId, + request: message::FinalityProofRequest, + ) { + trace!(target: "sync", "Finality proof request from {} for {}", who, request.block); + let finality_proof = self.context_data.finality_proof_provider.as_ref() + .ok_or_else(|| String::from("Finality provider is not configured")) + .and_then(|provider| provider.prove_finality(request.block, &request.request) + .map_err(|e| e.to_string())); + let finality_proof = match finality_proof { + Ok(finality_proof) => finality_proof, + Err(error) => { + trace!(target: "sync", "Finality proof request from {} for {} failed with: {}", + who, request.block, error); + None + }, + }; + self.send_message( + who, + GenericMessage::FinalityProofResponse(message::FinalityProofResponse { + id: 0, + block: request.block, + proof: finality_proof, + }), + ); + } + + fn on_finality_proof_response( + &mut self, + who: PeerId, + response: message::FinalityProofResponse, + ) -> CustomMessageOutcome { + trace!(target: "sync", "Finality proof response from {} for {}", who, response.block); + let outcome = self.sync.on_block_finality_proof_data( + &mut ProtocolContext::new(&mut self.context_data, &self.network_chan), + who, + response, + ); + + if let Some((origin, hash, nb, proof)) = outcome { + CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) + } else { + CustomMessageOutcome::None + } + } } /// Outcome of an incoming custom
message. @@ -1106,6 +1187,7 @@ impl, H: ExHashT> Protocol { pub enum CustomMessageOutcome { BlockImport(BlockOrigin, Vec>), JustificationImport(Origin, B::Hash, NumberFor, Justification), + FinalityProofImport(Origin, B::Hash, NumberFor, Vec), None, } diff --git a/core/network/src/service.rs b/core/network/src/service.rs index f681cc8fe9ec1..8a81524bf9d0f 100644 --- a/core/network/src/service.rs +++ b/core/network/src/service.rs @@ -26,7 +26,7 @@ use network_libp2p::{ProtocolId, NetworkConfiguration}; use network_libp2p::{start_service, parse_str_addr, Service as NetworkService, ServiceEvent as NetworkServiceEvent}; use network_libp2p::{RegisteredProtocol, NetworkState}; use peerset::PeersetHandle; -use consensus::import_queue::{ImportQueue, Link}; +use consensus::import_queue::{ImportQueue, Link, SharedFinalityProofRequestBuilder}; use runtime_primitives::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId}; use crate::consensus_gossip::{ConsensusGossip, MessageRecipient as GossipMessageRecipient}; @@ -115,6 +115,31 @@ impl> Link for NetworkLink { let _ = self.protocol_sender.unbounded_send(ProtocolMsg::RequestJustification(hash.clone(), number)); } + fn request_finality_proof(&self, hash: &B::Hash, number: NumberFor) { + let _ = self.protocol_sender.unbounded_send(ProtocolMsg::RequestFinalityProof( + hash.clone(), + number, + )); + } + + fn finality_proof_imported( + &self, + who: PeerId, + request_block: (B::Hash, NumberFor), + finalization_result: Result<(B::Hash, NumberFor), ()>, + ) { + let success = finalization_result.is_ok(); + let _ = self.protocol_sender.unbounded_send(ProtocolMsg::FinalityProofImportResult( + request_block, + finalization_result, + )); + if !success { + info!("Invalid finality proof provided by {} for #{}", who, request_block.0); + let _ = self.network_sender.send(NetworkMsg::ReportPeer(who.clone(), i32::min_value())); + let _ = self.network_sender.send(NetworkMsg::DisconnectPeer(who.clone())); + } + } + fn report_peer(&self, who: PeerId, reputation_change: i32) { self.network_sender.send(NetworkMsg::ReportPeer(who, reputation_change)); } @@ -122,6 +147,10 @@ impl> Link for NetworkLink { fn restart(&self) { let _ = self.protocol_sender.unbounded_send(ProtocolMsg::RestartSync); } + + fn set_finality_proof_request_builder(&self, request_builder: SharedFinalityProofRequestBuilder) { + let _ = self.protocol_sender.unbounded_send(ProtocolMsg::SetFinalityProofRequestBuilder(request_builder)); + } } /// A cloneable handle for reporting cost/benefits of peers. 
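For context, a sketch of the builder behind `SharedFinalityProofRequestBuilder`, which the new `Link::set_finality_proof_request_builder` callback carries. The trait itself lives in consensus-common and is not shown in this diff, so the trait name and bounds below are assumptions; only the method name `build_request_data` is taken from its use in extra_requests.rs above.

use runtime_primitives::traits::Block as BlockT;

// Hypothetical builder that sends an empty request blob, i.e. "prove finality
// from whatever state the serving peer considers current". GRANDPA's real
// builder instead encodes data such as the authority set id it knows about.
struct EmptyRequestBuilder;

impl<B: BlockT> FinalityProofRequestBuilder<B> for EmptyRequestBuilder {
	fn build_request_data(&self, _hash: &B::Hash) -> Vec<u8> {
		Vec::new()
	}
}

Once the import queue starts, it would be installed through the link, e.g. `link.set_finality_proof_request_builder(Arc::new(EmptyRequestBuilder))`.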
@@ -179,6 +208,7 @@ impl> Service { network_chan.clone(), params.config, params.chain, + params.finality_proof_provider, params.on_demand, params.transaction_pool, params.specialization, @@ -593,6 +623,8 @@ fn run_thread, H: ExHashT>( import_queue.import_blocks(origin, blocks), CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => import_queue.import_justification(origin, hash, nb, justification), + CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) => + import_queue.import_finality_proof(origin, hash, nb, proof), CustomMessageOutcome::None => {} } } diff --git a/core/network/src/sync.rs b/core/network/src/sync.rs index 151d13e829bb2..23cb80c7892c1 100644 --- a/core/network/src/sync.rs +++ b/core/network/src/sync.rs @@ -32,18 +32,16 @@ use std::cmp::max; use std::collections::{HashMap, VecDeque}; -use std::time::{Duration, Instant}; -use log::{debug, trace, info, warn}; +use log::{debug, trace, warn, info}; use crate::protocol::Context; -use fork_tree::ForkTree; use network_libp2p::PeerId; use client::{BlockStatus, ClientInfo}; -use consensus::{BlockOrigin, import_queue::IncomingBlock}; +use consensus::{BlockOrigin, import_queue::{IncomingBlock, SharedFinalityProofRequestBuilder}}; use client::error::Error as ClientError; use crate::blocks::BlockCollection; -use runtime_primitives::Justification; +use crate::extra_requests::ExtraRequestsAggregator; use runtime_primitives::traits::{Block as BlockT, Header as HeaderT, As, NumberFor, Zero, CheckedSub}; -use runtime_primitives::generic::BlockId; +use runtime_primitives::{Justification, generic::BlockId}; use crate::message; use crate::config::Roles; use std::collections::HashSet; @@ -54,8 +52,6 @@ const MAX_BLOCKS_TO_REQUEST: usize = 128; const MAX_IMPORTING_BLOCKS: usize = 2048; // Number of blocks in the queue that prevents ancestry search. const MAJOR_SYNC_BLOCKS: usize = 5; -// Time to wait before trying to get a justification from the same peer. -const JUSTIFICATION_RETRY_WAIT: Duration = Duration::from_secs(10); // Number of recently announced blocks to track for each peer. const ANNOUNCE_HISTORY_SIZE: usize = 64; // Max number of blocks to download for unknown forks. @@ -68,7 +64,7 @@ const ANCESTRY_BLOCK_ERROR_REPUTATION_CHANGE: i32 = -(1 << 9); const GENESIS_MISMATCH_REPUTATION_CHANGE: i32 = i32::min_value() + 1; #[derive(Debug)] -struct PeerSync { +pub(crate) struct PeerSync { pub common_number: NumberFor, pub best_hash: B::Hash, pub best_number: NumberFor, @@ -86,7 +82,7 @@ pub(crate) struct PeerInfo { } #[derive(Copy, Clone, Eq, PartialEq, Debug)] -enum AncestorSearchState { +pub(crate) enum AncestorSearchState { /// Use exponential backoff to find an ancestor, then switch to binary search. /// We keep track of the exponent. ExponentialBackoff(NumberFor), @@ -96,270 +92,13 @@ enum AncestorSearchState { } #[derive(Copy, Clone, Eq, PartialEq, Debug)] -enum PeerSyncState { +pub(crate) enum PeerSyncState { AncestorSearch(NumberFor, AncestorSearchState), Available, DownloadingNew(NumberFor), DownloadingStale(B::Hash), DownloadingJustification(B::Hash), -} - -/// Pending justification request for the given block (hash and number). -type PendingJustification = (::Hash, NumberFor); - -/// Manages pending block justification requests. Multiple justifications may be -/// requested for competing forks, or for the same branch at different -/// (increasing) heights. 
This structure will guarantee that justifications are -/// fetched in-order, and that obsolete changes are pruned (when finalizing a -/// competing fork). -struct PendingJustifications { - justifications: ForkTree, ()>, - pending_requests: VecDeque>, - peer_requests: HashMap>, - previous_requests: HashMap, Vec<(PeerId, Instant)>>, - importing_requests: HashSet>, -} - -impl PendingJustifications { - fn new() -> PendingJustifications { - PendingJustifications { - justifications: ForkTree::new(), - pending_requests: VecDeque::new(), - peer_requests: HashMap::new(), - previous_requests: HashMap::new(), - importing_requests: HashSet::new(), - } - } - - /// Dispatches all possible pending requests to the given peers. Peers are - /// filtered according to the current known best block (i.e. we won't send a - /// justification request for block #10 to a peer at block #2), and we also - /// throttle requests to the same peer if a previous justification request - /// yielded no results. - fn dispatch(&mut self, peers: &mut HashMap>, protocol: &mut Context) { - if self.pending_requests.is_empty() { - return; - } - - let initial_pending_requests = self.pending_requests.len(); - - // clean up previous failed requests so we can retry again - for (_, requests) in self.previous_requests.iter_mut() { - requests.retain(|(_, instant)| instant.elapsed() < JUSTIFICATION_RETRY_WAIT); - } - - let mut available_peers = peers.iter().filter_map(|(peer, sync)| { - // don't request to any peers that already have pending requests or are unavailable - if sync.state != PeerSyncState::Available || self.peer_requests.contains_key(&peer) { - None - } else { - Some((peer.clone(), sync.best_number)) - } - }).collect::>(); - - let mut last_peer = available_peers.back().map(|p| p.0.clone()); - let mut unhandled_requests = VecDeque::new(); - - loop { - let (peer, peer_best_number) = match available_peers.pop_front() { - Some(p) => p, - _ => break, - }; - - // only ask peers that have synced past the block number that we're - // asking the justification for and to whom we haven't already made - // the same request recently - let peer_eligible = { - let request = match self.pending_requests.front() { - Some(r) => r.clone(), - _ => break, - }; - - peer_best_number >= request.1 && - !self.previous_requests - .get(&request) - .map(|requests| requests.iter().any(|i| i.0 == peer)) - .unwrap_or(false) - }; - - if !peer_eligible { - available_peers.push_back((peer.clone(), peer_best_number)); - - // we tried all peers and none can answer this request - if Some(peer) == last_peer { - last_peer = available_peers.back().map(|p| p.0.clone()); - - let request = self.pending_requests.pop_front() - .expect("verified to be Some in the beginning of the loop; qed"); - - unhandled_requests.push_back(request); - } - - continue; - } - - last_peer = available_peers.back().map(|p| p.0.clone()); - - let request = self.pending_requests.pop_front() - .expect("verified to be Some in the beginning of the loop; qed"); - - self.peer_requests.insert(peer.clone(), request); - - peers.get_mut(&peer) - .expect("peer was is taken from available_peers; available_peers is a subset of peers; qed") - .state = PeerSyncState::DownloadingJustification(request.0); - - trace!(target: "sync", "Requesting justification for block #{} from {}", request.0, peer); - let request = message::generic::BlockRequest { - id: 0, - fields: message::BlockAttributes::JUSTIFICATION, - from: message::FromBlock::Hash(request.0), - to: None, - direction: message::Direction::Ascending, - max: 
Some(1), - }; - - protocol.send_block_request(peer, request); - } - - self.pending_requests.append(&mut unhandled_requests); - - trace!(target: "sync", "Dispatched {} justification requests ({} pending)", - initial_pending_requests - self.pending_requests.len(), - self.pending_requests.len(), - ); - } - - /// Queue a justification request (without dispatching it). - fn queue_request( - &mut self, - justification: &PendingJustification, - is_descendent_of: F, - ) where F: Fn(&B::Hash, &B::Hash) -> Result { - match self.justifications.import(justification.0.clone(), justification.1.clone(), (), &is_descendent_of) { - Ok(true) => { - // this is a new root so we add it to the current `pending_requests` - self.pending_requests.push_back((justification.0, justification.1)); - }, - Err(err) => { - warn!(target: "sync", "Failed to insert requested justification {:?} {:?} into tree: {:?}", - justification.0, - justification.1, - err, - ); - return; - }, - _ => {}, - }; - } - - /// Retry any pending request if a peer disconnected. - fn peer_disconnected(&mut self, who: PeerId) { - if let Some(request) = self.peer_requests.remove(&who) { - self.pending_requests.push_front(request); - } - } - - /// Process the import of a justification. - /// Queues a retry in case the import failed. - fn justification_import_result(&mut self, hash: B::Hash, number: NumberFor, success: bool) { - let request = (hash, number); - - if !self.importing_requests.remove(&request) { - debug!(target: "sync", "Got justification import result for unknown justification {:?} {:?} request.", - request.0, - request.1, - ); - - return; - }; - - if success { - if self.justifications.finalize_root(&request.0).is_none() { - warn!(target: "sync", "Imported justification for {:?} {:?} which isn't a root in the tree: {:?}", - request.0, - request.1, - self.justifications.roots().collect::>(), - ); - - return; - }; - - self.previous_requests.clear(); - self.peer_requests.clear(); - self.pending_requests = - self.justifications.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect(); - - return; - } - self.pending_requests.push_front(request); - } - - /// Processes the response for the request previously sent to the given - /// peer. Queues a retry in case the given justification - /// was `None`. - /// - /// Returns `Some` if this produces a justification that must be imported in the import queue. - #[must_use] - fn on_response( - &mut self, - who: PeerId, - justification: Option, - ) -> Option<(PeerId, B::Hash, NumberFor, Justification)> { - // we assume that the request maps to the given response, this is - // currently enforced by the outer network protocol before passing on - // messages to chain sync. - if let Some(request) = self.peer_requests.remove(&who) { - if let Some(justification) = justification { - self.importing_requests.insert(request); - return Some((who, request.0, request.1, justification)) - } - - self.previous_requests - .entry(request) - .or_insert(Vec::new()) - .push((who, Instant::now())); - - self.pending_requests.push_front(request); - } - - None - } - - /// Removes any pending justification requests for blocks lower than the - /// given best finalized. 
- fn on_block_finalized( - &mut self, - best_finalized_hash: &B::Hash, - best_finalized_number: NumberFor, - is_descendent_of: F, - ) -> Result<(), fork_tree::Error> - where F: Fn(&B::Hash, &B::Hash) -> Result - { - if self.importing_requests.contains(&(*best_finalized_hash, best_finalized_number)) { - // we imported this justification ourselves, so we should get back a response - // from the import queue through `justification_import_result` - return Ok(()); - } - - self.justifications.finalize(best_finalized_hash, best_finalized_number, &is_descendent_of)?; - - let roots = self.justifications.roots().collect::>(); - - self.pending_requests.retain(|(h, n)| roots.contains(&(h, n, &()))); - self.peer_requests.retain(|_, (h, n)| roots.contains(&(h, n, &()))); - self.previous_requests.retain(|(h, n), _| roots.contains(&(h, n, &()))); - - Ok(()) - } - - /// Clear all data. - fn clear(&mut self) { - self.justifications = ForkTree::new(); - self.pending_requests.clear(); - self.peer_requests.clear(); - self.previous_requests.clear(); - } + DownloadingFinalityProof(B::Hash), } /// Relay chain sync strategy. @@ -370,7 +109,7 @@ pub struct ChainSync { best_queued_number: NumberFor, best_queued_hash: B::Hash, required_block_attributes: message::BlockAttributes, - justifications: PendingJustifications, + extra_requests: ExtraRequestsAggregator, queue_blocks: HashSet, best_importing_number: NumberFor, } @@ -428,7 +167,7 @@ impl ChainSync { blocks: BlockCollection::new(), best_queued_hash: info.best_queued_hash.unwrap_or(info.chain.best_hash), best_queued_number: info.best_queued_number.unwrap_or(info.chain.best_number), - justifications: PendingJustifications::new(), + extra_requests: ExtraRequestsAggregator::new(), required_block_attributes, queue_blocks: Default::default(), best_importing_number: Zero::zero(), @@ -664,7 +403,7 @@ impl ChainSync { vec![] } }, - PeerSyncState::Available | PeerSyncState::DownloadingJustification(..) => Vec::new(), + PeerSyncState::Available | PeerSyncState::DownloadingJustification(..) | PeerSyncState::DownloadingFinalityProof(..) => Vec::new(), } } else { Vec::new() @@ -722,7 +461,7 @@ impl ChainSync { return None; } - return self.justifications.on_response( + return self.extra_requests.justifications().on_response( who, response.justification, ); @@ -744,6 +483,42 @@ impl ChainSync { None } + /// Handle new finality proof data. + pub(crate) fn on_block_finality_proof_data( + &mut self, + protocol: &mut Context, + who: PeerId, + response: message::FinalityProofResponse, + ) -> Option<(PeerId, B::Hash, NumberFor, Vec)> { + if let Some(ref mut peer) = self.peers.get_mut(&who) { + if let PeerSyncState::DownloadingFinalityProof(hash) = peer.state { + peer.state = PeerSyncState::Available; + + // we only request one finality proof at a time + if hash != response.block { + info!( + "Invalid block finality proof provided: requested: {:?} got: {:?}", + hash, + response.block, + ); + + protocol.report_peer(who.clone(), i32::min_value()); + protocol.disconnect_peer(who); + return None; + } + + return self.extra_requests.finality_proofs().on_response( + who, + response.proof, + ); + } + } + + self.maintain_sync(protocol); + None + } + + /// A batch of blocks have been processed, with or without errors. /// Call this when a batch of blocks have been processed by the import queue, with or without /// errors. 
pub fn blocks_processed(&mut self, processed_blocks: Vec, has_error: bool) { @@ -761,13 +536,13 @@ impl ChainSync { for peer in peers { self.download_new(protocol, peer); } - self.justifications.dispatch(&mut self.peers, protocol); + self.extra_requests.dispatch(&mut self.peers, protocol); } /// Called periodically to perform any time-based actions. Must be called at a regular /// interval. pub fn tick(&mut self, protocol: &mut Context) { - self.justifications.dispatch(&mut self.peers, protocol); + self.extra_requests.dispatch(&mut self.peers, protocol); } /// Request a justification for the given block. @@ -775,23 +550,53 @@ impl ChainSync { /// Uses `protocol` to queue a new justification request and tries to dispatch all pending /// requests. pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor, protocol: &mut Context) { - self.justifications.queue_request( - &(*hash, number), + self.extra_requests.justifications().queue_request( + (*hash, number), |base, block| protocol.client().is_descendent_of(base, block), ); - self.justifications.dispatch(&mut self.peers, protocol); + self.extra_requests.justifications().dispatch(&mut self.peers, protocol); } /// Clears all pending justification requests. pub fn clear_justification_requests(&mut self) { - self.justifications.clear(); + self.extra_requests.justifications().clear(); } /// Call this when a justification has been processed by the import queue, with or without /// errors. pub fn justification_import_result(&mut self, hash: B::Hash, number: NumberFor, success: bool) { - self.justifications.justification_import_result(hash, number, success); + let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; + if !self.extra_requests.justifications().on_import_result((hash, number), finalization_result) { + debug!(target: "sync", "Got justification import result for unknown justification {:?} {:?} request.", + hash, + number, + ); + } + } + + /// Request a finality proof for the given block. + /// + /// Queues a new finality proof request and tries to dispatch all pending requests. + pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor, protocol: &mut Context) { + self.extra_requests.finality_proofs().queue_request( + (*hash, number), + |base, block| protocol.client().is_descendent_of(base, block), + ); + + self.extra_requests.finality_proofs().dispatch(&mut self.peers, protocol); + } + + pub fn finality_proof_import_result( + &mut self, + request_block: (B::Hash, NumberFor), + finalization_result: Result<(B::Hash, NumberFor), ()>, + ) { + self.extra_requests.finality_proofs().on_import_result(request_block, finalization_result); + } + + pub fn set_finality_proof_request_builder(&mut self, request_builder: SharedFinalityProofRequestBuilder) { + self.extra_requests.finality_proofs().essence().0 = Some(request_builder); } /// Notify about successful import of the given block. @@ -801,12 +606,12 @@ impl ChainSync { /// Notify about finalization of the given block. 
 	/// Notify about successful import of the given block.
@@ -801,12 +606,12 @@ impl<B: BlockT> ChainSync<B> {
 	/// Notify about finalization of the given block.
 	pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor<B>, protocol: &mut Context<B>) {
-		if let Err(err) = self.justifications.on_block_finalized(
+		if let Err(err) = self.extra_requests.on_block_finalized(
 			hash,
 			number,
-			|base, block| protocol.client().is_descendent_of(base, block),
+			&|base, block| protocol.client().is_descendent_of(base, block),
 		) {
-			warn!(target: "sync", "Error cleaning up pending justification requests: {:?}", err);
+			warn!(target: "sync", "Error cleaning up pending extra data requests: {:?}", err);
 		};
 	}
@@ -916,7 +721,7 @@ impl<B: BlockT> ChainSync<B> {
 	pub(crate) fn peer_disconnected(&mut self, protocol: &mut Context<B>, who: PeerId) {
 		self.blocks.clear_peer_download(&who);
 		self.peers.remove(&who);
-		self.justifications.peer_disconnected(who);
+		self.extra_requests.peer_disconnected(who);
 		self.maintain_sync(protocol);
 	}
diff --git a/core/network/src/test/block_import.rs b/core/network/src/test/block_import.rs
index 3b5e44cc47e5a..550d3c75ed0ef 100644
--- a/core/network/src/test/block_import.rs
+++ b/core/network/src/test/block_import.rs
@@ -77,7 +77,7 @@ fn async_import_queue_drops() {
 	// Perform this test multiple times since it exhibits non-deterministic behavior.
 	for _ in 0..100 {
 		let verifier = Arc::new(PassThroughVerifier(true));
-		let queue = BasicQueue::new(verifier, Arc::new(test_client::new()), None);
+		let queue = BasicQueue::new(verifier, Arc::new(test_client::new()), None, None, None);
 		queue.start(Box::new(TestLink{})).unwrap();
 		drop(queue);
 	}
diff --git a/core/network/src/test/mod.rs b/core/network/src/test/mod.rs
index f8d0ce9e3b01c..c2deab5a322b2 100644
--- a/core/network/src/test/mod.rs
+++ b/core/network/src/test/mod.rs
@@ -26,11 +26,16 @@ use std::sync::Arc;
 use std::sync::atomic::{AtomicBool, Ordering};

 use log::trace;
-use client;
+use crate::chain::FinalityProofProvider;
+use client::{self, ClientInfo, BlockchainEvents, FinalityNotifications, in_mem::Backend as InMemoryBackend, error::Result as ClientResult};
 use client::block_builder::BlockBuilder;
-use crate::config::ProtocolConfig;
+use client::backend::AuxStore;
+use crate::config::{ProtocolConfig, Roles};
 use consensus::import_queue::{BasicQueue, ImportQueue, IncomingBlock};
-use consensus::import_queue::{Link, SharedBlockImport, SharedJustificationImport, Verifier};
+use consensus::import_queue::{
+	Link, SharedBlockImport, SharedJustificationImport, Verifier, SharedFinalityProofImport,
+	SharedFinalityProofRequestBuilder,
+};
 use consensus::{Error as ConsensusError, ErrorKind as ConsensusErrorKind};
 use consensus::{BlockOrigin, ForkChoiceStrategy, ImportBlock, JustificationImport};
 use crate::consensus_gossip::{ConsensusGossip, MessageRecipient as GossipMessageRecipient, TopicNotification};
@@ -39,7 +44,7 @@ use futures::{prelude::*, sync::{mpsc, oneshot}};
 use crate::message::Message;
 use network_libp2p::PeerId;
 use parking_lot::{Mutex, RwLock};
-use primitives::{H256, sr25519::Public as AuthorityId};
+use primitives::{H256, sr25519::Public as AuthorityId, Blake2Hasher};
 use crate::protocol::{ConnectedPeer, Context, Protocol, ProtocolMsg, CustomMessageOutcome};
 use runtime_primitives::generic::BlockId;
 use runtime_primitives::traits::{AuthorityIdFor, Block as BlockT, Digest, DigestItem, Header, NumberFor};
@@ -111,7 +116,79 @@ impl NetworkSpecialization<Block> for DummySpecialization {
 	}
 }

-pub type PeersClient = client::Client<test_client::Backend, test_client::Executor, Block, test_client::runtime::RuntimeApi>;
+pub type PeersFullClient = client::Client<test_client::Backend, test_client::Executor, Block, test_client::runtime::RuntimeApi>;
+pub type PeersLightClient = client::Client<test_client::LightBackend, test_client::LightExecutor, Block, test_client::runtime::RuntimeApi>;
+
+#[derive(Clone)]
+pub enum PeersClient {
+	Full(Arc<PeersFullClient>),
+	Light(Arc<PeersLightClient>),
+}
+
+impl PeersClient {
+	pub fn as_full(&self) -> Option<Arc<PeersFullClient>> {
+		match *self {
+			PeersClient::Full(ref client) => Some(client.clone()),
+			_ => None,
+		}
+	}
+
+	pub fn as_block_import(&self) -> SharedBlockImport<Block> {
+		match *self {
+			PeersClient::Full(ref client) => client.clone() as _,
+			PeersClient::Light(ref client) => client.clone() as _,
+		}
+	}
+
+	pub fn as_in_memory_backend(&self) -> InMemoryBackend<Block, Blake2Hasher> {
+		match *self {
+			PeersClient::Full(ref client) => client.backend().as_in_memory(),
+			PeersClient::Light(_) => unimplemented!("TODO"),
+		}
+	}
+
+	pub fn get_aux(&self, key: &[u8]) -> ClientResult<Option<Vec<u8>>> {
+		match *self {
+			PeersClient::Full(ref client) => client.backend().get_aux(key),
+			PeersClient::Light(ref client) => client.backend().get_aux(key),
+		}
+	}
+
+	pub fn info(&self) -> ClientResult<ClientInfo<Block>> {
+		match *self {
+			PeersClient::Full(ref client) => client.info(),
+			PeersClient::Light(ref client) => client.info(),
+		}
+	}
+
+	pub fn header(&self, block: &BlockId<Block>) -> ClientResult<Option<<Block as BlockT>::Header>> {
+		match *self {
+			PeersClient::Full(ref client) => client.header(block),
+			PeersClient::Light(ref client) => client.header(block),
+		}
+	}
+
+	pub fn justification(&self, block: &BlockId<Block>) -> ClientResult<Option<Justification>> {
+		match *self {
+			PeersClient::Full(ref client) => client.justification(block),
+			PeersClient::Light(ref client) => client.justification(block),
+		}
+	}
+
+	pub fn finality_notification_stream(&self) -> FinalityNotifications<Block> {
+		match *self {
+			PeersClient::Full(ref client) => client.finality_notification_stream(),
+			PeersClient::Light(ref client) => client.finality_notification_stream(),
+		}
+	}
+
+	pub fn finalize_block(&self, id: BlockId<Block>, justification: Option<Justification>, notify: bool) -> ClientResult<()> {
+		match *self {
+			PeersClient::Full(ref client) => client.finalize_block(id, justification, notify),
+			PeersClient::Light(ref client) => client.finalize_block(id, justification, notify),
+		}
+	}
+}

 /// A Link that can wait for a block to have been imported.
 pub struct TestLink<S: NetworkSpecialization<Block>> {
@@ -155,6 +232,23 @@ impl<S: NetworkSpecialization<Block>> Link<Block> for TestLink<S> {
 		self.link.request_justification(hash, number);
 	}

+	fn finality_proof_imported(
+		&self,
+		who: PeerId,
+		request_block: (Hash, NumberFor<Block>),
+		finalization_result: Result<(Hash, NumberFor<Block>), ()>,
+	) {
+		self.link.finality_proof_imported(who, request_block, finalization_result);
+	}
+
+	fn request_finality_proof(&self, hash: &Hash, number: NumberFor<Block>) {
+		self.link.request_finality_proof(hash, number);
+	}
+
+	fn set_finality_proof_request_builder(&self, request_builder: SharedFinalityProofRequestBuilder<Block>) {
+		self.link.set_finality_proof_request_builder(request_builder);
+	}
+
 	fn report_peer(&self, who: PeerId, reputation_change: i32) {
 		self.link.report_peer(who, reputation_change);
 	}
@@ -178,7 +272,7 @@ pub struct Peer<D, S: NetworkSpecialization<Block>> {
 	pub is_major_syncing: Arc<AtomicBool>,
 	pub peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>>,
 	pub peer_id: PeerId,
-	client: Arc<PeersClient>,
+	client: PeersClient,
 	net_proto_channel: ProtocolChannel<S>,
 	pub import_queue: Box<BasicQueue<Block>>,
 	pub data: D,
@@ -188,7 +282,7 @@ pub struct Peer<D, S: NetworkSpecialization<Block>> {

 type MessageFilter = Fn(&NetworkMsg<Block>) -> bool;

-enum FromNetworkMsg<B: BlockT> {
+pub enum FromNetworkMsg<B: BlockT> {
 	/// A peer connected, with debug info.
 	PeerConnected(PeerId, String),
 	/// A peer disconnected, with debug info.
@@ -294,7 +388,7 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
 		is_offline: Arc<AtomicBool>,
 		is_major_syncing: Arc<AtomicBool>,
 		peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>>,
-		client: Arc<PeersClient>,
+		client: PeersClient,
 		import_queue: Box<BasicQueue<Block>>,
 		network_to_protocol_sender: mpsc::UnboundedSender<FromNetworkMsg<Block>>,
 		protocol_sender: mpsc::UnboundedSender<ProtocolMsg<Block, S>>,
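A quick, illustrative use of the `PeersClient` wrapper from test code (not part of this diff):

    // Each call below resolves through one of the match arms of the wrapper
    // above, here against the Full variant.
    let client = PeersClient::Full(Arc::new(test_client::new()));
    assert!(client.as_full().is_some());
    assert_eq!(client.info().unwrap().chain.best_number, 0);
    let _import: SharedBlockImport<Block> = client.as_block_import();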
@@ -327,7 +421,7 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
 	}

 	/// Called after blockchain has been populated to update current state.
-	fn start(&self) {
+	pub fn start(&self) {
 		// Update the sync state to the latest chain state.
 		let info = self.client.info().expect("In-mem client does not fail");
 		let header = self
@@ -484,7 +578,7 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {

 	/// Add blocks to the peer -- edit the block before adding
 	pub fn generate_blocks<F>(&self, count: usize, origin: BlockOrigin, edit_block: F) -> H256
-		where F: FnMut(BlockBuilder<Block, PeersClient>) -> Block
+		where F: FnMut(BlockBuilder<Block, PeersFullClient>) -> Block
 	{
 		let best_hash = self.client.info().unwrap().chain.best_hash;
 		self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block)
@@ -493,11 +587,12 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
 	/// Add blocks to the peer -- edit the block before adding. The chain will
 	/// start at the given block ID.
 	pub fn generate_blocks_at<F>(&self, at: BlockId<Block>, count: usize, origin: BlockOrigin, mut edit_block: F) -> H256
-		where F: FnMut(BlockBuilder<Block, PeersClient>) -> Block
+		where F: FnMut(BlockBuilder<Block, PeersFullClient>) -> Block
 	{
-		let mut at = self.client.header(&at).unwrap().unwrap().hash();
+		let full_client = self.client.as_full().expect("blocks could only be generated by full clients");
+		let mut at = full_client.header(&at).unwrap().unwrap().hash();
 		for _ in 0..count {
-			let builder = self.client.new_block_at(&BlockId::Hash(at)).unwrap();
+			let builder = full_client.new_block_at(&BlockId::Hash(at)).unwrap();
 			let block = edit_block(builder);
 			let hash = block.header.hash();
 			trace!(
@@ -562,7 +657,7 @@ impl<D, S: NetworkSpecialization<Block>> Peer<D, S> {
 	}

 	/// Get a reference to the client.
-	pub fn client(&self) -> &Arc<PeersClient> {
+	pub fn client(&self) -> &PeersClient {
 		&self.client
 	}
 }
@@ -598,7 +693,7 @@ pub trait TestNetFactory: Sized {
 	/// These two need to be implemented!
 	fn from_config(config: &ProtocolConfig) -> Self;
-	fn make_verifier(&self, client: Arc<PeersClient>, config: &ProtocolConfig) -> Arc<Self::Verifier>;
+	fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig) -> Arc<Self::Verifier>;

 	/// Get reference to peer.
 	fn peer(&self, i: usize) -> &Peer<Self::PeerData, Self::Specialization>;
@@ -609,10 +704,21 @@ pub trait TestNetFactory: Sized {
 	fn set_started(&mut self, now: bool);

 	/// Get custom block import handle for fresh client, along with peer data.
-	fn make_block_import(&self, client: Arc<PeersClient>)
-		-> (SharedBlockImport<Block>, Option<SharedJustificationImport<Block>>, Self::PeerData)
+	fn make_block_import(&self, client: PeersClient)
+		-> (
+			SharedBlockImport<Block>,
+			Option<SharedJustificationImport<Block>>,
+			Option<SharedFinalityProofImport<Block>>,
+			Option<SharedFinalityProofRequestBuilder<Block>>,
+			Self::PeerData,
+		)
 	{
-		(client, None, Default::default())
+		(client.as_block_import(), None, None, None, Default::default())
+	}
+
+	/// Get finality proof provider (if supported).
+	fn make_finality_proof_provider(&self, _client: PeersClient) -> Option<Arc<FinalityProofProvider<Block>>> {
+		None
 	}

 	fn default_config() -> ProtocolConfig {
@@ -627,41 +733,21 @@ pub trait TestNetFactory: Sized {

 		for i in 0..n {
 			trace!(target: "test_network", "Adding peer {}", i);
-			net.add_peer(&config);
+			net.add_full_peer(&config);
 		}
 		net
 	}
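Downstream test factories override `make_block_import` to wire consensus-specific import logic into the queue. A hedged sketch of what such an override now looks like with the widened tuple; `my_finality_proof_import` is a hypothetical stand-in, not part of this diff:

    // Sketch of a downstream TestNetFactory override supplying a finality
    // proof import while leaving the request builder unset.
    fn make_block_import(&self, client: PeersClient)
        -> (
            SharedBlockImport<Block>,
            Option<SharedJustificationImport<Block>>,
            Option<SharedFinalityProofImport<Block>>,
            Option<SharedFinalityProofRequestBuilder<Block>>,
            Self::PeerData,
        )
    {
        let proof_import = my_finality_proof_import(&client); // assumed helper
        (client.as_block_import(), None, Some(proof_import), None, Default::default())
    }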
-	/// Add a peer.
-	fn add_peer(&mut self, config: &ProtocolConfig) {
-		let client = Arc::new(test_client::new());
-		let tx_pool = Arc::new(EmptyTransactionPool);
-		let verifier = self.make_verifier(client.clone(), config);
-		let (block_import, justification_import, data) = self.make_block_import(client.clone());
-		let (network_sender, network_port) = network_channel();
-
-		let import_queue = Box::new(BasicQueue::new(verifier, block_import, justification_import));
-		let is_offline = Arc::new(AtomicBool::new(true));
-		let is_major_syncing = Arc::new(AtomicBool::new(false));
-		let specialization = self::SpecializationFactory::create();
-		let peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>> = Arc::new(Default::default());
-
-		let (network_to_protocol_sender, mut network_to_protocol_rx) = mpsc::unbounded();
-
-		let (mut protocol, protocol_sender) = Protocol::new(
-			peers.clone(),
-			network_sender.clone(),
-			config.clone(),
-			client.clone(),
-			None,
-			tx_pool,
-			specialization,
-		).unwrap();
-
-		let is_offline2 = is_offline.clone();
-		let is_major_syncing2 = is_major_syncing.clone();
-		let import_queue2 = import_queue.clone();
-
+	/// Add created peer.
+	fn add_peer(
+		&mut self,
+		is_offline: Arc<AtomicBool>,
+		is_major_syncing: Arc<AtomicBool>,
+		import_queue: Box<BasicQueue<Block>>,
+		mut protocol: Protocol<Block, Self::Specialization, Hash>,
+		mut network_to_protocol_rx: mpsc::UnboundedReceiver<FromNetworkMsg<Block>>,
+		peer: Arc<Peer<Self::PeerData, Self::Specialization>>,
+	) {
 		std::thread::spawn(move || {
 			tokio::runtime::current_thread::run(futures::future::poll_fn(move || {
 				while let Async::Ready(msg) = network_to_protocol_rx.poll().unwrap() {
@@ -680,14 +766,16 @@ pub trait TestNetFactory: Sized {
 							protocol.synchronize();
 							CustomMessageOutcome::None
 						},
-						None => return Ok(Async::Ready(()))
+						None => return Ok(Async::Ready(())),
 					};

 					match outcome {
 						CustomMessageOutcome::BlockImport(origin, blocks) =>
-							import_queue2.import_blocks(origin, blocks),
+							import_queue.import_blocks(origin, blocks),
 						CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) =>
-							import_queue2.import_justification(origin, hash, nb, justification),
+							import_queue.import_justification(origin, hash, nb, justification),
+						CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) =>
+							import_queue.import_finality_proof(origin, hash, nb, proof),
 						CustomMessageOutcome::None => {}
 					}
 				}
@@ -696,31 +784,140 @@ pub trait TestNetFactory: Sized {
 					return Ok(Async::Ready(()))
 				}

-				is_offline2.store(protocol.is_offline(), Ordering::Relaxed);
-				is_major_syncing2.store(protocol.is_major_syncing(), Ordering::Relaxed);
+				is_offline.store(protocol.is_offline(), Ordering::Relaxed);
+				is_major_syncing.store(protocol.is_major_syncing(), Ordering::Relaxed);
 				Ok(Async::NotReady)
 			}));
 		});

-		let peer = Arc::new(Peer::new(
-			is_offline,
-			is_major_syncing,
-			peers,
-			client,
-			import_queue,
-			network_to_protocol_sender,
-			protocol_sender,
-			network_sender,
-			network_port,
-			data,
-		));
+		if self.started() {
+			peer.start();
+			self.peers().iter().for_each(|other| {
+				other.on_connect(&*peer);
+				peer.on_connect(other);
+			});
+		}

 		self.mut_peers(|peers| {
 			peers.push(peer)
 		});
 	}

+	/// Add a full peer.
+	fn add_full_peer(&mut self, config: &ProtocolConfig) {
+		let client = Arc::new(test_client::new());
+		let tx_pool = Arc::new(EmptyTransactionPool);
+		let verifier = self.make_verifier(PeersClient::Full(client.clone()), config);
+		let (block_import, justification_import, finality_proof_import, finality_proof_request_builder, data)
+			= self.make_block_import(PeersClient::Full(client.clone()));
+		let (network_sender, network_port) = network_channel();
+
+		let import_queue = Box::new(BasicQueue::new(
+			verifier,
+			block_import,
+			justification_import,
+			finality_proof_import,
+			finality_proof_request_builder,
+		));
+		let is_offline = Arc::new(AtomicBool::new(true));
+		let is_major_syncing = Arc::new(AtomicBool::new(false));
+		let specialization = self::SpecializationFactory::create();
+		let peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>> = Arc::new(Default::default());
+
+		let (network_to_protocol_sender, network_to_protocol_rx) = mpsc::unbounded();
+
+		let (protocol, protocol_sender) = Protocol::new(
+			peers.clone(),
+			network_sender.clone(),
+			config.clone(),
+			client.clone(),
+			self.make_finality_proof_provider(PeersClient::Full(client.clone())),
+			None,
+			tx_pool,
+			specialization,
+		).unwrap();
+
+		self.add_peer(
+			is_offline.clone(),
+			is_major_syncing.clone(),
+			import_queue.clone(),
+			protocol,
+			network_to_protocol_rx,
+			Arc::new(Peer::new(
+				is_offline,
+				is_major_syncing,
+				peers,
+				PeersClient::Full(client),
+				import_queue,
+				network_to_protocol_sender,
+				protocol_sender,
+				network_sender,
+				network_port,
+				data,
+			)),
+		);
+	}
+
+	/// Add a light peer.
+	fn add_light_peer(&mut self, config: &ProtocolConfig) {
+		let mut config = config.clone();
+		config.roles = Roles::LIGHT;
+
+		let client = Arc::new(test_client::new_light());
+		let tx_pool = Arc::new(EmptyTransactionPool);
+		let verifier = self.make_verifier(PeersClient::Light(client.clone()), &config);
+		let (block_import, justification_import, finality_proof_import, finality_proof_request_builder, data)
+			= self.make_block_import(PeersClient::Light(client.clone()));
+		let (network_sender, network_port) = network_channel();
+
+		let import_queue = Box::new(BasicQueue::new(
+			verifier,
+			block_import,
+			justification_import,
+			finality_proof_import,
+			finality_proof_request_builder,
+		));
+		let is_offline = Arc::new(AtomicBool::new(true));
+		let is_major_syncing = Arc::new(AtomicBool::new(false));
+		let specialization = self::SpecializationFactory::create();
+		let peers: Arc<RwLock<HashMap<PeerId, ConnectedPeer<Block>>>> = Arc::new(Default::default());
+
+		let (network_to_protocol_sender, network_to_protocol_rx) = mpsc::unbounded();
+
+		let (protocol, protocol_sender) = Protocol::new(
+			peers.clone(),
+			network_sender.clone(),
+			config,
+			client.clone(),
+			self.make_finality_proof_provider(PeersClient::Light(client.clone())),
+			None,
+			tx_pool,
+			specialization,
+		).unwrap();
+
+		self.add_peer(
+			is_offline.clone(),
+			is_major_syncing.clone(),
+			import_queue.clone(),
+			protocol,
+			network_to_protocol_rx,
+			Arc::new(Peer::new(
+				is_offline,
+				is_major_syncing,
+				peers,
+				PeersClient::Light(client),
+				import_queue,
+				network_to_protocol_sender,
+				protocol_sender,
+				network_sender,
+				network_port,
+				data,
+			)),
+		);
+	}
+
 	/// Start network.
 	fn start(&mut self) {
 		if self.started() {
@@ -832,6 +1029,11 @@ pub trait TestNetFactory: Sized {
 		self.route_single(true, None, &|_| true);
 	}

+	/// Maintain sync for a peer.
+	fn tick_peer(&mut self, i: usize) {
+		self.peers()[i].sync_step();
+	}
+
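With both constructors in place, tests can build mixed networks. A hedged sketch of the construction flow (illustrative numbers and calls, using only methods defined in this file):

    // Two full peers via TestNet::new, plus one light peer appended afterwards;
    // add_light_peer forces Roles::LIGHT on the supplied config internally.
    let mut net = TestNet::new(2);
    net.add_light_peer(&ProtocolConfig::default());
    net.peer(0).push_blocks(5, false);
    net.sync();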
 	/// Deliver pending messages until there are no more.
 	fn sync(&mut self) {
 		self.sync_with(true, None)
@@ -866,7 +1068,7 @@ impl TestNetFactory for TestNet {
 		}
 	}

-	fn make_verifier(&self, _client: Arc<PeersClient>, _config: &ProtocolConfig)
+	fn make_verifier(&self, _client: PeersClient, _config: &ProtocolConfig)
 		-> Arc<Self::Verifier>
 	{
 		Arc::new(PassThroughVerifier(false))
@@ -893,7 +1095,7 @@ impl TestNetFactory for TestNet {
 	}
 }

-pub struct ForceFinalized(Arc<PeersClient>);
+pub struct ForceFinalized(PeersClient);

 impl JustificationImport<Block> for ForceFinalized {
 	type Error = ConsensusError;
@@ -920,7 +1122,7 @@ impl TestNetFactory for JustificationTestNet {
 		JustificationTestNet(TestNet::from_config(config))
 	}

-	fn make_verifier(&self, client: Arc<PeersClient>, config: &ProtocolConfig)
+	fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig)
 		-> Arc<Self::Verifier>
 	{
 		self.0.make_verifier(client, config)
@@ -946,9 +1148,15 @@ impl TestNetFactory for JustificationTestNet {
 		self.0.set_started(new)
 	}

-	fn make_block_import(&self, client: Arc<PeersClient>)
-		-> (SharedBlockImport<Block>, Option<SharedJustificationImport<Block>>, Self::PeerData)
+	fn make_block_import(&self, client: PeersClient)
+		-> (
+			SharedBlockImport<Block>,
+			Option<SharedJustificationImport<Block>>,
+			Option<SharedFinalityProofImport<Block>>,
+			Option<SharedFinalityProofRequestBuilder<Block>>,
+			Self::PeerData,
+		)
 	{
-		(client.clone(), Some(Arc::new(ForceFinalized(client))), Default::default())
+		(client.as_block_import(), Some(Arc::new(ForceFinalized(client))), None, None, Default::default())
 	}
 }
diff --git a/core/network/src/test/sync.rs b/core/network/src/test/sync.rs
index 6d582d858fe17..990d80e3f15fb 100644
--- a/core/network/src/test/sync.rs
+++ b/core/network/src/test/sync.rs
@@ -14,8 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with Substrate.  If not, see <http://www.gnu.org/licenses/>.

-use client::backend::Backend;
-use client::blockchain::HeaderBackend as BlockchainHeaderBackend;
+use client::{backend::Backend, blockchain::HeaderBackend};
 use crate::config::Roles;
 use consensus::BlockOrigin;
 use std::collections::HashSet;
@@ -34,8 +33,8 @@ fn test_ancestor_search_when_common_is(n: usize) {
 	net.peer(2).push_blocks(100, false);
 	net.sync();

-	assert!(net.peer(0).client.backend().as_in_memory().blockchain()
-		.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
+	assert!(net.peer(0).client.as_in_memory_backend().blockchain()
+		.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
 }

 #[test]
@@ -130,8 +129,8 @@ fn sync_from_two_peers_works() {
 	net.peer(1).push_blocks(100, false);
 	net.peer(2).push_blocks(100, false);
 	net.sync();
-	assert!(net.peer(0).client.backend().as_in_memory().blockchain()
-		.equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
+	assert!(net.peer(0).client.as_in_memory_backend().blockchain()
+		.equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
 	assert!(!net.peer(0).is_major_syncing());
 }

@@ -143,8 +142,8 @@ fn sync_from_two_peers_with_ancestry_search_works() {
 	net.peer(1).push_blocks(100, false);
 	net.peer(2).push_blocks(100, false);
 	net.sync();
-	assert!(net.peer(0).client.backend().as_in_memory().blockchain()
-		.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
+	assert!(net.peer(0).client.as_in_memory_backend().blockchain()
+		.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
 }

 #[test]
@@ -157,8 +156,8 @@ fn ancestry_search_works_when_backoff_is_one() {
 	net.peer(2).push_blocks(2, false);
 	net.sync();

-	assert!(net.peer(0).client.backend().as_in_memory().blockchain()
-		.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
+	assert!(net.peer(0).client.as_in_memory_backend().blockchain()
+		.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
 }

 #[test]
@@ -171,8 +170,8 @@ fn ancestry_search_works_when_ancestor_is_genesis() {
 	net.peer(2).push_blocks(100, false);
 	net.sync();

-	assert!(net.peer(0).client.backend().as_in_memory().blockchain()
-		.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
+	assert!(net.peer(0).client.as_in_memory_backend().blockchain()
+		.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
 }

 #[test]
@@ -195,8 +194,8 @@ fn sync_long_chain_works() {
 	let mut net = TestNet::new(2);
 	net.peer(1).push_blocks(500, false);
 	net.sync();
-	assert!(net.peer(0).client.backend().as_in_memory().blockchain()
-		.equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
+	assert!(net.peer(0).client.as_in_memory_backend().blockchain()
+		.equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
 }

 #[test]
@@ -206,8 +205,8 @@ fn sync_no_common_longer_chain_fails() {
 	net.peer(0).push_blocks(20, true);
 	net.peer(1).push_blocks(20, false);
 	net.sync();
-	assert!(!net.peer(0).client.backend().as_in_memory().blockchain()
-		.canon_equals_to(net.peer(1).client.backend().as_in_memory().blockchain()));
+	assert!(!net.peer(0).client.as_in_memory_backend().blockchain()
+		.canon_equals_to(net.peer(1).client.as_in_memory_backend().blockchain()));
 }

 #[test]
@@ -285,11 +284,11 @@ fn sync_after_fork_works() {
 	net.peer(2).push_blocks(1, false);

 	// peer 1 has the best chain
-	let peer1_chain = net.peer(1).client.backend().as_in_memory().blockchain().clone();
+	let peer1_chain = net.peer(1).client.as_in_memory_backend().blockchain().clone();
 	net.sync();
-	assert!(net.peer(0).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain));
-	assert!(net.peer(1).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain));
-	assert!(net.peer(2).client.backend().as_in_memory().blockchain().canon_equals_to(&peer1_chain));
+	assert!(net.peer(0).client.as_in_memory_backend().blockchain().canon_equals_to(&peer1_chain));
+	assert!(net.peer(1).client.as_in_memory_backend().blockchain().canon_equals_to(&peer1_chain));
+	assert!(net.peer(2).client.as_in_memory_backend().blockchain().canon_equals_to(&peer1_chain));
 }

 #[test]
@@ -305,8 +304,8 @@ fn syncs_all_forks() {
 	net.sync();

 	// Check that all peers have all of the blocks.
-	assert_eq!(9, net.peer(0).client.backend().as_in_memory().blockchain().blocks_count());
-	assert_eq!(9, net.peer(1).client.backend().as_in_memory().blockchain().blocks_count());
+	assert_eq!(9, net.peer(0).client.as_in_memory_backend().blockchain().blocks_count());
+	assert_eq!(9, net.peer(1).client.as_in_memory_backend().blockchain().blocks_count());
 }

 #[test]
@@ -320,11 +319,11 @@ fn own_blocks_are_announced() {
 	net.peer(0).on_block_imported(header.hash(), &header);
 	net.sync();

-	assert_eq!(net.peer(0).client.backend().blockchain().info().unwrap().best_number, 1);
-	assert_eq!(net.peer(1).client.backend().blockchain().info().unwrap().best_number, 1);
-	let peer0_chain = net.peer(0).client.backend().as_in_memory().blockchain().clone();
-	assert!(net.peer(1).client.backend().as_in_memory().blockchain().canon_equals_to(&peer0_chain));
-	assert!(net.peer(2).client.backend().as_in_memory().blockchain().canon_equals_to(&peer0_chain));
+	assert_eq!(net.peer(0).client.as_in_memory_backend().blockchain().info().unwrap().best_number, 1);
+	assert_eq!(net.peer(1).client.as_in_memory_backend().blockchain().info().unwrap().best_number, 1);
+	let peer0_chain = net.peer(0).client.as_in_memory_backend().blockchain().clone();
+	assert!(net.peer(1).client.as_in_memory_backend().blockchain().canon_equals_to(&peer0_chain));
+	assert!(net.peer(2).client.as_in_memory_backend().blockchain().canon_equals_to(&peer0_chain));
 }

 #[test]
@@ -336,9 +335,9 @@ fn blocks_are_not_announced_by_light_nodes() {
 	// light peer1 is connected to full peer2
 	let mut light_config = ProtocolConfig::default();
 	light_config.roles = Roles::LIGHT;
-	net.add_peer(&ProtocolConfig::default());
-	net.add_peer(&light_config);
-	net.add_peer(&ProtocolConfig::default());
+	net.add_full_peer(&ProtocolConfig::default());
+	net.add_full_peer(&light_config);
+	net.add_full_peer(&ProtocolConfig::default());

 	net.peer(0).push_blocks(1, false);
 	net.peer(0).start();
@@ -356,9 +355,9 @@ fn blocks_are_not_announced_by_light_nodes() {
 	// peer 0 has the best chain
 	// peer 1 has the best chain
 	// peer 2 has genesis-chain only
-	assert_eq!(net.peer(0).client.backend().blockchain().info().unwrap().best_number, 1);
-	assert_eq!(net.peer(1).client.backend().blockchain().info().unwrap().best_number, 1);
-	assert_eq!(net.peer(2).client.backend().blockchain().info().unwrap().best_number, 0);
+	assert_eq!(net.peer(0).client.info().unwrap().chain.best_number, 1);
+	assert_eq!(net.peer(1).client.info().unwrap().chain.best_number, 1);
+	assert_eq!(net.peer(2).client.info().unwrap().chain.best_number, 0);
 }

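A hedged sketch of the kind of follow-up test the light-peer support enables; the expected `best_number` depends on what the light import queue accepts, so the assertion is illustrative rather than taken from this PR:

    #[test]
    fn light_peer_follows_full_peer() {
        // one full peer (index 0) plus one light peer (index 1)
        let mut net = TestNet::new(1);
        net.add_light_peer(&ProtocolConfig::default());
        net.peer(0).push_blocks(3, false);
        net.sync();
        assert_eq!(net.peer(1).client.info().unwrap().chain.best_number, 3);
    }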
 #[test]
diff --git a/core/service/src/components.rs b/core/service/src/components.rs
index 7e76a8f201a9b..7bca1cc3e516a 100644
--- a/core/service/src/components.rs
+++ b/core/service/src/components.rs
@@ -24,7 +24,7 @@ use client_db;
 use client::{self, Client, runtime_api};
 use crate::{error, Service, maybe_start_server};
 use consensus_common::{import_queue::ImportQueue, SelectChain};
-use network::{self, OnDemand};
+use network::{self, OnDemand, FinalityProofProvider};
 use substrate_executor::{NativeExecutor, NativeExecutionDispatch};
 use transaction_pool::txpool::{self, Options as TransactionPoolOptions, Pool as TransactionPool};
 use runtime_primitives::{
@@ -72,7 +72,7 @@ pub type LightExecutor<F> = client::light::call_executor::RemoteOrLocalCallExecutor<
 		client_db::light::LightStorage<<F as ServiceFactory>::Block>,
 		network::OnDemand<<F as ServiceFactory>::Block>
 	>,
-	network::OnDemand<<F as ServiceFactory>::Block>
+	network::OnDemand<<F as ServiceFactory>::Block>,
 	client::LocalCallExecutor<
 		client::light::backend::Backend<
@@ -322,6 +322,11 @@ pub trait ServiceFactory: 'static + Sized {
 	fn build_network_protocol(config: &FactoryFullConfiguration<Self>)
 		-> Result<Self::NetworkProtocol, error::Error>;

+	/// Build finality proof provider for serving network requests on full node.
+	fn build_finality_proof_provider(
+		client: Arc<FullClient<Self>>
+	) -> Result<Option<Arc<FinalityProofProvider<Self::Block>>>, error::Error>;
+
 	/// Build the Fork Choice algorithm for full client
 	fn build_select_chain(
 		config: &mut FactoryFullConfiguration<Self>,
@@ -413,12 +418,16 @@ pub trait Components: Sized + 'static {
 		select_chain: Self::SelectChain,
 	) -> Result<Self::ImportQueue, error::Error>;

+	/// Finality proof provider for serving network requests.
+	fn build_finality_proof_provider(
+		client: Arc<ComponentClient<Self>>
+	) -> Result<Option<Arc<FinalityProofProvider<<Self::Factory as ServiceFactory>::Block>>>, error::Error>;
+
 	/// Build fork choice selector
 	fn build_select_chain(
 		config: &mut FactoryFullConfiguration<Self::Factory>,
 		client: Arc<ComponentClient<Self>>
 	) -> Result<Self::SelectChain, error::Error>;
-
 }

 /// A struct that implement `Components` for the full client.
@@ -508,7 +517,12 @@ impl<Factory: ServiceFactory> Components for FullComponents<Factory> {
 	) -> Result<Self::SelectChain, error::Error> {
 		Self::Factory::build_select_chain(config, client)
 	}
-
+
+	fn build_finality_proof_provider(
+		client: Arc<ComponentClient<Self>>
+	) -> Result<Option<Arc<FinalityProofProvider<<Self::Factory as ServiceFactory>::Block>>>, error::Error> {
+		Factory::build_finality_proof_provider(client)
+	}
 }

 /// A struct that implement `Components` for the light client.
@@ -587,14 +601,17 @@ impl<Factory: ServiceFactory> Components for LightComponents<Factory> {
 		Factory::build_light_import_queue(config, client)
 	}

-	/// Build fork choice selector
+	fn build_finality_proof_provider(
+		_client: Arc<ComponentClient<Self>>
+	) -> Result<Option<Arc<FinalityProofProvider<<Self::Factory as ServiceFactory>::Block>>>, error::Error> {
+		Ok(None)
+	}
+
 	fn build_select_chain(
 		_config: &mut FactoryFullConfiguration<Self::Factory>,
 		_client: Arc<ComponentClient<Self>>
 	) -> Result<Self::SelectChain, error::Error> {
 		Err("Fork choice doesn't happen on light clients.".into())
 	}
-
 }

 #[cfg(test)]
diff --git a/core/service/src/lib.rs b/core/service/src/lib.rs
index aed14568cb682..53aaff02cc75f 100644
--- a/core/service/src/lib.rs
+++ b/core/service/src/lib.rs
@@ -65,7 +65,7 @@ use components::{StartRPC, MaintainTransactionPool, OffchainWorker};
 #[doc(hidden)]
 pub use std::{ops::Deref, result::Result, sync::Arc};
 #[doc(hidden)]
-pub use network::OnDemand;
+pub use network::{FinalityProofProvider, OnDemand};
 #[doc(hidden)]
 pub use tokio::runtime::TaskExecutor;
@@ -156,8 +156,9 @@ impl<Components: components::Components> Service<Components> {
 		let import_queue = Box::new(Components::build_import_queue(
 			&mut config,
 			client.clone(),
-			select_chain.clone()
+			select_chain.clone(),
 		)?);
+		let finality_proof_provider = Components::build_finality_proof_provider(client.clone())?;
 		let best_header = select_chain.best_chain()?;

 		let version = config.full_version();
@@ -178,6 +179,7 @@ impl<Components: components::Components> Service<Components> {
 			config: network::config::ProtocolConfig { roles: config.roles },
 			network_config: config.network.clone(),
 			chain: client.clone(),
+			finality_proof_provider,
 			on_demand: on_demand.as_ref().map(|d| d.clone() as _),
 			transaction_pool: transaction_pool_adapter.clone() as _,
 			specialization: network_protocol,
@@ -593,6 +595,7 @@ macro_rules! construct_service_factory {
 			{ $( $light_import_queue_init:tt )* },
 		SelectChain = $select_chain:ty
 			{ $( $select_chain_init:tt )* },
+		FinalityProofProvider = { $( $finality_proof_provider_init:tt )* },
 	}
 ) => {
 	$( #[$attr] )*
@@ -658,6 +661,12 @@ macro_rules!
 construct_service_factory {
 				( $( $light_import_queue_init )* ) (config, client)
 			}

+			fn build_finality_proof_provider(
+				client: Arc<$crate::FullClient<Self>>
+			) -> Result<Option<Arc<$crate::FinalityProofProvider<Self::Block>>>, $crate::Error> {
+				( $( $finality_proof_provider_init )* ) (client)
+			}
+
 			fn new_light(
 				config: $crate::FactoryFullConfiguration<Self>,
 				executor: $crate::TaskExecutor
diff --git a/node-template/src/service.rs b/node-template/src/service.rs
index 2a4980688f8a7..f964e899d27e0 100644
--- a/node-template/src/service.rs
+++ b/node-template/src/service.rs
@@ -92,6 +92,8 @@ construct_service_factory! {
 					SlotDuration::get_or_compute(&*client)?,
 					client.clone(),
 					None,
+					None,
+					None,
 					client,
 					NothingExtra,
 					config.custom.inherent_data_providers.clone(),
@@ -106,6 +108,8 @@ construct_service_factory! {
 					SlotDuration::get_or_compute(&*client)?,
 					client.clone(),
 					None,
+					None,
+					None,
 					client,
 					NothingExtra,
 					config.custom.inherent_data_providers.clone(),
@@ -120,5 +124,8 @@ construct_service_factory! {
 				))
 			}
 		},
+		FinalityProofProvider = { |_client: Arc<FullClient<Self>>| {
+			Ok(None)
+		}},
 	}
 }
diff --git a/node/cli/src/service.rs b/node/cli/src/service.rs
index 47bdb24122309..42b3faf236f0d 100644
--- a/node/cli/src/service.rs
+++ b/node/cli/src/service.rs
@@ -22,10 +22,8 @@ use std::sync::Arc;
 use std::time::Duration;

 use client::{self, LongestChain};
-use consensus::{import_queue, start_aura, AuraImportQueue,
-	SlotDuration, NothingExtra
-};
-use grandpa;
+use consensus::{import_queue, start_aura, AuraImportQueue, SlotDuration, NothingExtra};
+use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider};
 use node_executor;
 use primitives::{Pair as PairT, ed25519};
 use node_primitives::Block;
@@ -170,6 +168,8 @@ construct_service_factory! {
 					slot_duration,
 					block_import,
 					Some(justification_import),
+					None,
+					None,
 					client,
 					NothingExtra,
 					config.custom.inherent_data_providers.clone(),
@@ -177,16 +177,28 @@ construct_service_factory! {
 			}},
 		LightImportQueue = AuraImportQueue<Self::Block>
 			{ |config: &FactoryFullConfiguration<Self>, client: Arc<LightClient<Self>>| {
+				let fetch_checker = client.backend().blockchain().fetcher()
+					.upgrade()
+					.map(|fetcher| fetcher.checker().clone())
+					.ok_or_else(|| "Trying to start light import queue without active fetch checker")?;
+				let block_import = grandpa::light_block_import::<_, _, _, RuntimeApi, LightClient<Self>>(
+					client.clone(), Arc::new(fetch_checker), client.clone()
+				)?;
+				let block_import = Arc::new(block_import);
+				let finality_proof_import = block_import.clone();
+				let finality_proof_request_builder = finality_proof_import.create_finality_proof_request_builder();
+
 				import_queue::<_, _, _, ed25519::Pair>(
 					SlotDuration::get_or_compute(&*client)?,
-					client.clone(),
+					block_import,
 					None,
+					Some(finality_proof_import),
+					Some(finality_proof_request_builder),
 					client,
 					NothingExtra,
 					config.custom.inherent_data_providers.clone(),
 				).map_err(Into::into)
-			}
-		},
+			}},
 		SelectChain = LongestChain<FullBackend<Self>, Self::Block>
 			{ |config: &FactoryFullConfiguration<Self>, client: Arc<FullClient<Self>>| {
 				Ok(LongestChain::new(
@@ -195,6 +207,9 @@ construct_service_factory! {
 				))
 			}
 		},
+		FinalityProofProvider = { |client: Arc<FullClient<Self>>| {
+			Ok(Some(Arc::new(GrandpaFinalityProofProvider::new(client.clone(), client)) as _))
+		}},
 	}
 }
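For readers wiring this into their own chain: the value handed to the network here is whatever implements `network::FinalityProofProvider`. The trait definition is not shown in these hunks; a hedged sketch of the shape the GRANDPA provider above appears to fill, with the method name and signature being assumptions inferred from usage rather than quoted API:

    // Assumed shape, for illustration only -- the real trait lives in the
    // network crate's chain module and may differ in details.
    pub trait FinalityProofProvider<Block: BlockT>: Send + Sync {
        /// Prove finality for `for_block`, returning an opaque,
        /// consensus-specific encoded proof (None if not finalized yet).
        fn prove_finality(
            &self,
            for_block: Block::Hash,
            request: &[u8],
        ) -> Result<Option<Vec<u8>>, client::error::Error>;
    }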