diff --git a/polkadot/node/service/src/builder/partial.rs b/polkadot/node/service/src/builder/partial.rs index 0926230bff1db..1bca72e210716 100644 --- a/polkadot/node/service/src/builder/partial.rs +++ b/polkadot/node/service/src/builder/partial.rs @@ -29,6 +29,7 @@ use sc_service::{Configuration, Error as SubstrateServiceError, KeystoreContaine use sc_telemetry::{Telemetry, TelemetryWorker, TelemetryWorkerHandle}; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_consensus::SelectChain; +use sp_consensus_babe::inherents::BabeCreateInherentDataProviders; use sp_consensus_beefy::ecdsa_crypto; use std::sync::Arc; @@ -64,6 +65,8 @@ pub(crate) type PolkadotPartialComponents = sc_service::PartialC FullGrandpaBlockImport, ecdsa_crypto::AuthorityId, >, + BabeCreateInherentDataProviders, + ChainSelection, >, sc_consensus_grandpa::LinkHalf, sc_consensus_babe::BabeLink, @@ -184,32 +187,33 @@ where ); let babe_config = sc_consensus_babe::configuration(&*client)?; - let (block_import, babe_link) = - sc_consensus_babe::block_import(babe_config.clone(), beefy_block_import, client.clone())?; + let slot_duration = babe_config.slot_duration(); + let (block_import, babe_link) = sc_consensus_babe::block_import( + babe_config.clone(), + beefy_block_import, + client.clone(), + Arc::new(move |_, _| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + let slot = sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + Ok((slot, timestamp)) + }) as BabeCreateInherentDataProviders, + select_chain.clone(), + OffchainTransactionPoolFactory::new(transaction_pool.clone()), + )?; - let slot_duration = babe_link.config().slot_duration(); let (import_queue, babe_worker_handle) = sc_consensus_babe::import_queue(sc_consensus_babe::ImportQueueParams { link: babe_link.clone(), block_import: block_import.clone(), justification_import: Some(Box::new(justification_import)), client: client.clone(), - select_chain: select_chain.clone(), - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) - }, + slot_duration, spawner: &task_manager.spawn_essential_handle(), registry: config.prometheus_registry(), telemetry: telemetry.as_ref().map(|x| x.handle()), - offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), })?; let justification_stream = grandpa_link.justification_stream(); diff --git a/prdoc/pr_9147.prdoc b/prdoc/pr_9147.prdoc new file mode 100644 index 0000000000000..979e3ccaf28f1 --- /dev/null +++ b/prdoc/pr_9147.prdoc @@ -0,0 +1,18 @@ +title: 'babe: keep stateless verification in `Verifier`, move everything else to the + import queue' +doc: +- audience: Node Operator + description: Only do stateless verification without runtime calls in the `BabeVerifier`, do all other verification in the import queue. 
+crates: +- name: polkadot-service + bump: patch +- name: sc-consensus-babe-rpc + bump: patch +- name: sc-consensus-babe + bump: major +- name: sp-consensus-babe + bump: minor +- name: sp-consensus + bump: major +- name: sp-inherents + bump: minor diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index a080de61cf19f..f42f14de021df 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -22,6 +22,7 @@ use polkadot_sdk::{ sc_consensus_beefy as beefy, sc_consensus_grandpa as grandpa, + sp_consensus_babe::inherents::BabeCreateInherentDataProviders, sp_consensus_beefy as beefy_primitives, *, }; @@ -190,6 +191,8 @@ pub fn new_partial( Block, FullClient, FullBeefyBlockImport, + BabeCreateInherentDataProviders, + FullSelectChain, >, grandpa::LinkHalf, sc_consensus_babe::BabeLink, @@ -259,35 +262,35 @@ pub fn new_partial( config.prometheus_registry().cloned(), ); + let babe_config = sc_consensus_babe::configuration(&*client)?; + let slot_duration = babe_config.slot_duration(); let (block_import, babe_link) = sc_consensus_babe::block_import( - sc_consensus_babe::configuration(&*client)?, + babe_config, beefy_block_import, client.clone(), + Arc::new(move |_, _| async move { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + let slot = + sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + slot_duration, + ); + Ok((slot, timestamp)) + }) as BabeCreateInherentDataProviders, + select_chain.clone(), + OffchainTransactionPoolFactory::new(transaction_pool.clone()), )?; - let slot_duration = babe_link.config().slot_duration(); let (import_queue, babe_worker_handle) = sc_consensus_babe::import_queue(sc_consensus_babe::ImportQueueParams { link: babe_link.clone(), block_import: block_import.clone(), justification_import: Some(Box::new(justification_import)), client: client.clone(), - select_chain: select_chain.clone(), - create_inherent_data_providers: move |_, ()| async move { - let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); - - let slot = - sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration( - *timestamp, - slot_duration, - ); - - Ok((slot, timestamp)) - }, + slot_duration, spawner: &task_manager.spawn_essential_handle(), registry: config.prometheus_registry(), telemetry: telemetry.as_ref().map(|x| x.handle()), - offchain_tx_pool_factory: OffchainTransactionPoolFactory::new(transaction_pool.clone()), })?; let import_setup = (block_import, grandpa_link, babe_link, beefy_voter_links); @@ -408,6 +411,8 @@ pub fn new_full_base::Hash>>( Block, FullClient, FullBeefyBlockImport, + BabeCreateInherentDataProviders, + FullSelectChain, >, &sc_consensus_babe::BabeLink, ), @@ -926,7 +931,7 @@ mod tests { config, None, false, - |block_import: &sc_consensus_babe::BabeBlockImport, + |block_import: &sc_consensus_babe::BabeBlockImport, babe_link: &sc_consensus_babe::BabeLink| { setup_handles = Some((block_import.clone(), babe_link.clone())); }, diff --git a/substrate/client/consensus/babe/Cargo.toml b/substrate/client/consensus/babe/Cargo.toml index 305409b80c787..6390ec840d555 100644 --- a/substrate/client/consensus/babe/Cargo.toml +++ b/substrate/client/consensus/babe/Cargo.toml @@ -45,13 +45,13 @@ sp-crypto-hashing = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = 
true, default-features = true } +sp-timestamp = { workspace = true, default-features = true } thiserror = { workspace = true } [dev-dependencies] sc-block-builder = { workspace = true, default-features = true } sc-network-test = { workspace = true } sp-keyring = { workspace = true, default-features = true } -sp-timestamp = { workspace = true, default-features = true } sp-tracing = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } tokio = { workspace = true, default-features = true } diff --git a/substrate/client/consensus/babe/rpc/src/lib.rs b/substrate/client/consensus/babe/rpc/src/lib.rs index 338d71a432565..5b04e6eaab312 100644 --- a/substrate/client/consensus/babe/rpc/src/lib.rs +++ b/substrate/client/consensus/babe/rpc/src/lib.rs @@ -224,28 +224,30 @@ mod tests { let config = sc_consensus_babe::configuration(&*client).expect("config available"); let slot_duration = config.slot_duration(); - let (block_import, link) = - sc_consensus_babe::block_import(config.clone(), client.clone(), client.clone()) - .expect("can initialize block-import"); + let (block_import, link) = sc_consensus_babe::block_import( + config.clone(), + client.clone(), + client.clone(), + move |_, _| async move { + Ok((InherentDataProvider::from_timestamp_and_slot_duration( + 0.into(), + slot_duration, + ),)) + }, + longest_chain.clone(), + OffchainTransactionPoolFactory::new(RejectAllTxPool::default()), + ) + .expect("can initialize block-import"); let (_, babe_worker_handle) = sc_consensus_babe::import_queue(ImportQueueParams { link: link.clone(), block_import: block_import.clone(), justification_import: None, client: client.clone(), - select_chain: longest_chain.clone(), - create_inherent_data_providers: move |_, _| async move { - Ok((InherentDataProvider::from_timestamp_and_slot_duration( - 0.into(), - slot_duration, - ),)) - }, + slot_duration, spawner: &task_executor, registry: None, telemetry: None, - offchain_tx_pool_factory: OffchainTransactionPoolFactory::new( - RejectAllTxPool::default(), - ), }) .unwrap(); diff --git a/substrate/client/consensus/babe/src/lib.rs b/substrate/client/consensus/babe/src/lib.rs index 5002ff3ac9c4e..5af2e9a003d22 100644 --- a/substrate/client/consensus/babe/src/lib.rs +++ b/substrate/client/consensus/babe/src/lib.rs @@ -100,7 +100,8 @@ use sc_consensus::{ import_queue::{BasicQueue, BoxJustificationImport, DefaultImportQueue, Verifier}, }; use sc_consensus_epochs::{ - descendent_query, Epoch as EpochT, EpochChangesFor, SharedEpochChanges, ViableEpochDescriptor, + descendent_query, Epoch as EpochT, EpochChangesFor, SharedEpochChanges, ViableEpoch, + ViableEpochDescriptor, }; use sc_consensus_slots::{ check_equivocation, BackoffAuthoringBlocksStrategy, CheckedHeader, InherentDataProviderExt, @@ -116,10 +117,10 @@ use sp_blockchain::{ Result as ClientResult, }; use sp_consensus::{BlockOrigin, Environment, Error as ConsensusError, Proposer, SelectChain}; -use sp_consensus_babe::inherents::BabeInherentData; +use sp_consensus_babe::{inherents::BabeInherentData, SlotDuration}; use sp_consensus_slots::Slot; use sp_core::traits::SpawnEssentialNamed; -use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; +use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_keystore::KeystorePtr; use sp_runtime::{ generic::OpaqueDigestItemId, @@ -139,6 +140,7 @@ pub use sp_consensus_babe::{ }; pub use aux_schema::load_block_weight as block_weight; +use sp_timestamp::Timestamp; mod migration; mod 
verification; @@ -978,145 +980,16 @@ impl BabeLink { } /// A verifier for Babe blocks. -pub struct BabeVerifier { +pub struct BabeVerifier { client: Arc, - select_chain: SelectChain, - create_inherent_data_providers: CIDP, + slot_duration: SlotDuration, config: BabeConfiguration, epoch_changes: SharedEpochChanges, telemetry: Option, - offchain_tx_pool_factory: OffchainTransactionPoolFactory, -} - -impl BabeVerifier -where - Block: BlockT, - Client: AuxStore + HeaderBackend + HeaderMetadata + ProvideRuntimeApi, - Client::Api: BlockBuilderApi + BabeApi, - SelectChain: sp_consensus::SelectChain, - CIDP: CreateInherentDataProviders, -{ - async fn check_inherents( - &self, - block: Block, - at_hash: Block::Hash, - inherent_data: InherentData, - create_inherent_data_providers: CIDP::InherentDataProviders, - ) -> Result<(), Error> { - use sp_block_builder::CheckInherentsError; - - sp_block_builder::check_inherents_with_data( - self.client.clone(), - at_hash, - block, - &create_inherent_data_providers, - inherent_data, - ) - .await - .map_err(|e| match e { - CheckInherentsError::CreateInherentData(e) => Error::CreateInherents(e), - CheckInherentsError::Client(e) => Error::RuntimeApi(e), - CheckInherentsError::CheckInherents(e) => Error::CheckInherents(e), - CheckInherentsError::CheckInherentsUnknownError(id) => - Error::CheckInherentsUnhandled(id), - })?; - - Ok(()) - } - - async fn check_and_report_equivocation( - &self, - slot_now: Slot, - slot: Slot, - header: &Block::Header, - author: &AuthorityId, - origin: &BlockOrigin, - ) -> Result<(), Error> { - // don't report any equivocations during initial sync - // as they are most likely stale. - if *origin == BlockOrigin::NetworkInitialSync { - return Ok(()) - } - - // check if authorship of this header is an equivocation and return a proof if so. - let equivocation_proof = - match check_equivocation(&*self.client, slot_now, slot, header, author) - .map_err(Error::Client)? - { - Some(proof) => proof, - None => return Ok(()), - }; - - info!( - "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", - author, - slot, - equivocation_proof.first_header.hash(), - equivocation_proof.second_header.hash(), - ); - - // get the best block on which we will build and send the equivocation report. - let best_hash = self - .select_chain - .best_chain() - .await - .map(|h| h.hash()) - .map_err(|e| Error::Client(e.into()))?; - - // generate a key ownership proof. we start by trying to generate the - // key ownership proof at the parent of the equivocating header, this - // will make sure that proof generation is successful since it happens - // during the on-going session (i.e. session keys are available in the - // state to be able to generate the proof). this might fail if the - // equivocation happens on the first block of the session, in which case - // its parent would be on the previous session. if generation on the - // parent header fails we try with best block as well. - let generate_key_owner_proof = |at_hash: Block::Hash| { - self.client - .runtime_api() - .generate_key_ownership_proof(at_hash, slot, equivocation_proof.offender.clone()) - .map_err(Error::RuntimeApi) - }; - - let parent_hash = *header.parent_hash(); - let key_owner_proof = match generate_key_owner_proof(parent_hash)? { - Some(proof) => proof, - None => match generate_key_owner_proof(best_hash)? { - Some(proof) => proof, - None => { - debug!( - target: LOG_TARGET, - "Equivocation offender is not part of the authority set." 
- ); - return Ok(()) - }, - }, - }; - - // submit equivocation report at best block. - let mut runtime_api = self.client.runtime_api(); - - // Register the offchain tx pool to be able to use it from the runtime. - runtime_api - .register_extension(self.offchain_tx_pool_factory.offchain_transaction_pool(best_hash)); - - runtime_api - .submit_report_equivocation_unsigned_extrinsic( - best_hash, - equivocation_proof, - key_owner_proof, - ) - .map_err(Error::RuntimeApi)?; - - info!(target: LOG_TARGET, "Submitted equivocation report for author {:?}", author); - - Ok(()) - } } #[async_trait::async_trait] -impl Verifier - for BabeVerifier +impl Verifier for BabeVerifier where Block: BlockT, Client: HeaderMetadata @@ -1126,9 +999,6 @@ where + Sync + AuxStore, Client::Api: BlockBuilderApi + BabeApi, - SelectChain: sp_consensus::SelectChain, - CIDP: CreateInherentDataProviders + Send + Sync, - CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, { async fn verify( &self, @@ -1168,34 +1038,18 @@ where block.header.digest().logs().len() ); - let create_inherent_data_providers = self - .create_inherent_data_providers - .create_inherent_data_providers(parent_hash, ()) - .await - .map_err(|e| Error::::Client(ConsensusError::from(e).into()))?; - - let slot_now = create_inherent_data_providers.slot(); - - let parent_header_metadata = self - .client - .header_metadata(parent_hash) - .map_err(Error::::FetchParentHeader)?; + let slot_now = Slot::from_timestamp(Timestamp::current(), self.slot_duration); let pre_digest = find_pre_digest::(&block.header)?; let (check_header, epoch_descriptor) = { - let epoch_changes = self.epoch_changes.shared_data(); - let epoch_descriptor = epoch_changes - .epoch_descriptor_for_child_of( - descendent_query(&*self.client), - &parent_hash, - parent_header_metadata.number, - pre_digest.slot(), - ) - .map_err(|e| Error::::ForkTree(Box::new(e)))? - .ok_or(Error::::FetchEpoch(parent_hash))?; - let viable_epoch = epoch_changes - .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&self.config, slot)) - .ok_or(Error::::FetchEpoch(parent_hash))?; + let (epoch_descriptor, viable_epoch) = query_epoch_changes( + &self.epoch_changes, + self.client.as_ref(), + &self.config, + number, + pre_digest.slot(), + parent_hash, + )?; // We add one to the current slot to allow for some small drift. // FIXME #1019 in the future, alter this queue to allow deferring of headers @@ -1211,56 +1065,6 @@ where match check_header { CheckedHeader::Checked(pre_header, verified_info) => { - let babe_pre_digest = verified_info - .pre_digest - .as_babe_pre_digest() - .expect("check_header always returns a pre-digest digest item; qed"); - let slot = babe_pre_digest.slot(); - - // the header is valid but let's check if there was something else already - // proposed at the same slot by the given author. if there was, we will - // report the equivocation to the runtime. - if let Err(err) = self - .check_and_report_equivocation( - slot_now, - slot, - &block.header, - &verified_info.author, - &block.origin, - ) - .await - { - warn!( - target: LOG_TARGET, - "Error checking/reporting BABE equivocation: {}", err - ); - } - - if let Some(inner_body) = block.body { - let new_block = Block::new(pre_header.clone(), inner_body); - if !block.state_action.skip_execution_checks() { - // if the body is passed through and the block was executed, - // we need to use the runtime to check that the internally-set - // timestamp in the inherents actually matches the slot set in the seal. 
- let mut inherent_data = create_inherent_data_providers - .create_inherent_data() - .await - .map_err(Error::::CreateInherents)?; - inherent_data.babe_replace_inherent_data(slot); - - self.check_inherents( - new_block.clone(), - parent_hash, - inherent_data, - create_inherent_data_providers, - ) - .await?; - } - - let (_, inner_body) = new_block.deconstruct(); - block.body = Some(inner_body); - } - trace!(target: LOG_TARGET, "Checked {:?}; importing.", pre_header); telemetry!( self.telemetry; @@ -1301,36 +1105,62 @@ where /// it is missing. /// /// The epoch change tree should be pruned as blocks are finalized. -pub struct BabeBlockImport { +pub struct BabeBlockImport { inner: I, client: Arc, epoch_changes: SharedEpochChanges, + create_inherent_data_providers: CIDP, config: BabeConfiguration, + // A [`SelectChain`] implementation. + // + // Used to determine the best block that should be used as basis when sending an equivocation + // report. + select_chain: SC, + // The offchain transaction pool factory. + // + // Will be used when sending equivocation reports. + offchain_tx_pool_factory: OffchainTransactionPoolFactory, } -impl Clone for BabeBlockImport { +impl Clone + for BabeBlockImport +{ fn clone(&self) -> Self { BabeBlockImport { inner: self.inner.clone(), client: self.client.clone(), epoch_changes: self.epoch_changes.clone(), config: self.config.clone(), + create_inherent_data_providers: self.create_inherent_data_providers.clone(), + select_chain: self.select_chain.clone(), + offchain_tx_pool_factory: self.offchain_tx_pool_factory.clone(), } } } -impl BabeBlockImport { +impl BabeBlockImport { fn new( client: Arc, epoch_changes: SharedEpochChanges, block_import: I, config: BabeConfiguration, + create_inherent_data_providers: CIDP, + select_chain: SC, + offchain_tx_pool_factory: OffchainTransactionPoolFactory, ) -> Self { - BabeBlockImport { client, inner: block_import, epoch_changes, config } + BabeBlockImport { + client, + inner: block_import, + epoch_changes, + config, + create_inherent_data_providers, + select_chain, + offchain_tx_pool_factory, + } } } -impl BabeBlockImport +impl BabeBlockImport where Block: BlockT, Inner: BlockImport + Send + Sync, @@ -1341,7 +1171,9 @@ where + ProvideRuntimeApi + Send + Sync, - Client::Api: BabeApi + ApiExt, + Client::Api: BlockBuilderApi + BabeApi + ApiExt, + CIDP: CreateInherentDataProviders, + SC: sp_consensus::SelectChain + 'static, { /// Import whole state after warp sync. // This function makes multiple transactions to the DB. If one of them fails we may @@ -1391,10 +1223,148 @@ where Ok(ImportResult::Imported(aux)) } + + async fn check_inherents( + &self, + block: &mut BlockImportParams, + at_hash: Block::Hash, + slot: Slot, + create_inherent_data_providers: CIDP::InherentDataProviders, + ) -> Result<(), ConsensusError> { + if block.state_action.skip_execution_checks() { + return Ok(()) + } + + if let Some(inner_body) = block.body.take() { + let new_block = Block::new(block.header.clone(), inner_body); + // if the body is passed through and the block was executed, + // we need to use the runtime to check that the internally-set + // timestamp in the inherents actually matches the slot set in the seal. 
+ let mut inherent_data = create_inherent_data_providers + .create_inherent_data() + .await + .map_err(|e| ConsensusError::Other(Box::new(e)))?; + inherent_data.babe_replace_inherent_data(slot); + + use sp_block_builder::CheckInherentsError; + + sp_block_builder::check_inherents_with_data( + self.client.clone(), + at_hash, + new_block.clone(), + &create_inherent_data_providers, + inherent_data, + ) + .await + .map_err(|e| { + ConsensusError::Other(Box::new(match e { + CheckInherentsError::CreateInherentData(e) => + Error::::CreateInherents(e), + CheckInherentsError::Client(e) => Error::RuntimeApi(e), + CheckInherentsError::CheckInherents(e) => Error::CheckInherents(e), + CheckInherentsError::CheckInherentsUnknownError(id) => + Error::CheckInherentsUnhandled(id), + })) + })?; + let (_, inner_body) = new_block.deconstruct(); + block.body = Some(inner_body); + } + + Ok(()) + } + + async fn check_and_report_equivocation( + &self, + slot_now: Slot, + slot: Slot, + header: &Block::Header, + author: &AuthorityId, + origin: &BlockOrigin, + ) -> Result<(), Error> { + // don't report any equivocations during initial sync + // as they are most likely stale. + if *origin == BlockOrigin::NetworkInitialSync { + return Ok(()) + } + + // check if authorship of this header is an equivocation and return a proof if so. + let Some(equivocation_proof) = + check_equivocation(&*self.client, slot_now, slot, header, author) + .map_err(Error::Client)? + else { + return Ok(()) + }; + + info!( + "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", + author, + slot, + equivocation_proof.first_header.hash(), + equivocation_proof.second_header.hash(), + ); + + // get the best block on which we will build and send the equivocation report. + let best_hash = self + .select_chain + .best_chain() + .await + .map(|h| h.hash()) + .map_err(|e| Error::Client(e.into()))?; + + // generate a key ownership proof. we start by trying to generate the + // key ownership proof at the parent of the equivocating header, this + // will make sure that proof generation is successful since it happens + // during the on-going session (i.e. session keys are available in the + // state to be able to generate the proof). this might fail if the + // equivocation happens on the first block of the session, in which case + // its parent would be on the previous session. if generation on the + // parent header fails we try with best block as well. + let generate_key_owner_proof = |at_hash: Block::Hash| { + self.client + .runtime_api() + .generate_key_ownership_proof(at_hash, slot, equivocation_proof.offender.clone()) + .map_err(Error::RuntimeApi) + }; + + let parent_hash = *header.parent_hash(); + let key_owner_proof = match generate_key_owner_proof(parent_hash)? { + Some(proof) => proof, + None => match generate_key_owner_proof(best_hash)? { + Some(proof) => proof, + None => { + debug!( + target: LOG_TARGET, + "Equivocation offender is not part of the authority set." + ); + return Ok(()) + }, + }, + }; + + // submit equivocation report at best block. + let mut runtime_api = self.client.runtime_api(); + + // Register the offchain tx pool to be able to use it from the runtime. 
+ runtime_api + .register_extension(self.offchain_tx_pool_factory.offchain_transaction_pool(best_hash)); + + runtime_api + .submit_report_equivocation_unsigned_extrinsic( + best_hash, + equivocation_proof, + key_owner_proof, + ) + .map_err(Error::RuntimeApi)?; + + info!(target: LOG_TARGET, "Submitted equivocation report for author {:?}", author); + + Ok(()) + } } #[async_trait::async_trait] -impl BlockImport for BabeBlockImport +impl BlockImport + for BabeBlockImport where Block: BlockT, Inner: BlockImport + Send + Sync, @@ -1405,7 +1375,10 @@ where + ProvideRuntimeApi + Send + Sync, - Client::Api: BabeApi + ApiExt, + Client::Api: BlockBuilderApi + BabeApi + ApiExt, + CIDP: CreateInherentDataProviders, + CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, + SC: SelectChain + 'static, { type Error = ConsensusError; @@ -1416,6 +1389,54 @@ where let hash = block.post_hash(); let number = *block.header.number(); let info = self.client.info(); + let parent_hash = *block.header.parent_hash(); + + let create_inherent_data_providers = self + .create_inherent_data_providers + .create_inherent_data_providers(parent_hash, ()) + .await?; + + let slot_now = create_inherent_data_providers.slot(); + + let babe_pre_digest = find_pre_digest::(&block.header) + .map_err(|e| ConsensusError::Other(Box::new(e)))?; + let slot = babe_pre_digest.slot(); + + // Check inherents. + self.check_inherents(&mut block, parent_hash, slot, create_inherent_data_providers) + .await?; + + // Check for equivocation and report it to the runtime if needed. + let author = { + let viable_epoch = query_epoch_changes( + &self.epoch_changes, + self.client.as_ref(), + &self.config, + number, + slot, + parent_hash, + ) + .map_err(|e| ConsensusError::Other(babe_err(e).into()))? + .1; + match viable_epoch + .as_ref() + .authorities + .get(babe_pre_digest.authority_index() as usize) + { + Some(author) => author.0.clone(), + None => + return Err(ConsensusError::Other(Error::::SlotAuthorNotFound.into())), + } + }; + if let Err(err) = self + .check_and_report_equivocation(slot_now, slot, &block.header, &author, &block.origin) + .await + { + warn!( + target: LOG_TARGET, + "Error checking/reporting BABE equivocation: {}", err + ); + } let block_status = self .client @@ -1734,11 +1755,14 @@ where /// /// Also returns a link object used to correctly instantiate the import queue /// and background worker. -pub fn block_import( +pub fn block_import( config: BabeConfiguration, wrapped_block_import: I, client: Arc, -) -> ClientResult<(BabeBlockImport, BabeLink)> + create_inherent_data_providers: CIDP, + select_chain: SC, + offchain_tx_pool_factory: OffchainTransactionPoolFactory, +) -> ClientResult<(BabeBlockImport, BabeLink)> where Client: AuxStore + HeaderBackend @@ -1764,13 +1788,21 @@ where }; client.register_finality_action(Box::new(on_finality)); - let import = BabeBlockImport::new(client, epoch_changes, wrapped_block_import, config); + let import = BabeBlockImport::new( + client, + epoch_changes, + wrapped_block_import, + config, + create_inherent_data_providers, + select_chain, + offchain_tx_pool_factory, + ); Ok((import, link)) } /// Parameters passed to [`import_queue`]. -pub struct ImportQueueParams<'a, Block: BlockT, BI, Client, CIDP, SelectChain, Spawn> { +pub struct ImportQueueParams<'a, Block: BlockT, BI, Client, Spawn> { /// The BABE link that is created by [`block_import`]. pub link: BabeLink, /// The block import that should be wrapped. 
@@ -1779,26 +1811,14 @@ pub struct ImportQueueParams<'a, Block: BlockT, BI, Client, CIDP, SelectChain, S pub justification_import: Option>, /// The client to interact with the internals of the node. pub client: Arc, - /// A [`SelectChain`] implementation. - /// - /// Used to determine the best block that should be used as basis when sending an equivocation - /// report. - pub select_chain: SelectChain, - /// Used to crate the inherent data providers. - /// - /// These inherent data providers are then used to create the inherent data that is - /// passed to the `check_inherents` runtime call. - pub create_inherent_data_providers: CIDP, + /// Slot duration. + pub slot_duration: SlotDuration, /// Spawner for spawning futures. pub spawner: &'a Spawn, /// Registry for prometheus metrics. pub registry: Option<&'a Registry>, /// Optional telemetry handle to report telemetry events. pub telemetry: Option, - /// The offchain transaction pool factory. - /// - /// Will be used when sending equivocation reports. - pub offchain_tx_pool_factory: OffchainTransactionPoolFactory, } /// Start an import queue for the BABE consensus algorithm. @@ -1810,19 +1830,17 @@ pub struct ImportQueueParams<'a, Block: BlockT, BI, Client, CIDP, SelectChain, S /// /// The block import object provided must be the `BabeBlockImport` or a wrapper /// of it, otherwise crucial import logic will be omitted. -pub fn import_queue( +pub fn import_queue( ImportQueueParams { link: babe_link, block_import, justification_import, client, - select_chain, - create_inherent_data_providers, + slot_duration, spawner, registry, telemetry, - offchain_tx_pool_factory, - }: ImportQueueParams<'_, Block, BI, Client, CIDP, SelectChain, Spawn>, + }: ImportQueueParams<'_, Block, BI, Client, Spawn>, ) -> ClientResult<(DefaultImportQueue, BabeWorkerHandle)> where BI: BlockImport + Send + Sync + 'static, @@ -1834,21 +1852,16 @@ where + Sync + 'static, Client::Api: BlockBuilderApi + BabeApi + ApiExt, - SelectChain: sp_consensus::SelectChain + 'static, - CIDP: CreateInherentDataProviders + Send + Sync + 'static, - CIDP::InherentDataProviders: InherentDataProviderExt + Send + Sync, Spawn: SpawnEssentialNamed, { const HANDLE_BUFFER_SIZE: usize = 1024; let verifier = BabeVerifier { - select_chain, - create_inherent_data_providers, + slot_duration, config: babe_link.config.clone(), epoch_changes: babe_link.epoch_changes.clone(), telemetry, client: client.clone(), - offchain_tx_pool_factory, }; let (worker_tx, worker_rx) = channel(HANDLE_BUFFER_SIZE); @@ -1940,3 +1953,34 @@ where client.insert_aux(values, weight_keys.iter()) }) } + +fn query_epoch_changes( + epoch_changes: &SharedEpochChanges, + client: &Client, + config: &BabeConfiguration, + block_number: NumberFor, + slot: Slot, + parent_hash: Block::Hash, +) -> Result< + (ViableEpochDescriptor, Epoch>, ViableEpoch), + Error, +> +where + Block: BlockT, + Client: HeaderBackend + HeaderMetadata, +{ + let epoch_changes = epoch_changes.shared_data(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(client), + &parent_hash, + block_number - 1u32.into(), + slot, + ) + .map_err(|e| Error::::ForkTree(Box::new(e)))? 
+ .ok_or(Error::::FetchEpoch(parent_hash))?; + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| Epoch::genesis(&config, slot)) + .ok_or(Error::::FetchEpoch(parent_hash))?; + Ok((epoch_descriptor, viable_epoch.into_cloned())) +} diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index 70fb5a5529d98..e5f8c26a4488c 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -30,19 +30,20 @@ use sc_transaction_pool_api::RejectAllTxPool; use sp_application_crypto::key_types::BABE; use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; use sp_consensus_babe::{ - inherents::InherentDataProvider, make_vrf_sign_data, AllowedSlots, AuthorityId, AuthorityPair, - Slot, + inherents::{BabeCreateInherentDataProviders, InherentDataProvider}, + make_vrf_sign_data, AllowedSlots, AuthorityId, AuthorityPair, Slot, }; use sp_consensus_slots::SlotDuration; use sp_core::crypto::Pair; +use sp_inherents::InherentData; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore}; use sp_runtime::{ generic::{Digest, DigestItem}, traits::Block as BlockT, }; -use sp_timestamp::Timestamp; use std::{cell::RefCell, task::Poll, time::Duration}; +use substrate_test_runtime_client::DefaultTestClientBuilderExt; type Item = DigestItem; @@ -63,8 +64,15 @@ enum Stage { type Mutator = Arc; -type BabeBlockImport = - PanickingBlockImport>>; +type BabeBlockImport = PanickingBlockImport< + crate::BabeBlockImport< + TestBlock, + TestClient, + Arc, + BabeCreateInherentDataProviders, + sc_consensus::LongestChain, + >, +>; const SLOT_DURATION_MS: u64 = 1000; @@ -173,22 +181,8 @@ pub struct BabeTestNet { type TestHeader = ::Header; -type TestSelectChain = - substrate_test_runtime_client::LongestChain; - pub struct TestVerifier { - inner: BabeVerifier< - TestBlock, - PeersFullClient, - TestSelectChain, - Box< - dyn CreateInherentDataProviders< - TestBlock, - (), - InherentDataProviders = (InherentDataProvider,), - >, - >, - >, + inner: BabeVerifier, mutator: Mutator, } @@ -228,8 +222,23 @@ impl TestNetFactory for BabeTestNet { let client = client.as_client(); let config = crate::configuration(&*client).expect("config available"); - let (block_import, link) = crate::block_import(config, client.clone(), client.clone()) - .expect("can initialize block-import"); + let (_, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + let (block_import, link) = crate::block_import( + config, + client.clone(), + client.clone(), + Arc::new(move |_, _| async { + let timestamp = sp_timestamp::InherentDataProvider::from_system_time(); + let slot = InherentDataProvider::from_timestamp_and_slot_duration( + *timestamp, + SlotDuration::from_millis(SLOT_DURATION_MS), + ); + Ok((slot, timestamp)) + }) as BabeCreateInherentDataProviders, + longest_chain, + OffchainTransactionPoolFactory::new(RejectAllTxPool::default()), + ) + .expect("can initialize block-import"); let block_import = PanickingBlockImport(block_import); @@ -243,8 +252,6 @@ impl TestNetFactory for BabeTestNet { } fn make_verifier(&self, client: PeersClient, maybe_link: &Option) -> Self::Verifier { - use substrate_test_runtime_client::DefaultTestClientBuilderExt; - let client = client.as_client(); trace!(target: LOG_TARGET, "Creating a verifier"); @@ -253,25 +260,13 @@ impl TestNetFactory for BabeTestNet { .as_ref() .expect("babe link always provided to verifier instantiation"); - let (_, longest_chain) = 
TestClientBuilder::new().build_with_longest_chain(); - TestVerifier { inner: BabeVerifier { client: client.clone(), - select_chain: longest_chain, - create_inherent_data_providers: Box::new(|_, _| async { - let slot = InherentDataProvider::from_timestamp_and_slot_duration( - Timestamp::current(), - SlotDuration::from_millis(SLOT_DURATION_MS), - ); - Ok((slot,)) - }), + slot_duration: SlotDuration::from_millis(SLOT_DURATION_MS), config: data.link.config.clone(), epoch_changes: data.link.epoch_changes.clone(), telemetry: None, - offchain_tx_pool_factory: OffchainTransactionPoolFactory::new( - RejectAllTxPool::default(), - ), }, mutator: MUTATOR.with(|m| m.borrow().clone()), } @@ -430,7 +425,7 @@ async fn authoring_blocks() { } #[tokio::test] -#[should_panic(expected = "valid babe headers must contain a predigest")] +#[should_panic(expected = "importing block failed: Other(NoPreRuntimeDigest)")] async fn rejects_missing_inherent_digest() { run_one_test(|header: &mut TestHeader, stage| { let v = std::mem::take(&mut header.digest_mut().logs); diff --git a/substrate/client/consensus/babe/src/verification.rs b/substrate/client/consensus/babe/src/verification.rs index c6e4ec0c10c13..e2c37931d27fe 100644 --- a/substrate/client/consensus/babe/src/verification.rs +++ b/substrate/client/consensus/babe/src/verification.rs @@ -30,7 +30,7 @@ use sp_consensus_babe::{ CompatibleDigestItem, PreDigest, PrimaryPreDigest, SecondaryPlainPreDigest, SecondaryVRFPreDigest, }, - make_vrf_sign_data, AuthorityId, AuthorityPair, AuthoritySignature, + make_vrf_sign_data, AuthorityPair, AuthoritySignature, }; use sp_consensus_slots::Slot; use sp_core::{ @@ -69,7 +69,6 @@ pub(super) fn check_header( ) -> Result, Error> { let VerificationParams { mut header, pre_digest, slot_now, epoch } = params; - let authorities = &epoch.authorities; let pre_digest = pre_digest.map(Ok).unwrap_or_else(|| find_pre_digest::(&header))?; trace!(target: LOG_TARGET, "Checking header"); @@ -91,11 +90,6 @@ pub(super) fn check_header( return Ok(CheckedHeader::Deferred(header, pre_digest.slot())) } - let author = match authorities.get(pre_digest.authority_index() as usize) { - Some(author) => author.0.clone(), - None => return Err(babe_err(Error::SlotAuthorNotFound)), - }; - match &pre_digest { PreDigest::Primary(primary) => { debug!( @@ -134,18 +128,12 @@ pub(super) fn check_header( _ => return Err(babe_err(Error::SecondarySlotAssignmentsDisabled)), } - let info = VerifiedHeaderInfo { - pre_digest: CompatibleDigestItem::babe_pre_digest(pre_digest), - seal, - author, - }; + let info = VerifiedHeaderInfo { seal }; Ok(CheckedHeader::Checked(header, info)) } pub(super) struct VerifiedHeaderInfo { - pub(super) pre_digest: DigestItem, pub(super) seal: DigestItem, - pub(super) author: AuthorityId, } /// Check a primary slot proposal header. We validate that the given header is @@ -159,7 +147,11 @@ fn check_primary_header( epoch: &Epoch, c: (u64, u64), ) -> Result<(), Error> { - let authority_id = &epoch.authorities[pre_digest.authority_index as usize].0; + let authority_id = &epoch + .authorities + .get(pre_digest.authority_index as usize) + .ok_or(Error::SlotAuthorNotFound)? 
+ .0; let mut epoch_index = epoch.epoch_index; if epoch.end_slot() <= pre_digest.slot { @@ -212,7 +204,11 @@ fn check_secondary_plain_header( secondary_slot_author(pre_digest.slot, &epoch.authorities, epoch.randomness) .ok_or(Error::NoSecondaryAuthorExpected)?; - let author = &epoch.authorities[pre_digest.authority_index as usize].0; + let author = &epoch + .authorities + .get(pre_digest.authority_index as usize) + .ok_or(Error::SlotAuthorNotFound)? + .0; if expected_author != author { return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())) @@ -237,7 +233,11 @@ fn check_secondary_vrf_header( secondary_slot_author(pre_digest.slot, &epoch.authorities, epoch.randomness) .ok_or(Error::NoSecondaryAuthorExpected)?; - let author = &epoch.authorities[pre_digest.authority_index as usize].0; + let author = &epoch + .authorities + .get(pre_digest.authority_index as usize) + .ok_or(Error::SlotAuthorNotFound)? + .0; if expected_author != author { return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())) diff --git a/substrate/primitives/consensus/babe/src/inherents.rs b/substrate/primitives/consensus/babe/src/inherents.rs index 54b7b64401673..be5aa52d03206 100644 --- a/substrate/primitives/consensus/babe/src/inherents.rs +++ b/substrate/primitives/consensus/babe/src/inherents.rs @@ -24,6 +24,17 @@ pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"babeslot"; /// The type of the BABE inherent. pub type InherentType = sp_consensus_slots::Slot; + +/// Create inherent data providers for BABE with timestamp. +#[cfg(feature = "std")] +pub type BabeCreateInherentDataProviders = std::sync::Arc< + dyn sp_inherents::CreateInherentDataProviders< + Block, + (), + InherentDataProviders = (InherentDataProvider, sp_timestamp::InherentDataProvider), + >, +>; + /// Auxiliary trait to extract BABE inherent data. pub trait BabeInherentData { /// Get BABE inherent data. diff --git a/substrate/primitives/inherents/src/client_side.rs b/substrate/primitives/inherents/src/client_side.rs index 3c299dfa4eea0..bbbb72c0eda01 100644 --- a/substrate/primitives/inherents/src/client_side.rs +++ b/substrate/primitives/inherents/src/client_side.rs @@ -15,6 +15,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +use std::sync::Arc; + use crate::{Error, InherentData, InherentIdentifier}; use sp_runtime::traits::Block as BlockT; @@ -23,9 +25,9 @@ use sp_runtime::traits::Block as BlockT; /// It is possible for the caller to provide custom arguments to the callee by setting the /// `ExtraArgs` generic parameter. /// -/// The crate already provides some convince implementations of this trait for -/// `Box` and closures. So, it should not be required to implement -/// this trait manually. +/// The crate already provides some convenience implementations of this trait for +/// `Box`, `Arc` and closures. So, +/// it should not be required to implement this trait manually. #[async_trait::async_trait] pub trait CreateInherentDataProviders: Send + Sync { /// The inherent data providers that will be created. @@ -77,6 +79,22 @@ impl } } +#[async_trait::async_trait] +impl + CreateInherentDataProviders + for Arc> +{ + type InherentDataProviders = IDPS; + + async fn create_inherent_data_providers( + &self, + parent: Block::Hash, + extra_args: ExtraArgs, + ) -> Result> { + (**self).create_inherent_data_providers(parent, extra_args).await + } +} + /// Something that provides inherent data. 
 #[async_trait::async_trait]
 pub trait InherentDataProvider: Send + Sync {
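
For orientation, the following is a condensed sketch of the node-side wiring this change expects, distilled from the `new_partial` functions above rather than copied from any single file; identifiers such as `Block`, `client`, `select_chain`, `transaction_pool`, `justification_import`, `telemetry`, `config` and `task_manager` (and the matching imports) are assumed to already be in scope. The inherent data providers, the `SelectChain` and the offchain transaction pool factory now go to `block_import`, while `import_queue` only needs the slot duration.

let babe_config = sc_consensus_babe::configuration(&*client)?;
let slot_duration = babe_config.slot_duration();

let (block_import, babe_link) = sc_consensus_babe::block_import(
    babe_config,
    wrapped_block_import, // e.g. the GRANDPA- or BEEFY-wrapped block import
    client.clone(),
    // Inherent checks and equivocation reporting now run at block import,
    // so this is where the inherent data providers are supplied.
    Arc::new(move |_, _| async move {
        let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
        let slot =
            sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                *timestamp,
                slot_duration,
            );
        Ok((slot, timestamp))
    }) as BabeCreateInherentDataProviders<Block>,
    select_chain.clone(),
    OffchainTransactionPoolFactory::new(transaction_pool.clone()),
)?;

// The verifier built by `import_queue` is stateless and only needs the slot
// duration to derive the current slot from local wall-clock time.
let (import_queue, babe_worker_handle) =
    sc_consensus_babe::import_queue(sc_consensus_babe::ImportQueueParams {
        link: babe_link.clone(),
        block_import: block_import.clone(),
        justification_import: Some(Box::new(justification_import)),
        client: client.clone(),
        slot_duration,
        spawner: &task_manager.spawn_essential_handle(),
        registry: config.prometheus_registry(),
        telemetry: telemetry.as_ref().map(|x| x.handle()),
    })?;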
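
The `Arc` forwarding impl added to `sp-inherents` above is what makes the `BabeCreateInherentDataProviders<Block>` alias (an `Arc<dyn CreateInherentDataProviders<...>>`) usable wherever a `CreateInherentDataProviders` is expected, so the providers can be built once and cloned into every consumer. A minimal helper along these lines, hypothetical and not part of the diff, illustrates the pattern:

use std::sync::Arc;

use sp_consensus_babe::inherents::BabeCreateInherentDataProviders;
use sp_consensus_slots::SlotDuration;
use sp_runtime::traits::Block as BlockT;

/// Build the `(slot, timestamp)` provider pair once; the returned `Arc` can be
/// cloned cheaply into every consumer that needs it (e.g. the BABE block import).
fn babe_inherent_data_providers<Block: BlockT>(
    slot_duration: SlotDuration,
) -> BabeCreateInherentDataProviders<Block> {
    Arc::new(move |_parent, _extra| async move {
        let timestamp = sp_timestamp::InherentDataProvider::from_system_time();
        let slot =
            sp_consensus_babe::inherents::InherentDataProvider::from_timestamp_and_slot_duration(
                *timestamp,
                slot_duration,
            );
        Ok((slot, timestamp))
    }) as BabeCreateInherentDataProviders<Block>
}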
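
Finally, the stateless side of the new `BabeVerifier`: rather than creating inherent data providers per block, it derives the current slot from local wall-clock time, effectively `slot_now = unix_millis / slot_duration_millis`, via the same call the diff adds in `verify`:

use sp_consensus_slots::{Slot, SlotDuration};
use sp_timestamp::Timestamp;

// Equivalent to the `slot_now` computation in the new verifier; with a
// 6-second slot duration, a timestamp of 12_000 ms yields slot 2.
fn current_slot(slot_duration: SlotDuration) -> Slot {
    Slot::from_timestamp(Timestamp::current(), slot_duration)
}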