diff --git a/Cargo.lock b/Cargo.lock index 0bde6c6b90931..18b4c1da40ea4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4431,7 +4431,6 @@ dependencies = [ "async-trait", "cumulus-client-collator", "cumulus-client-consensus-common", - "cumulus-client-consensus-proposer", "cumulus-client-parachain-inherent", "cumulus-primitives-aura", "cumulus-primitives-core", @@ -4463,6 +4462,7 @@ dependencies = [ "sp-consensus", "sp-consensus-aura", "sp-core 28.0.0", + "sp-externalities 0.25.0", "sp-inherents", "sp-keyring", "sp-keystore", @@ -4513,25 +4513,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "cumulus-client-consensus-proposer" -version = "0.7.0" -dependencies = [ - "anyhow", - "async-trait", - "cumulus-primitives-parachain-inherent", - "sc-basic-authorship", - "sc-block-builder", - "sc-transaction-pool-api", - "sp-api", - "sp-blockchain", - "sp-consensus", - "sp-inherents", - "sp-runtime", - "sp-state-machine", - "thiserror 1.0.65", -] - [[package]] name = "cumulus-client-consensus-relay-chain" version = "0.7.0" @@ -5166,6 +5147,7 @@ dependencies = [ "sp-keystore", "sp-runtime", "sp-timestamp", + "sp-trie", "substrate-test-client", ] @@ -5234,7 +5216,6 @@ dependencies = [ "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", - "cumulus-client-consensus-proposer", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", "cumulus-client-service", @@ -16154,7 +16135,6 @@ dependencies = [ "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", - "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-parachain-inherent", "cumulus-client-service", @@ -16533,7 +16513,6 @@ dependencies = [ "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", - "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", @@ -19818,6 +19797,7 
@@ dependencies = [ "sp-core 28.0.0", "sp-inherents", "sp-runtime", + "sp-state-machine", "sp-trie", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -19832,10 +19812,10 @@ dependencies = [ "sp-block-builder", "sp-blockchain", "sp-core 28.0.0", + "sp-externalities 0.25.0", "sp-inherents", "sp-runtime", "sp-state-machine", - "sp-trie", "substrate-test-runtime-client", ] @@ -20287,10 +20267,12 @@ dependencies = [ "sp-consensus-babe", "sp-consensus-slots", "sp-core 28.0.0", + "sp-externalities 0.25.0", "sp-inherents", "sp-keystore", "sp-runtime", "sp-timestamp", + "sp-trie", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", @@ -20342,6 +20324,7 @@ dependencies = [ "sp-inherents", "sp-runtime", "sp-state-machine", + "sp-trie", "substrate-test-runtime-client", ] @@ -23075,9 +23058,12 @@ dependencies = [ "async-trait", "futures", "log", + "sp-api", + "sp-externalities 0.25.0", "sp-inherents", "sp-runtime", "sp-state-machine", + "sp-trie", "thiserror 1.0.65", ] diff --git a/Cargo.toml b/Cargo.toml index ccbd5497268e4..c83e57fdae3a3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -67,7 +67,6 @@ members = [ "cumulus/client/collator", "cumulus/client/consensus/aura", "cumulus/client/consensus/common", - "cumulus/client/consensus/proposer", "cumulus/client/consensus/relay-chain", "cumulus/client/network", "cumulus/client/parachain-inherent", @@ -749,7 +748,6 @@ cumulus-client-cli = { path = "cumulus/client/cli", default-features = false } cumulus-client-collator = { path = "cumulus/client/collator", default-features = false } cumulus-client-consensus-aura = { path = "cumulus/client/consensus/aura", default-features = false } cumulus-client-consensus-common = { path = "cumulus/client/consensus/common", default-features = false } -cumulus-client-consensus-proposer = { path = "cumulus/client/consensus/proposer", default-features = false } cumulus-client-consensus-relay-chain = { path = 
"cumulus/client/consensus/relay-chain", default-features = false } cumulus-client-network = { path = "cumulus/client/network", default-features = false } cumulus-client-parachain-inherent = { path = "cumulus/client/parachain-inherent", default-features = false } diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index 28fa9e32eae77..c64aa5455b323 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -37,6 +37,7 @@ sp-blockchain = { workspace = true, default-features = true } sp-consensus = { workspace = true, default-features = true } sp-consensus-aura = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } @@ -47,7 +48,6 @@ sp-trie = { workspace = true, default-features = true } # Cumulus cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } -cumulus-client-consensus-proposer = { workspace = true, default-features = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } cumulus-primitives-aura = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index 457be6e632df3..3999352322e20 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ b/cumulus/client/consensus/aura/src/collator.rs @@ -30,22 +30,25 @@ use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterfa use cumulus_client_consensus_common::{ self as consensus_common, ParachainBlockImportMarker, 
ParachainCandidate, }; -use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_client_parachain_inherent::{ParachainInherentData, ParachainInherentDataProvider}; use cumulus_primitives_core::{ relay_chain::Hash as PHash, DigestItem, ParachainBlockData, PersistedValidationData, }; use cumulus_relay_chain_interface::RelayChainInterface; +use sc_client_api::BackendTransaction; +use sp_consensus::{Environment, ProposeArgs, Proposer}; use polkadot_node_primitives::{Collation, MaybeCompressedPoV}; use polkadot_primitives::{Header as PHeader, Id as ParaId}; +use sp_externalities::Extensions; +use sp_trie::proof_size_extension::ProofSizeExt; use crate::collators::RelayParentData; use futures::prelude::*; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}; use sc_consensus_aura::standalone as aura_internal; use sc_network_types::PeerId; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProofRecorder, ProvideRuntimeApi, StorageProof}; use sp_application_crypto::AppPublic; use sp_consensus::BlockOrigin; use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; @@ -61,7 +64,7 @@ use sp_timestamp::Timestamp; use std::{error::Error, time::Duration}; /// Parameters for instantiating a [`Collator`]. -pub struct Params { +pub struct Params { /// A builder for inherent data builders. pub create_inherent_data_providers: CIDP, /// The block import handle. @@ -74,40 +77,83 @@ pub struct Params { pub collator_peer_id: PeerId, /// The identifier of the parachain within the relay-chain. pub para_id: ParaId, - /// The block proposer used for building blocks. - pub proposer: Proposer, + /// The proposer used for building blocks. + pub proposer: PF, /// The collator service used for bundling proposals into collations and announcing /// to the network. pub collator_service: CS, } +/// Parameters for [`Collator::build_block_and_import`]. +pub struct BuildBlockAndImportParams<'a, Block: BlockT, P: Pair> { + /// The parent header to build on top of. 
+ pub parent_header: &'a Block::Header, + /// The slot claim for this block. + pub slot_claim: &'a SlotClaim, + /// Additional pre-digest items to include. + pub additional_pre_digest: Vec, + /// Parachain-specific inherent data. + pub parachain_inherent_data: ParachainInherentData, + /// Other inherent data (timestamp, etc.). + pub extra_inherent_data: InherentData, + /// Maximum duration to spend on block proposal. + pub proposal_duration: Duration, + /// Maximum PoV size in bytes. + pub max_pov_size: usize, + /// Optional [`ProofRecorder`] to use. + /// + /// If not set, a default recorder will be used internally and [`ProofSizeExt`] will be + /// registered. + pub storage_proof_recorder: Option>, + /// Extra extensions to forward to the block production. + pub extra_extensions: Extensions, +} + +/// Result of [`Collator::build_block_and_import`]. +pub struct BuiltBlock { + /// The block that was built. + pub block: Block, + /// The proof that was recorded while building the block. + pub proof: StorageProof, + /// The transaction resulting from building the block. + /// + /// This contains all the state changes. + pub backend_transaction: BackendTransaction>, +} + +impl From> for ParachainCandidate { + fn from(built: BuiltBlock) -> Self { + Self { block: built.block, proof: built.proof } + } +} + /// A utility struct for writing collation logic that makes use of Aura entirely /// or in part. See module docs for more details. 
-pub struct Collator { +pub struct Collator { create_inherent_data_providers: CIDP, block_import: BI, relay_client: RClient, keystore: KeystorePtr, para_id: ParaId, - proposer: Proposer, + proposer: PF, collator_service: CS, _marker: std::marker::PhantomData<(Block, Box)>, } -impl Collator +impl Collator where Block: BlockT, RClient: RelayChainInterface, CIDP: CreateInherentDataProviders + 'static, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - Proposer: ProposerInterface, + PF: Environment, CS: CollatorServiceInterface, P: Pair, P::Public: AppPublic + Member, P::Signature: TryFrom> + Member + Codec, { /// Instantiate a new instance of the `Aura` manager. - pub fn new(params: Params) -> Self { + pub fn new(params: Params) -> Self { Collator { create_inherent_data_providers: params.create_inherent_data_providers, block_import: params.block_import, @@ -191,41 +237,67 @@ where .await } - /// Build and import a parachain block on the given parent header, using the given slot claim. + /// Build and import a parachain block using the given parameters. 
pub async fn build_block_and_import( &mut self, - parent_header: &Block::Header, - slot_claim: &SlotClaim, - additional_pre_digest: impl Into>>, - inherent_data: (ParachainInherentData, InherentData), - proposal_duration: Duration, - max_pov_size: usize, - ) -> Result>, Box> { - let mut digest = additional_pre_digest.into().unwrap_or_default(); - digest.push(slot_claim.pre_digest.clone()); + mut params: BuildBlockAndImportParams<'_, Block, P>, + ) -> Result>, Box> { + let mut digest = params.additional_pre_digest; + digest.push(params.slot_claim.pre_digest.clone()); - let maybe_proposal = self + // Create the proposer using the factory + let proposer = self .proposer - .propose( - &parent_header, - &inherent_data.0, - inherent_data.1, - Digest { logs: digest }, - proposal_duration, - Some(max_pov_size), - ) + .init(¶ms.parent_header) .await .map_err(|e| Box::new(e) as Box)?; - let proposal = match maybe_proposal { - None => return Ok(None), - Some(p) => p, + // Prepare inherent data - merge parachain inherent data with other inherent data + let mut inherent_data_combined = params.extra_inherent_data; + params + .parachain_inherent_data + .provide_inherent_data(&mut inherent_data_combined) + .await + .map_err(|e| Box::new(e) as Box)?; + + let recorder_passed = params.storage_proof_recorder.is_some(); + let storage_proof_recorder = params.storage_proof_recorder.unwrap_or_default(); + let proof_size_ext_registered = + params.extra_extensions.is_registered(ProofSizeExt::type_id()); + + if !proof_size_ext_registered { + params + .extra_extensions + .register(ProofSizeExt::new(storage_proof_recorder.clone())); + } else if proof_size_ext_registered && !recorder_passed { + return Err( + Box::from("`ProofSizeExt` registered, but no `storage_proof_recorder` provided. 
This is a bug.") + as Box + ) + } + + // Create proposal arguments + let propose_args = ProposeArgs { + inherent_data: inherent_data_combined, + inherent_digests: Digest { logs: digest }, + max_duration: params.proposal_duration, + block_size_limit: Some(params.max_pov_size), + extra_extensions: params.extra_extensions, + storage_proof_recorder: Some(storage_proof_recorder.clone()), }; + // Propose the block + let proposal = proposer + .propose(propose_args) + .await + .map_err(|e| Box::new(e) as Box)?; + + let backend_transaction = proposal.storage_changes.transaction.clone(); + let sealed_importable = seal::<_, P>( proposal.block, proposal.storage_changes, - &slot_claim.author_pub, + ¶ms.slot_claim.author_pub, &self.keystore, ) .map_err(|e| e as Box)?; @@ -244,7 +316,9 @@ where .map_err(|e| Box::new(e) as Box) .await?; - Ok(Some(ParachainCandidate { block, proof: proposal.proof })) + let proof = storage_proof_recorder.drain_storage_proof(); + + Ok(Some(BuiltBlock { block, proof, backend_transaction })) } /// Propose, seal, import a block and packaging it into a collation. 
@@ -265,21 +339,24 @@ where max_pov_size: usize, ) -> Result)>, Box> { let maybe_candidate = self - .build_block_and_import( + .build_block_and_import(BuildBlockAndImportParams { parent_header, slot_claim, - additional_pre_digest, - inherent_data, + additional_pre_digest: additional_pre_digest.into().unwrap_or_default(), + parachain_inherent_data: inherent_data.0, + extra_inherent_data: inherent_data.1, proposal_duration, max_pov_size, - ) + storage_proof_recorder: None, + extra_extensions: Default::default(), + }) .await?; let Some(candidate) = maybe_candidate else { return Ok(None) }; let hash = candidate.block.header().hash(); if let Some((collation, block_data)) = - self.collator_service.build_collation(parent_header, hash, candidate) + self.collator_service.build_collation(parent_header, hash, candidate.into()) { block_data.log_size_info(); diff --git a/cumulus/client/consensus/aura/src/collators/basic.rs b/cumulus/client/consensus/aura/src/collators/basic.rs index c10c42e233215..1f99e2f6e5cc0 100644 --- a/cumulus/client/consensus/aura/src/collators/basic.rs +++ b/cumulus/client/consensus/aura/src/collators/basic.rs @@ -28,9 +28,9 @@ use cumulus_client_collator::{ relay_chain_driven::CollationRequest, service::ServiceInterface as CollatorServiceInterface, }; use cumulus_client_consensus_common::ParachainBlockImportMarker; -use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_core::{relay_chain::BlockId as RBlockId, CollectCollationInfo}; use cumulus_relay_chain_interface::RelayChainInterface; +use sp_consensus::Environment; use polkadot_node_primitives::CollationResult; use polkadot_overseer::Handle as OverseerHandle; @@ -77,7 +77,7 @@ pub struct Params { pub overseer_handle: OverseerHandle, /// The length of slots in the relay chain. pub relay_chain_slot_duration: Duration, - /// The underlying block proposer this should call into. + /// The proposer for building blocks. 
pub proposer: Proposer, /// The generic collator service used to plug into this consensus engine. pub collator_service: CS, @@ -109,7 +109,7 @@ where CIDP: CreateInherentDataProviders + Send + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - Proposer: ProposerInterface + Send + Sync + 'static, + Proposer: Environment + Clone + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, P: Pair, P::Public: AppPublic + Member + Codec, diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 8dd695b7bd233..303b5268095c5 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -35,10 +35,10 @@ use codec::{Codec, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; -use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::AuraUnincludedSegmentApi; use cumulus_primitives_core::{CollectCollationInfo, PersistedValidationData}; use cumulus_relay_chain_interface::RelayChainInterface; +use sp_consensus::Environment; use polkadot_node_primitives::SubmitCollationParams; use polkadot_node_subsystem::messages::CollationGenerationMessage; @@ -66,7 +66,7 @@ use sp_timestamp::Timestamp; use std::{path::PathBuf, sync::Arc, time::Duration}; /// Parameters for [`run`]. -pub struct Params { +pub struct Params { /// Inherent data providers. Only non-consensus inherent data should be provided, i.e. /// the timestamp, slot, and paras inherents should be omitted, as they are set by this /// collator. @@ -93,8 +93,8 @@ pub struct Params { pub overseer_handle: OverseerHandle, /// The length of slots in the relay chain. 
pub relay_chain_slot_duration: Duration, - /// The underlying block proposer this should call into. - pub proposer: Proposer, + /// The proposer for building blocks. + pub proposer: ProposerFactory, /// The generic collator service used to plug into this consensus engine. pub collator_service: CS, /// The amount of time to spend authoring each block. @@ -171,7 +171,7 @@ where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - Proposer: ProposerInterface + Send + Sync + 'static, + Proposer: Environment + Clone + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, P: Pair + Send + Sync + 'static, @@ -223,7 +223,7 @@ where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - Proposer: ProposerInterface + Send + Sync + 'static, + Proposer: Environment + Clone + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, P: Pair + Send + Sync + 'static, diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 5fca9bde5bf84..4a58ed81426af 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -19,7 +19,7 @@ use codec::{Codec, Encode}; use super::CollatorMessage; use crate::{ - collator as collator_util, + collator::{self as collator_util, BuildBlockAndImportParams}, collators::{ check_validation_code_or_log, slot_based::{ @@ -32,7 +32,6 @@ use crate::{ }; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use 
cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; -use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ extract_relay_parent, rpsr_digest, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, @@ -50,6 +49,7 @@ use sc_network_types::PeerId; use sp_api::ProvideRuntimeApi; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; +use sp_consensus::Environment; use sp_consensus_aura::AuraApi; use sp_core::crypto::Pair; use sp_inherents::CreateInherentDataProviders; @@ -89,7 +89,7 @@ pub struct BuilderTaskParams< pub collator_peer_id: PeerId, /// The para's ID. pub para_id: ParaId, - /// The underlying block proposer this should call into. + /// The proposer for building blocks. pub proposer: Proposer, /// The generic collator service used to plug into this consensus engine. pub collator_service: CS, @@ -134,7 +134,7 @@ where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - Proposer: ProposerInterface + Send + Sync + 'static, + Proposer: Environment + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, P: Pair + Send + Sync + 'static, @@ -425,14 +425,19 @@ where }; let Ok(Some(candidate)) = collator - .build_block_and_import( - &parent_header, - &slot_claim, - Some(vec![CumulusDigestItem::CoreInfo(core.core_info()).to_digest_item()]), - (parachain_inherent_data, other_inherent_data), - adjusted_authoring_duration, - allowed_pov_size, - ) + .build_block_and_import(BuildBlockAndImportParams { + parent_header: &parent_header, + slot_claim: &slot_claim, + additional_pre_digest: vec![ + CumulusDigestItem::CoreInfo(core.core_info()).to_digest_item() + ], + parachain_inherent_data, + extra_inherent_data: other_inherent_data, + 
proposal_duration: adjusted_authoring_duration, + max_pov_size: allowed_pov_size, + storage_proof_recorder: None, + extra_extensions: Default::default(), + }) .await else { tracing::error!(target: crate::LOG_TARGET, "Unable to build block at slot."); @@ -449,7 +454,7 @@ where if let Err(err) = collator_sender.unbounded_send(CollatorMessage { relay_parent, parent_header: parent_header.clone(), - parachain_candidate: candidate, + parachain_candidate: candidate.into(), validation_code_hash, core_index: core.core_index(), max_pov_size: validation_data.max_pov_size, diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index eac2bb9760d87..39e0bbdb41d73 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -72,7 +72,6 @@ use codec::Codec; use consensus_common::ParachainCandidate; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; -use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_primitives_aura::AuraUnincludedSegmentApi; use cumulus_primitives_core::RelayParentOffsetApi; use cumulus_relay_chain_interface::RelayChainInterface; @@ -87,6 +86,7 @@ use sc_utils::mpsc::tracing_unbounded; use sp_api::ProvideRuntimeApi; use sp_application_crypto::AppPublic; use sp_blockchain::HeaderBackend; +use sp_consensus::Environment; use sp_consensus_aura::AuraApi; use sp_core::{crypto::Pair, traits::SpawnEssentialNamed}; use sp_inherents::CreateInherentDataProviders; @@ -127,7 +127,7 @@ pub struct Params + 'static, CIDP::InherentDataProviders: Send, BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, - Proposer: ProposerInterface + Send + Sync + 'static, + Proposer: Environment + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + Clone 
+ 'static, CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, P: Pair + Send + Sync + 'static, diff --git a/cumulus/client/consensus/proposer/Cargo.toml b/cumulus/client/consensus/proposer/Cargo.toml deleted file mode 100644 index b98c77b3f891b..0000000000000 --- a/cumulus/client/consensus/proposer/Cargo.toml +++ /dev/null @@ -1,31 +0,0 @@ -[package] -name = "cumulus-client-consensus-proposer" -description = "A Substrate `Proposer` for building parachain blocks" -version = "0.7.0" -authors.workspace = true -edition.workspace = true -license = "GPL-3.0-or-later WITH Classpath-exception-2.0" -homepage.workspace = true -repository.workspace = true - -[lints] -workspace = true - -[dependencies] -anyhow = { workspace = true, default-features = true } -async-trait = { workspace = true } -thiserror = { workspace = true } - -# Substrate -sc-basic-authorship = { workspace = true } -sc-block-builder = { workspace = true } -sc-transaction-pool-api = { workspace = true } -sp-api = { workspace = true, default-features = true } -sp-blockchain = { workspace = true, default-features = true } -sp-consensus = { workspace = true, default-features = true } -sp-inherents = { workspace = true, default-features = true } -sp-runtime = { workspace = true, default-features = true } -sp-state-machine = { workspace = true, default-features = true } - -# Cumulus -cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/proposer/src/lib.rs b/cumulus/client/consensus/proposer/src/lib.rs deleted file mode 100644 index af6894fd5da72..0000000000000 --- a/cumulus/client/consensus/proposer/src/lib.rs +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright (C) Parity Technologies (UK) Ltd. -// This file is part of Cumulus. 
-// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 - -// Cumulus is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Cumulus is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Cumulus. If not, see . - -//! The Cumulus [`ProposerInterface`] is an extension of the Substrate [`ProposerFactory`] -//! for creating new parachain blocks. -//! -//! This utility is designed to be composed within any collator consensus algorithm. - -use async_trait::async_trait; -use cumulus_primitives_parachain_inherent::ParachainInherentData; -use sc_basic_authorship::{ProposeArgs, ProposerFactory}; -use sc_block_builder::BlockBuilderApi; -use sc_transaction_pool_api::TransactionPool; -use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi}; -use sp_blockchain::HeaderBackend; -use sp_consensus::{EnableProofRecording, Environment, Proposal}; -use sp_inherents::{InherentData, InherentDataProvider}; -use sp_runtime::{traits::Block as BlockT, Digest}; -use sp_state_machine::StorageProof; -use std::{fmt::Debug, time::Duration}; - -/// Errors that can occur when proposing a parachain block. -#[derive(thiserror::Error, Debug)] -#[error(transparent)] -pub struct Error { - inner: anyhow::Error, -} - -impl Error { - /// Create an error tied to the creation of a proposer. - pub fn proposer_creation(err: impl Into) -> Self { - Error { inner: err.into().context("Proposer Creation") } - } - - /// Create an error tied to the proposing logic itself. 
- pub fn proposing(err: impl Into) -> Self { - Error { inner: err.into().context("Proposing") } - } -} - -/// A type alias for easily referring to the type of a proposal produced by a specific -/// [`ProposerInterface`]. -pub type ProposalOf = Proposal; - -/// An interface for proposers. -#[async_trait] -pub trait ProposerInterface { - /// Propose a collation using the supplied `InherentData` and the provided - /// `ParachainInherentData`. - /// - /// Also specify any required inherent digests, the maximum proposal duration, - /// and the block size limit in bytes. See the documentation on - /// [`sp_consensus::Proposer::propose`] for more details on how to interpret these parameters. - /// - /// The `InherentData` and `Digest` are left deliberately general in order to accommodate - /// all possible collator selection algorithms or inherent creation mechanisms, - /// while the `ParachainInherentData` is made explicit so it will be constructed appropriately. - /// - /// If the `InherentData` passed into this function already has a `ParachainInherentData`, - /// this should throw an error. 
- async fn propose( - &mut self, - parent_header: &Block::Header, - paras_inherent_data: &ParachainInherentData, - other_inherent_data: InherentData, - inherent_digests: Digest, - max_duration: Duration, - block_size_limit: Option, - ) -> Result>, Error>; -} - -#[async_trait] -impl ProposerInterface for ProposerFactory -where - A: TransactionPool + 'static, - C: HeaderBackend + ProvideRuntimeApi + CallApiAt + Send + Sync + 'static, - C::Api: ApiExt + BlockBuilderApi, - Block: sp_runtime::traits::Block, -{ - async fn propose( - &mut self, - parent_header: &Block::Header, - paras_inherent_data: &ParachainInherentData, - other_inherent_data: InherentData, - inherent_digests: Digest, - max_duration: Duration, - block_size_limit: Option, - ) -> Result>, Error> { - let proposer = self - .init(parent_header) - .await - .map_err(|e| Error::proposer_creation(anyhow::Error::new(e)))?; - - let mut inherent_data = other_inherent_data; - paras_inherent_data - .provide_inherent_data(&mut inherent_data) - .await - .map_err(|e| Error::proposing(anyhow::Error::new(e)))?; - - proposer - .propose_block(ProposeArgs { - inherent_data, - inherent_digests, - max_duration, - block_size_limit, - ignored_nodes_by_proof_recording: None, - }) - .await - .map(Some) - .map_err(|e| Error::proposing(anyhow::Error::new(e)).into()) - } -} diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index dcfa496486dc0..d8c1b3086ede1 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -148,6 +148,7 @@ fn build_block_with_witness( let cumulus_test_client::BlockBuilderAndSupportData { mut block_builder, persisted_validation_data, + .. 
} = client.init_block_builder_with_pre_digests(Some(validation_data), sproof_builder, pre_digests); extra_extrinsics.into_iter().for_each(|e| block_builder.push(e).unwrap()); @@ -201,6 +202,7 @@ fn build_multiple_blocks_with_witness( let cumulus_test_client::BlockBuilderAndSupportData { mut block_builder, persisted_validation_data: p_v_data, + proof_recorder, } = client.init_block_builder_with_ignored_nodes( parent_head.hash(), Some(validation_data.clone()), @@ -242,11 +244,11 @@ fn build_multiple_blocks_with_witness( }) .unwrap(); - ignored_nodes.extend(IgnoredNodes::from_storage_proof::( - &built_block.proof.clone().unwrap(), - )); + let new_proof = proof_recorder.drain_storage_proof(); + + ignored_nodes.extend(IgnoredNodes::from_storage_proof::(&new_proof)); ignored_nodes.extend(IgnoredNodes::from_memory_db(built_block.storage_changes.transaction)); - proof = StorageProof::merge([proof, built_block.proof.unwrap()]); + proof = StorageProof::merge([proof, new_proof]); parent_head = built_block.block.header.clone(); diff --git a/cumulus/polkadot-omni-node/lib/Cargo.toml b/cumulus/polkadot-omni-node/lib/Cargo.toml index 4a346e221f4ce..81c0fc772ed3a 100644 --- a/cumulus/polkadot-omni-node/lib/Cargo.toml +++ b/cumulus/polkadot-omni-node/lib/Cargo.toml @@ -97,7 +97,6 @@ cumulus-client-cli = { workspace = true, default-features = true } cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-aura = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } -cumulus-client-consensus-proposer = { workspace = true, default-features = true } cumulus-client-consensus-relay-chain = { workspace = true, default-features = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } cumulus-client-service = { workspace = true, default-features = true } diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs 
b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index bddfa87f32a2d..bbdce78d0121d 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -45,7 +45,6 @@ use cumulus_client_consensus_aura::{ }, equivocation_import_queue::Verifier as EquivocationVerifier, }; -use cumulus_client_consensus_proposer::ProposerInterface; use cumulus_client_consensus_relay_chain::Verifier as RelayChainVerifier; use cumulus_client_parachain_inherent::MockValidationDataInherentDataProvider; use cumulus_client_service::CollatorSybilResistance; @@ -69,6 +68,7 @@ use sc_telemetry::TelemetryHandle; use sc_transaction_pool::TransactionPoolHandle; use sc_transaction_pool_api::OffchainTransactionPoolFactory; use sp_api::ProvideRuntimeApi; +use sp_consensus::Environment; use sp_core::traits::SpawnEssentialNamed; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; @@ -283,7 +283,8 @@ where // The aura digest provider will provide digests that match the provided timestamp data. // Without this, the AURA parachain runtimes complain about slot mismatches. 
- let aura_digest_provider = AuraConsensusDataProvider::new_with_slot_duration(slot_duration); + let aura_digest_provider = + AuraConsensusDataProvider::::new_with_slot_duration(slot_duration); let para_id = Self::parachain_id(&client, &config).ok_or("Failed to retrieve the parachain id")?; @@ -521,7 +522,7 @@ where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, CHP: cumulus_client_consensus_common::ValidationCodeHashProvider + Send + 'static, - Proposer: ProposerInterface + Send + Sync + 'static, + Proposer: Environment + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + Clone + 'static, Spawner: SpawnEssentialNamed + Clone + 'static, { @@ -574,7 +575,7 @@ where node_extra_args: NodeExtraArgs, block_import_handle: SlotBasedBlockImportHandle, ) -> Result<(), Error> { - let proposer = sc_basic_authorship::ProposerFactory::with_proof_recording( + let proposer = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), client.clone(), transaction_pool, @@ -701,7 +702,7 @@ where node_extra_args: NodeExtraArgs, _: (), ) -> Result<(), Error> { - let proposer = sc_basic_authorship::ProposerFactory::with_proof_recording( + let proposer = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), client.clone(), transaction_pool, diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index f64ee832ace3b..143f665551c26 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -32,6 +32,7 @@ sp-keyring = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } substrate-test-client = { workspace = true } # Polkadot diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index 
d1b8a6886130f..6672a19e35257 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -25,11 +25,13 @@ use sc_block_builder::BlockBuilderBuilder; use sp_api::{ProofRecorder, ProofRecorderIgnoredNodes, ProvideRuntimeApi}; use sp_consensus_aura::{AuraApi, Slot}; use sp_runtime::{traits::Header as HeaderT, Digest, DigestItem}; +use sp_trie::proof_size_extension::ProofSizeExt; /// A struct containing a block builder and support data required to build test scenarios. pub struct BlockBuilderAndSupportData<'a> { pub block_builder: sc_block_builder::BlockBuilder<'a, Block, Client>, pub persisted_validation_data: PersistedValidationData, + pub proof_recorder: ProofRecorder, } /// An extension for the Cumulus test client to init a block builder. @@ -143,14 +145,16 @@ fn init_block_builder( .collect::>(), }; + let proof_recorder = + ProofRecorder::::with_ignored_nodes(ignored_nodes.unwrap_or_default()); + let mut block_builder = BlockBuilderBuilder::new(client) .on_parent_block(at) .fetch_parent_block_number(client) .unwrap() - .with_proof_recorder(Some(ProofRecorder::::with_ignored_nodes( - ignored_nodes.unwrap_or_default(), - ))) + .with_proof_recorder(Some(proof_recorder.clone())) .with_inherent_digests(pre_digests) + .with_extra_extensions(ProofSizeExt::new(proof_recorder.clone())) .build() .expect("Creates new block builder for test runtime"); @@ -186,7 +190,11 @@ fn init_block_builder( .into_iter() .for_each(|ext| block_builder.push(ext).expect("Pushes inherent")); - BlockBuilderAndSupportData { block_builder, persisted_validation_data: validation_data } + BlockBuilderAndSupportData { + block_builder, + persisted_validation_data: validation_data, + proof_recorder, + } } impl InitBlockBuilder for Client { @@ -274,11 +282,11 @@ pub trait BuildParachainBlockData { impl<'a> BuildParachainBlockData for sc_block_builder::BlockBuilder<'a, Block, Client> { fn build_parachain_block(self, parent_state_root: Hash) -> 
ParachainBlockData { + let proof_recorder = self.proof_recorder().expect("Proof recorder is always set"); let built_block = self.build().expect("Builds the block"); - let storage_proof = built_block - .proof - .expect("We enabled proof recording before.") + let storage_proof = proof_recorder + .drain_storage_proof() .into_compact_proof::<
::Hashing>(parent_state_root) .expect("Creates the compact proof"); diff --git a/cumulus/test/service/Cargo.toml b/cumulus/test/service/Cargo.toml index 4a427f4ad6593..80adf90b46f42 100644 --- a/cumulus/test/service/Cargo.toml +++ b/cumulus/test/service/Cargo.toml @@ -74,7 +74,6 @@ cumulus-client-cli = { workspace = true, default-features = true } cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-aura = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } -cumulus-client-consensus-proposer = { workspace = true, default-features = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } cumulus-client-pov-recovery = { workspace = true, default-features = true } cumulus-client-service = { workspace = true, default-features = true } diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 8b98b99908f83..a8b3a028ce2bb 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -433,7 +433,7 @@ where let collator_peer_id = network.local_peer_id(); if let Some(collator_key) = collator_key { - let proposer = sc_basic_authorship::ProposerFactory::with_proof_recording( + let proposer = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), client.clone(), transaction_pool.clone(), diff --git a/prdoc/pr_9947.prdoc b/prdoc/pr_9947.prdoc new file mode 100644 index 0000000000000..5d5d6751141e2 --- /dev/null +++ b/prdoc/pr_9947.prdoc @@ -0,0 +1,36 @@ +title: 'Proposer/BlockBuilder: Accept proof recorder & extensions' +doc: +- audience: Node Dev + description: |- + This pull request fundamentally changes how `Proposer` and `BlockBuilder` are handling the proof recorder and extensions. Before this pull request the proof recorder was initialized by the `BlockBuilder` and the proposer statically enabled proof recording or disabled it. 
With this pull request the proof recorder is passed from the caller down to the block builder. This also moves the responsibility for extracting the final storage proof to the caller; it is no longer part of the block builder logic. The extensions are now also configurable by the caller and are no longer "guessed" by the block builder. + + This pull request also removes the `cumulus-client-consensus-proposer` crate as it is not really required anymore.
inherent_digests: Default::default(), + max_duration: std::time::Duration::from_secs(20), + ..Default::default() + }, )) .map(|r| r.block) .expect("Proposing failed"); diff --git a/substrate/bin/node/cli/src/service.rs b/substrate/bin/node/cli/src/service.rs index 89a5ef151df30..0ec655e3fa6ac 100644 --- a/substrate/bin/node/cli/src/service.rs +++ b/substrate/bin/node/cli/src/service.rs @@ -1018,10 +1018,14 @@ mod tests { let proposer = proposer_factory.init(&parent_header).await.unwrap(); Proposer::propose( proposer, - inherent_data, - digest, - std::time::Duration::from_secs(1), - None, + sp_consensus::ProposeArgs { + inherent_data, + inherent_digests: digest, + max_duration: std::time::Duration::from_secs(1), + block_size_limit: None, + storage_proof_recorder: None, + extra_extensions: Default::default(), + }, ) .await }) diff --git a/substrate/client/basic-authorship/Cargo.toml b/substrate/client/basic-authorship/Cargo.toml index 59f0c3a645c58..d72e5bac42a94 100644 --- a/substrate/client/basic-authorship/Cargo.toml +++ b/substrate/client/basic-authorship/Cargo.toml @@ -30,6 +30,7 @@ sp-consensus = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } +sp-state-machine = { workspace = true, default-features = true } sp-trie = { workspace = true, default-features = true } [dev-dependencies] diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index c06a69c9c7f92..799a20a07dc63 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -32,17 +32,16 @@ use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder}; use sc_proposer_metrics::{EndProposingReason, MetricsLink as PrometheusMetrics}; use sc_telemetry::{telemetry, TelemetryHandle, 
CONSENSUS_INFO}; use sc_transaction_pool_api::{InPoolTransaction, TransactionPool, TxInvalidityReportMap}; -use sp_api::{ApiExt, CallApiAt, ProofRecorder, ProvideRuntimeApi}; +use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi}; use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend}; -use sp_consensus::{DisableProofRecording, EnableProofRecording, ProofRecording, Proposal}; +use sp_consensus::{Proposal, ProposeArgs}; use sp_core::traits::SpawnNamed; use sp_inherents::InherentData; use sp_runtime::{ traits::{BlakeTwo256, Block as BlockT, Hash as HashT, Header as HeaderT}, - Digest, ExtrinsicInclusionMode, Percent, SaturatedConversion, + ExtrinsicInclusionMode, Percent, SaturatedConversion, }; -use sp_trie::recorder::IgnoredNodes; -use std::{marker::PhantomData, pin::Pin, sync::Arc, time}; +use std::{pin::Pin, sync::Arc, time}; /// Default block size limit in bytes used by [`Proposer`]. /// @@ -58,7 +57,7 @@ const DEFAULT_SOFT_DEADLINE_PERCENT: Percent = Percent::from_percent(50); const LOG_TARGET: &'static str = "basic-authorship"; /// [`Proposer`] factory. -pub struct ProposerFactory { +pub struct ProposerFactory { spawn_handle: Box, /// The client instance. client: Arc, @@ -80,13 +79,9 @@ pub struct ProposerFactory { /// transactions which exhaust resources, we will conclude that the block is full. soft_deadline_percent: Percent, telemetry: Option, - /// When estimating the block size, should the proof be included? - include_proof_in_block_size_estimation: bool, - /// phantom member to pin the `ProofRecording` type. 
- _phantom: PhantomData, } -impl Clone for ProposerFactory { +impl Clone for ProposerFactory { fn clone(&self) -> Self { Self { spawn_handle: self.spawn_handle.clone(), @@ -96,17 +91,12 @@ impl Clone for ProposerFactory { default_block_size_limit: self.default_block_size_limit, soft_deadline_percent: self.soft_deadline_percent, telemetry: self.telemetry.clone(), - include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation, - _phantom: self._phantom, } } } -impl ProposerFactory { +impl ProposerFactory { /// Create a new proposer factory. - /// - /// Proof recording will be disabled when using proposers built by this instance to build - /// blocks. pub fn new( spawn_handle: impl SpawnNamed + 'static, client: Arc, @@ -122,19 +112,11 @@ impl ProposerFactory { soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT, telemetry, client, - include_proof_in_block_size_estimation: false, - _phantom: PhantomData, } } -} -impl ProposerFactory { - /// Create a new proposer factory with proof recording enabled. - /// - /// Each proposer created by this instance will record a proof while building a block. - /// - /// This will also include the proof into the estimation of the block size. This can be disabled - /// by calling [`ProposerFactory::disable_proof_in_block_size_estimation`]. + /// Deprecated, use [`Self::new`] instead. + #[deprecated(note = "Proof recording is now handled differently. 
Use `new` instead.")] pub fn with_proof_recording( spawn_handle: impl SpawnNamed + 'static, client: Arc, @@ -142,26 +124,9 @@ impl ProposerFactory { prometheus: Option<&PrometheusRegistry>, telemetry: Option, ) -> Self { - ProposerFactory { - client, - spawn_handle: Box::new(spawn_handle), - transaction_pool, - metrics: PrometheusMetrics::new(prometheus), - default_block_size_limit: DEFAULT_BLOCK_SIZE_LIMIT, - soft_deadline_percent: DEFAULT_SOFT_DEADLINE_PERCENT, - telemetry, - include_proof_in_block_size_estimation: true, - _phantom: PhantomData, - } + Self::new(spawn_handle, client, transaction_pool, prometheus, telemetry) } - /// Disable the proof inclusion when estimating the block size. - pub fn disable_proof_in_block_size_estimation(&mut self) { - self.include_proof_in_block_size_estimation = false; - } -} - -impl ProposerFactory { /// Set the default block size limit in bytes. /// /// The default value for the block size limit is: @@ -190,7 +155,7 @@ impl ProposerFactory { } } -impl ProposerFactory +impl ProposerFactory where A: TransactionPool + 'static, Block: BlockT, @@ -201,7 +166,7 @@ where &mut self, parent_header: &::Header, now: Box time::Instant + Send + Sync>, - ) -> Proposer { + ) -> Proposer { let parent_hash = parent_header.hash(); info!( @@ -210,7 +175,7 @@ where parent_header.number() ); - let proposer = Proposer::<_, _, _, PR> { + let proposer = Proposer::<_, _, _> { spawn_handle: self.spawn_handle.clone(), client: self.client.clone(), parent_hash, @@ -221,24 +186,21 @@ where default_block_size_limit: self.default_block_size_limit, soft_deadline_percent: self.soft_deadline_percent, telemetry: self.telemetry.clone(), - _phantom: PhantomData, - include_proof_in_block_size_estimation: self.include_proof_in_block_size_estimation, }; proposer } } -impl sp_consensus::Environment for ProposerFactory +impl sp_consensus::Environment for ProposerFactory where A: TransactionPool + 'static, Block: BlockT, C: HeaderBackend + ProvideRuntimeApi + CallApiAt 
+ Send + Sync + 'static, C::Api: ApiExt + BlockBuilderApi, - PR: ProofRecording, { type CreateProposer = future::Ready>; - type Proposer = Proposer; + type Proposer = Proposer; type Error = sp_blockchain::Error; fn init(&mut self, parent_header: &::Header) -> Self::CreateProposer { @@ -247,7 +209,7 @@ where } /// The proposer logic. -pub struct Proposer { +pub struct Proposer { spawn_handle: Box, client: Arc, parent_hash: Block::Hash, @@ -256,72 +218,22 @@ pub struct Proposer { now: Box time::Instant + Send + Sync>, metrics: PrometheusMetrics, default_block_size_limit: usize, - include_proof_in_block_size_estimation: bool, soft_deadline_percent: Percent, telemetry: Option, - _phantom: PhantomData, } -impl sp_consensus::Proposer for Proposer +impl sp_consensus::Proposer for Proposer where A: TransactionPool + 'static, Block: BlockT, C: HeaderBackend + ProvideRuntimeApi + CallApiAt + Send + Sync + 'static, C::Api: ApiExt + BlockBuilderApi, - PR: ProofRecording, { - type Proposal = - Pin, Self::Error>> + Send>>; + type Proposal = Pin, Self::Error>> + Send>>; type Error = sp_blockchain::Error; - type ProofRecording = PR; - type Proof = PR::Proof; - - fn propose( - self, - inherent_data: InherentData, - inherent_digests: Digest, - max_duration: time::Duration, - block_size_limit: Option, - ) -> Self::Proposal { - self.propose_block(ProposeArgs { - inherent_data, - inherent_digests, - max_duration, - block_size_limit, - ignored_nodes_by_proof_recording: None, - }) - .boxed() - } -} -/// Arguments for [`Proposer::propose_block`]. -pub struct ProposeArgs { - /// The inherent data to pass to the block production. - pub inherent_data: InherentData, - /// The inherent digests to include in the produced block. - pub inherent_digests: Digest, - /// Max duration for building the block. - pub max_duration: time::Duration, - /// Optional size limit for the produced block. - /// - /// When set, block production ends before hitting this limit. 
The limit includes the storage - /// proof, when proof recording is activated. - pub block_size_limit: Option, - /// Trie nodes that should not be recorded. - /// - /// Only applies when proof recording is enabled. - pub ignored_nodes_by_proof_recording: Option>, -} - -impl Default for ProposeArgs { - fn default() -> Self { - Self { - inherent_data: Default::default(), - inherent_digests: Default::default(), - max_duration: Default::default(), - block_size_limit: None, - ignored_nodes_by_proof_recording: None, - } + fn propose(self, args: ProposeArgs) -> Self::Proposal { + Self::propose_block(self, args).boxed() } } @@ -330,19 +242,18 @@ impl Default for ProposeArgs { /// It allows us to increase block utilization. const MAX_SKIPPED_TRANSACTIONS: usize = 8; -impl Proposer +impl Proposer where A: TransactionPool + 'static, Block: BlockT, C: HeaderBackend + ProvideRuntimeApi + CallApiAt + Send + Sync + 'static, C::Api: ApiExt + BlockBuilderApi, - PR: ProofRecording, { /// Propose a new block. 
pub async fn propose_block( self, args: ProposeArgs, - ) -> Result, sp_blockchain::Error> { + ) -> Result, sp_blockchain::Error> { let (tx, rx) = oneshot::channel(); let spawn_handle = self.spawn_handle.clone(); @@ -367,26 +278,26 @@ where async fn propose_with( self, - ProposeArgs { + args: ProposeArgs, + ) -> Result, sp_blockchain::Error> { + let ProposeArgs { inherent_data, inherent_digests, max_duration, block_size_limit, - ignored_nodes_by_proof_recording, - }: ProposeArgs, - ) -> Result, sp_blockchain::Error> { + storage_proof_recorder, + extra_extensions, + } = args; // leave some time for evaluation and block finalization (10%) let deadline = (self.now)() + max_duration - max_duration / 10; let block_timer = time::Instant::now(); + let mut block_builder = BlockBuilderBuilder::new(&*self.client) .on_parent_block(self.parent_hash) .with_parent_block_number(self.parent_number) - .with_proof_recorder(PR::ENABLED.then(|| { - ProofRecorder::::with_ignored_nodes( - ignored_nodes_by_proof_recording.unwrap_or_default(), - ) - })) + .with_proof_recorder(storage_proof_recorder) .with_inherent_digests(inherent_digests) + .with_extra_extensions(extra_extensions) .build()?; self.apply_inherents(&mut block_builder, inherent_data)?; @@ -397,14 +308,11 @@ where self.apply_extrinsics(&mut block_builder, deadline, block_size_limit).await?, ExtrinsicInclusionMode::OnlyInherents => EndProposingReason::TransactionForbidden, }; - let (block, storage_changes, proof) = block_builder.build()?.into_inner(); + let (block, storage_changes) = block_builder.build()?.into_inner(); let block_took = block_timer.elapsed(); - let proof = - PR::into_proof(proof).map_err(|e| sp_blockchain::Error::Application(Box::new(e)))?; - self.print_summary(&block, end_reason, block_took, block_timer.elapsed()); - Ok(Proposal { block, proof, storage_changes }) + Ok(Proposal { block, storage_changes }) } /// Apply all inherents to the block. 
@@ -513,8 +421,7 @@ where let pending_tx_data = (**pending_tx.data()).clone(); let pending_tx_hash = pending_tx.hash().clone(); - let block_size = - block_builder.estimate_block_size(self.include_proof_in_block_size_estimation); + let block_size = block_builder.estimate_block_size(); if block_size + pending_tx_data.encoded_size() > block_size_limit { pending_iterator.report_invalid(&pending_tx); limit_hit_reason = Some(EndProposingReason::HitBlockSizeLimit); @@ -1041,13 +948,8 @@ mod tests { // Without a block limit we should include all of them assert_eq!(block.extrinsics().len(), extrinsics_num); - let mut proposer_factory = ProposerFactory::with_proof_recording( - spawner.clone(), - client.clone(), - txpool.clone(), - None, - None, - ); + let mut proposer_factory = + ProposerFactory::new(spawner.clone(), client.clone(), txpool.clone(), None, None); let proposer = block_on(proposer_factory.init(&genesis_header)).unwrap(); @@ -1060,11 +962,12 @@ mod tests { .enable_proof_recording() .build() .unwrap(); - builder.estimate_block_size(true) + extrinsics[0].encoded_size() + builder.estimate_block_size() + extrinsics[0].encoded_size() }; let block = block_on(proposer.propose_block(ProposeArgs { max_duration: deadline, block_size_limit: Some(block_limit), + storage_proof_recorder: Some(Default::default()), ..Default::default() })) .map(|r| r.block) diff --git a/substrate/client/basic-authorship/src/lib.rs b/substrate/client/basic-authorship/src/lib.rs index b08b66e23aa13..bd1fe9f34bb86 100644 --- a/substrate/client/basic-authorship/src/lib.rs +++ b/substrate/client/basic-authorship/src/lib.rs @@ -22,7 +22,7 @@ //! //! ``` //! # use sc_basic_authorship::ProposerFactory; -//! # use sp_consensus::{Environment, Proposer}; +//! # use sp_consensus::{Environment, Proposer, ProposeArgs}; //! # use sp_runtime::generic::BlockId; //! # use std::{sync::Arc, time::Duration}; //! # use substrate_test_runtime_client::{ @@ -60,10 +60,10 @@ //! 
// The proposer will grab transactions from the transaction pool, and put them into the block. //! let future = Proposer::propose( //! proposer, -//! Default::default(), -//! Default::default(), -//! Duration::from_secs(2), -//! None, +//! ProposeArgs { +//! max_duration: Duration::from_secs(2), +//! ..Default::default() +//! } //! ); //! //! // We wait until the proposition is performed. @@ -73,6 +73,5 @@ mod basic_authorship; -pub use crate::basic_authorship::{ - ProposeArgs, Proposer, ProposerFactory, DEFAULT_BLOCK_SIZE_LIMIT, -}; +pub use crate::basic_authorship::{Proposer, ProposerFactory, DEFAULT_BLOCK_SIZE_LIMIT}; +pub use sp_consensus::ProposeArgs; diff --git a/substrate/client/block-builder/Cargo.toml b/substrate/client/block-builder/Cargo.toml index 85bc395179e59..8afc30e89b6c8 100644 --- a/substrate/client/block-builder/Cargo.toml +++ b/substrate/client/block-builder/Cargo.toml @@ -21,9 +21,9 @@ sp-api = { workspace = true, default-features = true } sp-block-builder = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } -sp-trie = { workspace = true, default-features = true } [dev-dependencies] sp-state-machine = { workspace = true, default-features = true } diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index 00b82382f5428..a06f38d618f66 100644 --- a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -30,10 +30,11 @@ use codec::Encode; use sp_api::{ ApiExt, ApiRef, CallApiAt, Core, ProofRecorder, ProvideRuntimeApi, StorageChanges, - StorageProof, TransactionOutcome, + TransactionOutcome, }; use sp_blockchain::{ApplyExtrinsicFailed, Error, HeaderBackend}; use 
sp_core::traits::CallContext; +use sp_externalities::Extensions; use sp_runtime::{ legacy, traits::{Block as BlockT, Hash, HashingFor, Header as HeaderT, NumberFor, One}, @@ -42,7 +43,6 @@ use sp_runtime::{ use std::marker::PhantomData; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_trie::proof_size_extension::ProofSizeExt; /// A builder for creating an instance of [`BlockBuilder`]. pub struct BlockBuilderBuilder<'a, B, C> { @@ -103,6 +103,7 @@ where inherent_digests: Default::default(), parent_block: self.parent_block, parent_number, + extra_extensions: Extensions::new(), }) } @@ -120,6 +121,7 @@ where inherent_digests: Default::default(), parent_block: self.parent_block, parent_number, + extra_extensions: Extensions::new(), } } } @@ -134,24 +136,22 @@ pub struct BlockBuilderBuilderStage2<'a, B: BlockT, C> { inherent_digests: Digest, parent_block: B::Hash, parent_number: NumberFor, + extra_extensions: Extensions, } impl<'a, B: BlockT, C> BlockBuilderBuilderStage2<'a, B, C> { - /// Enable proof recording for the block builder. - pub fn enable_proof_recording(mut self) -> Self { - self.proof_recorder = Some(Default::default()); - self - } - - /// Enable/disable proof recording for the block builder. - pub fn with_proof_recording(mut self, enable: bool) -> Self { - self.proof_recorder = enable.then(|| Default::default()); + /// Enable/disable proof recording for the block builder using the given proof recorder. + pub fn with_proof_recorder( + mut self, + proof_recorder: impl Into>>, + ) -> Self { + self.proof_recorder = proof_recorder.into(); self } - /// Enable/disable proof recording for the block builder using the given proof recorder. - pub fn with_proof_recorder(mut self, proof_recorder: Option>) -> Self { - self.proof_recorder = proof_recorder; + /// Enable proof recording. 
+ pub fn enable_proof_recording(mut self) -> Self { + self.proof_recorder = Some(Default::default()); self } @@ -161,6 +161,12 @@ impl<'a, B: BlockT, C> BlockBuilderBuilderStage2<'a, B, C> { self } + /// Set the extra extensions to be registered with the runtime API during block building. + pub fn with_extra_extensions(mut self, extra_extensions: impl Into) -> Self { + self.extra_extensions = extra_extensions.into(); + self + } + /// Create the instance of the [`BlockBuilder`]. pub fn build(self) -> Result, Error> where @@ -173,6 +179,7 @@ impl<'a, B: BlockT, C> BlockBuilderBuilderStage2<'a, B, C> { self.parent_number, self.proof_recorder, self.inherent_digests, + self.extra_extensions, ) } } @@ -180,22 +187,18 @@ impl<'a, B: BlockT, C> BlockBuilderBuilderStage2<'a, B, C> { /// A block that was build by [`BlockBuilder`] plus some additional data. /// /// This additional data includes the `storage_changes`, these changes can be applied to the -/// backend to get the state of the block. Furthermore an optional `proof` is included which -/// can be used to proof that the build block contains the expected data. The `proof` will -/// only be set when proof recording was activated. +/// backend to get the state of the block. pub struct BuiltBlock { /// The actual block that was build. pub block: Block, /// The changes that need to be applied to the backend to get the state of the build block. pub storage_changes: StorageChanges, - /// An optional proof that was recorded while building the block. - pub proof: Option, } impl BuiltBlock { /// Convert into the inner values. 
- pub fn into_inner(self) -> (Block, StorageChanges, Option) { - (self.block, self.storage_changes, self.proof) + pub fn into_inner(self) -> (Block, StorageChanges) { + (self.block, self.storage_changes) } } @@ -229,6 +232,7 @@ where parent_number: NumberFor, proof_recorder: Option>, inherent_digests: Digest, + extra_extensions: Extensions, ) -> Result { let header = <::Header as HeaderT>::new( parent_number + One::one(), @@ -243,10 +247,13 @@ where let mut api = call_api_at.runtime_api(); if let Some(recorder) = proof_recorder { - api.record_proof_with_recorder(recorder.clone()); - api.register_extension(ProofSizeExt::new(recorder)); + api.record_proof_with_recorder(recorder); } + extra_extensions.into_extensions().for_each(|e| { + api.register_extension(e); + }); + api.set_call_context(CallContext::Onchain); let core_version = api @@ -316,7 +323,7 @@ where /// Returns the build `Block`, the changes to the storage and an optional `StorageProof` /// supplied by `self.api`, combined as [`BuiltBlock`]. /// The storage proof will be `Some(_)` when proof recording was enabled. - pub fn build(mut self) -> Result, Error> { + pub fn build(self) -> Result, Error> { let header = self.api.finalize_block(self.parent_hash)?; debug_assert_eq!( @@ -327,8 +334,6 @@ where ), ); - let proof = self.api.extract_proof(); - let state = self.call_api_at.state_at(self.parent_hash)?; let storage_changes = self @@ -336,11 +341,7 @@ where .into_storage_changes(&state, self.parent_hash) .map_err(sp_blockchain::Error::StorageChanges)?; - Ok(BuiltBlock { - block: ::new(header, self.extrinsics), - storage_changes, - proof, - }) + Ok(BuiltBlock { block: ::new(header, self.extrinsics), storage_changes }) } /// Create the inherents for the block. @@ -364,14 +365,15 @@ where /// /// If `include_proof` is `true`, the estimated size of the storage proof will be added /// to the estimation. 
- pub fn estimate_block_size(&self, include_proof: bool) -> usize { + pub fn estimate_block_size(&self) -> usize { let size = self.estimated_header_size + self.extrinsics.encoded_size(); - if include_proof { - size + self.api.proof_recorder().map(|pr| pr.estimate_encoded_size()).unwrap_or(0) - } else { - size - } + size + self.api.proof_recorder().map_or(0, |pr| pr.estimate_encoded_size()) + } + + /// Returns the [`ProofRecorder`] set for the block building. + pub fn proof_recorder(&self) -> Option> { + self.api.proof_recorder() } } @@ -382,7 +384,8 @@ mod tests { use sp_core::Blake2Hasher; use sp_state_machine::Backend; use substrate_test_runtime_client::{ - runtime::ExtrinsicBuilder, DefaultTestClientBuilderExt, TestClientBuilderExt, + runtime::{Block, ExtrinsicBuilder}, + DefaultTestClientBuilderExt, TestClientBuilderExt, }; #[test] @@ -392,16 +395,18 @@ mod tests { let genesis_hash = client.info().best_hash; - let block = BlockBuilderBuilder::new(&client) + let storage_proof_recorder = ProofRecorder::::default(); + + BlockBuilderBuilder::new(&client) .on_parent_block(genesis_hash) .with_parent_block_number(0) - .enable_proof_recording() + .with_proof_recorder(storage_proof_recorder.clone()) .build() .unwrap() .build() .unwrap(); - let proof = block.proof.expect("Proof is build on request"); + let proof = storage_proof_recorder.drain_storage_proof(); let genesis_state_root = client.header(genesis_hash).unwrap().unwrap().state_root; let backend = @@ -420,42 +425,48 @@ mod tests { let client = builder.build(); let genesis_hash = client.info().best_hash; + let proof_recorder = ProofRecorder::::default(); + let mut block_builder = BlockBuilderBuilder::new(&client) .on_parent_block(genesis_hash) .with_parent_block_number(0) - .enable_proof_recording() + .with_proof_recorder(proof_recorder.clone()) .build() .unwrap(); block_builder.push(ExtrinsicBuilder::new_read_and_panic(8).build()).unwrap_err(); - let block = block_builder.build().unwrap(); + 
block_builder.build().unwrap(); - let proof_with_panic = block.proof.expect("Proof is build on request").encoded_size(); + let proof_with_panic = proof_recorder.drain_storage_proof().encoded_size(); + + let proof_recorder = ProofRecorder::::default(); let mut block_builder = BlockBuilderBuilder::new(&client) .on_parent_block(genesis_hash) .with_parent_block_number(0) - .enable_proof_recording() + .with_proof_recorder(proof_recorder.clone()) .build() .unwrap(); block_builder.push(ExtrinsicBuilder::new_read(8).build()).unwrap(); - let block = block_builder.build().unwrap(); + block_builder.build().unwrap(); + + let proof_without_panic = proof_recorder.drain_storage_proof().encoded_size(); - let proof_without_panic = block.proof.expect("Proof is build on request").encoded_size(); + let proof_recorder = ProofRecorder::::default(); - let block = BlockBuilderBuilder::new(&client) + BlockBuilderBuilder::new(&client) .on_parent_block(genesis_hash) .with_parent_block_number(0) - .enable_proof_recording() + .with_proof_recorder(proof_recorder.clone()) .build() .unwrap() .build() .unwrap(); - let proof_empty_block = block.proof.expect("Proof is build on request").encoded_size(); + let proof_empty_block = proof_recorder.drain_storage_proof().encoded_size(); // Ensure that we rolled back the changes of the panicked transaction. 
assert!(proof_without_panic > proof_with_panic); diff --git a/substrate/client/consensus/aura/src/lib.rs b/substrate/client/consensus/aura/src/lib.rs index 6e8b6828e2513..fd8ae3d660de4 100644 --- a/substrate/client/consensus/aura/src/lib.rs +++ b/substrate/client/consensus/aura/src/lib.rs @@ -554,15 +554,11 @@ mod tests { use sc_keystore::LocalKeystore; use sc_network_test::{Block as TestBlock, *}; use sp_application_crypto::{key_types::AURA, AppCrypto}; - use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; + use sp_consensus::{NoNetwork as DummyOracle, Proposal, ProposeArgs}; use sp_consensus_aura::sr25519::AuthorityPair; - use sp_inherents::InherentData; use sp_keyring::sr25519::Keyring; use sp_keystore::Keystore; - use sp_runtime::{ - traits::{Block as BlockT, Header as _}, - Digest, - }; + use sp_runtime::traits::{Block as BlockT, Header as _}; use sp_timestamp::Timestamp; use std::{ task::Poll, @@ -592,31 +588,21 @@ mod tests { impl Proposer for DummyProposer { type Error = Error; - type Proposal = future::Ready, Error>>; - type ProofRecording = DisableProofRecording; - type Proof = (); - - fn propose( - self, - _: InherentData, - digests: Digest, - _: Duration, - _: Option, - ) -> Self::Proposal { + type Proposal = future::Ready, Error>>; + + fn propose(self, args: ProposeArgs) -> Self::Proposal { let r = BlockBuilderBuilder::new(&*self.0) .on_parent_block(self.0.chain_info().best_hash) .fetch_parent_block_number(&*self.0) .unwrap() - .with_inherent_digests(digests) + .with_inherent_digests(args.inherent_digests) .build() .unwrap() .build(); - future::ready(r.map(|b| Proposal { - block: b.block, - proof: (), - storage_changes: b.storage_changes, - })) + future::ready( + r.map(|b| Proposal { block: b.block, storage_changes: b.storage_changes }), + ) } } @@ -864,7 +850,7 @@ mod tests { let head = client.expect_header(client.info().genesis_hash).unwrap(); - let res = worker + let block = worker .on_slot(SlotInfo { slot: 0.into(), 
ends_at: Instant::now() + Duration::from_secs(100), @@ -872,11 +858,12 @@ mod tests { duration: Duration::from_millis(1000), chain_head: head, block_size_limit: None, + storage_proof_recorder: None, }) .await .unwrap(); // The returned block should be imported and we should be able to get its header by now. - assert!(client.header(res.block.hash()).unwrap().is_some()); + assert!(client.header(block.hash()).unwrap().is_some()); } } diff --git a/substrate/client/consensus/babe/src/tests.rs b/substrate/client/consensus/babe/src/tests.rs index e5f8c26a4488c..c116acf3e83e1 100644 --- a/substrate/client/consensus/babe/src/tests.rs +++ b/substrate/client/consensus/babe/src/tests.rs @@ -28,21 +28,20 @@ use sc_consensus_slots::BackoffAuthoringOnFinalizedHeadLagging; use sc_network_test::{Block as TestBlock, *}; use sc_transaction_pool_api::RejectAllTxPool; use sp_application_crypto::key_types::BABE; -use sp_consensus::{DisableProofRecording, NoNetwork as DummyOracle, Proposal}; +use sp_consensus::{NoNetwork as DummyOracle, Proposal, ProposeArgs}; use sp_consensus_babe::{ inherents::{BabeCreateInherentDataProviders, InherentDataProvider}, make_vrf_sign_data, AllowedSlots, AuthorityId, AuthorityPair, Slot, }; use sp_consensus_slots::SlotDuration; use sp_core::crypto::Pair; -use sp_inherents::InherentData; use sp_keyring::Sr25519Keyring; use sp_keystore::{testing::MemoryKeystore, Keystore}; use sp_runtime::{ generic::{Digest, DigestItem}, traits::Block as BlockT, }; -use std::{cell::RefCell, task::Poll, time::Duration}; +use std::{cell::RefCell, task::Poll}; use substrate_test_runtime_client::DefaultTestClientBuilderExt; type Item = DigestItem; @@ -105,7 +104,7 @@ impl DummyProposer { fn propose_with( &mut self, pre_digests: Digest, - ) -> future::Ready, Error>> { + ) -> future::Ready, Error>> { let block_builder = BlockBuilderBuilder::new(&*self.factory.client) .on_parent_block(self.parent_hash) .fetch_parent_block_number(&*self.factory.client) @@ -122,24 +121,16 @@ impl 
DummyProposer { // mutate the block header according to the mutator. (self.factory.mutator)(&mut block.header, Stage::PreSeal); - future::ready(Ok(Proposal { block, proof: (), storage_changes: Default::default() })) + future::ready(Ok(Proposal { block, storage_changes: Default::default() })) } } impl Proposer for DummyProposer { type Error = Error; - type Proposal = future::Ready, Error>>; - type ProofRecording = DisableProofRecording; - type Proof = (); + type Proposal = future::Ready, Error>>; - fn propose( - mut self, - _: InherentData, - pre_digests: Digest, - _: Duration, - _: Option, - ) -> Self::Proposal { - self.propose_with(pre_digests) + fn propose(mut self, args: ProposeArgs) -> Self::Proposal { + self.propose_with(args.inherent_digests) } } diff --git a/substrate/client/consensus/manual-seal/Cargo.toml b/substrate/client/consensus/manual-seal/Cargo.toml index 4d232f7256cb7..27477390dafb8 100644 --- a/substrate/client/consensus/manual-seal/Cargo.toml +++ b/substrate/client/consensus/manual-seal/Cargo.toml @@ -16,7 +16,6 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] -assert_matches = { workspace = true } async-trait = { workspace = true } codec = { workspace = true, default-features = true } futures = { workspace = true } @@ -39,13 +38,16 @@ sp-consensus-aura = { workspace = true, default-features = true } sp-consensus-babe = { workspace = true, default-features = true } sp-consensus-slots = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-keystore = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-timestamp = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } thiserror = { workspace = true } [dev-dependencies] +assert_matches = { 
workspace = true } sc-basic-authorship = { workspace = true, default-features = true } substrate-test-runtime-client = { workspace = true } substrate-test-runtime-transaction-pool = { workspace = true } diff --git a/substrate/client/consensus/manual-seal/src/consensus.rs b/substrate/client/consensus/manual-seal/src/consensus.rs index 2cc2b902b1ce9..93177624c4986 100644 --- a/substrate/client/consensus/manual-seal/src/consensus.rs +++ b/substrate/client/consensus/manual-seal/src/consensus.rs @@ -20,6 +20,7 @@ use super::Error; use sc_consensus::BlockImportParams; +use sp_api::StorageProof; use sp_inherents::InherentData; use sp_runtime::{traits::Block as BlockT, Digest}; @@ -30,9 +31,6 @@ pub mod timestamp; /// Consensus data provider, manual seal uses this trait object for authoring blocks valid /// for any runtime. pub trait ConsensusDataProvider: Send + Sync { - /// The proof type. - type Proof; - /// Attempt to create a consensus digest. fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result; @@ -42,6 +40,6 @@ pub trait ConsensusDataProvider: Send + Sync { parent: &B::Header, params: &mut BlockImportParams, inherents: &InherentData, - proof: Self::Proof, + proof: StorageProof, ) -> Result<(), Error>; } diff --git a/substrate/client/consensus/manual-seal/src/consensus/aura.rs b/substrate/client/consensus/manual-seal/src/consensus/aura.rs index 1d66ab20e40f3..c0006c4ca4bc6 100644 --- a/substrate/client/consensus/manual-seal/src/consensus/aura.rs +++ b/substrate/client/consensus/manual-seal/src/consensus/aura.rs @@ -22,7 +22,7 @@ use crate::{ConsensusDataProvider, Error}; use sc_client_api::{AuxStore, UsageProvider}; use sc_consensus::BlockImportParams; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProvideRuntimeApi, StorageProof}; use sp_consensus_aura::{ digests::CompatibleDigestItem, sr25519::{AuthorityId, AuthoritySignature}, @@ -36,14 +36,14 @@ use std::{marker::PhantomData, sync::Arc}; /// Consensus data provider for Aura. 
This allows to use manual-seal driven nodes to author valid /// AURA blocks. It will inspect incoming [`InherentData`] and look for included timestamps. Based /// on these timestamps, the [`AuraConsensusDataProvider`] will emit fitting digest items. -pub struct AuraConsensusDataProvider { +pub struct AuraConsensusDataProvider { // slot duration slot_duration: SlotDuration, // phantom data for required generics - _phantom: PhantomData<(B, P)>, + _phantom: PhantomData, } -impl AuraConsensusDataProvider +impl AuraConsensusDataProvider where B: BlockT, { @@ -66,13 +66,10 @@ where } } -impl ConsensusDataProvider for AuraConsensusDataProvider +impl ConsensusDataProvider for AuraConsensusDataProvider where B: BlockT, - P: Send + Sync, { - type Proof = P; - fn create_digest( &self, _parent: &B::Header, @@ -95,7 +92,7 @@ where _parent: &B::Header, _params: &mut BlockImportParams, _inherents: &InherentData, - _proof: Self::Proof, + _proof: StorageProof, ) -> Result<(), Error> { Ok(()) } diff --git a/substrate/client/consensus/manual-seal/src/consensus/babe.rs b/substrate/client/consensus/manual-seal/src/consensus/babe.rs index a68e46f0134d6..e8d06b917161e 100644 --- a/substrate/client/consensus/manual-seal/src/consensus/babe.rs +++ b/substrate/client/consensus/manual-seal/src/consensus/babe.rs @@ -30,10 +30,10 @@ use sc_consensus_epochs::{ descendent_query, EpochHeader, SharedEpochChanges, ViableEpochDescriptor, }; use sp_keystore::KeystorePtr; -use std::{marker::PhantomData, sync::Arc}; +use std::sync::Arc; use sc_consensus::{BlockImportParams, ForkChoiceStrategy, Verifier}; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProvideRuntimeApi, StorageProof}; use sp_blockchain::{HeaderBackend, HeaderMetadata}; use sp_consensus_babe::{ digests::{NextEpochDescriptor, PreDigest, SecondaryPlainPreDigest}, @@ -51,7 +51,7 @@ use sp_timestamp::TimestampInherentData; /// Provides BABE-compatible predigests and BlockImportParams. /// Intended for use with BABE runtimes. 
-pub struct BabeConsensusDataProvider { +pub struct BabeConsensusDataProvider { /// shared reference to keystore keystore: KeystorePtr, @@ -69,7 +69,6 @@ pub struct BabeConsensusDataProvider { /// Authorities to be used for this babe chain. authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - _phantom: PhantomData

, } /// Verifier to be used for babe chains @@ -131,7 +130,7 @@ where } } -impl BabeConsensusDataProvider +impl BabeConsensusDataProvider where B: BlockT, C: AuxStore @@ -153,14 +152,7 @@ where let config = sc_consensus_babe::configuration(&*client)?; - Ok(Self { - config, - client, - keystore, - epoch_changes, - authorities, - _phantom: Default::default(), - }) + Ok(Self { config, client, keystore, epoch_changes, authorities }) } fn epoch(&self, parent: &B::Header, slot: Slot) -> Result { @@ -186,7 +178,7 @@ where } } -impl ConsensusDataProvider for BabeConsensusDataProvider +impl ConsensusDataProvider for BabeConsensusDataProvider where B: BlockT, C: AuxStore @@ -195,10 +187,7 @@ where + UsageProvider + ProvideRuntimeApi, C::Api: BabeApi, - P: Send + Sync, { - type Proof = P; - fn create_digest(&self, parent: &B::Header, inherents: &InherentData) -> Result { let slot = inherents .babe_inherent_data()? @@ -265,7 +254,7 @@ where parent: &B::Header, params: &mut BlockImportParams, inherents: &InherentData, - _proof: Self::Proof, + _proof: StorageProof, ) -> Result<(), Error> { let slot = inherents .babe_inherent_data()? diff --git a/substrate/client/consensus/manual-seal/src/lib.rs b/substrate/client/consensus/manual-seal/src/lib.rs index af9bcc8d56d6f..1e09c70797611 100644 --- a/substrate/client/consensus/manual-seal/src/lib.rs +++ b/substrate/client/consensus/manual-seal/src/lib.rs @@ -87,7 +87,7 @@ where } /// Params required to start the manual sealing authorship task. -pub struct ManualSealParams, TP, SC, CS, CIDP, P> { +pub struct ManualSealParams, TP, SC, CS, CIDP> { /// Block import instance. pub block_import: BI, @@ -108,14 +108,14 @@ pub struct ManualSealParams, TP, SC, C pub select_chain: SC, /// Digest provider for inclusion in blocks. - pub consensus_data_provider: Option>>, + pub consensus_data_provider: Option>>, /// Something that can create the inherent data providers. 
pub create_inherent_data_providers: CIDP, } /// Params required to start the instant sealing authorship task. -pub struct InstantSealParams, TP, SC, CIDP, P> { +pub struct InstantSealParams, TP, SC, CIDP> { /// Block import instance for well. importing blocks. pub block_import: BI, @@ -132,7 +132,7 @@ pub struct InstantSealParams, TP, SC, pub select_chain: SC, /// Digest provider for inclusion in blocks. - pub consensus_data_provider: Option>>, + pub consensus_data_provider: Option>>, /// Something that can create the inherent data providers. pub create_inherent_data_providers: CIDP, @@ -151,7 +151,7 @@ pub struct DelayedFinalizeParams { } /// Creates the background authorship task for the manually seal engine. -pub async fn run_manual_seal( +pub async fn run_manual_seal( ManualSealParams { mut block_import, mut env, @@ -161,19 +161,18 @@ pub async fn run_manual_seal( select_chain, consensus_data_provider, create_inherent_data_providers, - }: ManualSealParams, + }: ManualSealParams, ) where B: BlockT + 'static, BI: BlockImport + Send + Sync + 'static, C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Proposer: Proposer, + E::Proposer: Proposer, CS: Stream::Hash>> + Unpin + 'static, SC: SelectChain + 'static, TP: TransactionPool, CIDP: CreateInherentDataProviders, - P: codec::Encode + Send + Sync + 'static, { while let Some(command) = commands_stream.next().await { match command { @@ -211,7 +210,7 @@ pub async fn run_manual_seal( /// runs the background authorship task for the instant seal engine. /// instant-seal creates a new block for every transaction imported into /// the transaction pool. 
-pub async fn run_instant_seal( +pub async fn run_instant_seal( InstantSealParams { block_import, env, @@ -220,18 +219,17 @@ pub async fn run_instant_seal( select_chain, consensus_data_provider, create_inherent_data_providers, - }: InstantSealParams, + }: InstantSealParams, ) where B: BlockT + 'static, BI: BlockImport + Send + Sync + 'static, C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Proposer: Proposer, + E::Proposer: Proposer, SC: SelectChain + 'static, TP: TransactionPool, CIDP: CreateInherentDataProviders, - P: codec::Encode + Send + Sync + 'static, { // instant-seal creates blocks as soon as transactions are imported // into the transaction pool. @@ -261,7 +259,7 @@ pub async fn run_instant_seal( /// /// This function will finalize the block immediately as well. If you don't /// want this behavior use `run_instant_seal` instead. -pub async fn run_instant_seal_and_finalize( +pub async fn run_instant_seal_and_finalize( InstantSealParams { block_import, env, @@ -270,18 +268,17 @@ pub async fn run_instant_seal_and_finalize( select_chain, consensus_data_provider, create_inherent_data_providers, - }: InstantSealParams, + }: InstantSealParams, ) where B: BlockT + 'static, BI: BlockImport + Send + Sync + 'static, C: HeaderBackend + Finalizer + ProvideRuntimeApi + 'static, CB: ClientBackend + 'static, E: Environment + 'static, - E::Proposer: Proposer, + E::Proposer: Proposer, SC: SelectChain + 'static, TP: TransactionPool, CIDP: CreateInherentDataProviders, - P: codec::Encode + Send + Sync + 'static, { // Creates and finalizes blocks as soon as transactions are imported // into the transaction pool. 
@@ -346,10 +343,12 @@ pub async fn run_delayed_finalize( #[cfg(test)] mod tests { use super::*; + use assert_matches::assert_matches; use sc_basic_authorship::ProposerFactory; use sc_consensus::ImportedAux; use sc_transaction_pool::{BasicPool, FullChainApi, Options, RevalidationType}; use sc_transaction_pool_api::{MaintainedTransactionPool, TransactionPool, TransactionSource}; + use sp_api::StorageProof; use sp_inherents::InherentData; use sp_runtime::generic::{Digest, DigestItem}; use substrate_test_runtime_client::{ @@ -371,8 +370,6 @@ mod tests { B: BlockT, C: ProvideRuntimeApi + Send + Sync, { - type Proof = (); - fn create_digest( &self, _parent: &B::Header, @@ -386,7 +383,7 @@ mod tests { _parent: &B::Header, params: &mut BlockImportParams, _inherents: &InherentData, - _proof: Self::Proof, + _proof: StorageProof, ) -> Result<(), Error> { params.post_digests.push(DigestItem::Other(vec![1])); Ok(()) @@ -448,10 +445,10 @@ mod tests { assert!(result.is_ok()); // assert that the background task returns ok let created_block = receiver.await.unwrap().unwrap(); - assert_eq!( + assert_matches!( created_block, CreatedBlock { - hash: created_block.hash, + hash: _, aux: ImportedAux { header_only: false, clear_justification_requests: false, @@ -459,7 +456,7 @@ mod tests { bad_justification: false, is_new_best: true, }, - proof_size: 0 + proof_size: _ } ); // assert that there's a new block in the db. @@ -612,10 +609,10 @@ mod tests { let created_block = rx.await.unwrap().unwrap(); // assert that the background task returns ok - assert_eq!( + assert_matches!( created_block, CreatedBlock { - hash: created_block.hash, + hash: _, aux: ImportedAux { header_only: false, clear_justification_requests: false, @@ -623,7 +620,7 @@ mod tests { bad_justification: false, is_new_best: true, }, - proof_size: 0 + proof_size: _ } ); // assert that there's a new block in the db. 
@@ -697,10 +694,10 @@ mod tests { let created_block = rx.await.unwrap().unwrap(); // assert that the background task returns ok - assert_eq!( + assert_matches!( created_block, CreatedBlock { - hash: created_block.hash, + hash: _, aux: ImportedAux { header_only: false, clear_justification_requests: false, @@ -708,7 +705,7 @@ mod tests { bad_justification: false, is_new_best: true }, - proof_size: 0 + proof_size: _ } ); @@ -732,7 +729,7 @@ mod tests { }) .await .is_ok()); - assert_matches::assert_matches!(rx1.await.expect("should be no error receiving"), Ok(_)); + assert_matches!(rx1.await.expect("should be no error receiving"), Ok(_)); assert!(pool.submit_one(created_block.hash, SOURCE, uxt(Bob, 0)).await.is_ok()); let (tx2, rx2) = futures::channel::oneshot::channel(); diff --git a/substrate/client/consensus/manual-seal/src/seal_block.rs b/substrate/client/consensus/manual-seal/src/seal_block.rs index 716e889ec0395..f0bb06bb9286b 100644 --- a/substrate/client/consensus/manual-seal/src/seal_block.rs +++ b/substrate/client/consensus/manual-seal/src/seal_block.rs @@ -19,21 +19,24 @@ //! 
Block sealing utilities use crate::{rpc, ConsensusDataProvider, CreatedBlock, Error}; +use codec::Encode; use futures::prelude::*; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, ImportResult, StateAction}; use sc_transaction_pool_api::TransactionPool; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProofRecorder, ProvideRuntimeApi}; use sp_blockchain::HeaderBackend; -use sp_consensus::{self, BlockOrigin, Environment, Proposer, SelectChain}; +use sp_consensus::{self, BlockOrigin, Environment, ProposeArgs, Proposer, SelectChain}; +use sp_externalities::Extensions; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sp_trie::proof_size_extension::ProofSizeExt; use std::{sync::Arc, time::Duration}; /// max duration for creating a proposal in secs pub const MAX_PROPOSAL_DURATION: u64 = 10; /// params for sealing a new block -pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP, CIDP, P> { +pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP, CIDP> { /// if true, empty blocks(without extrinsics) will be created. /// otherwise, will return Error::EmptyTransactionPool. pub create_empty: bool, @@ -52,7 +55,7 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP /// SelectChain object pub select_chain: &'a SC, /// Digest provider for inclusion in blocks. - pub consensus_data_provider: Option<&'a dyn ConsensusDataProvider>, + pub consensus_data_provider: Option<&'a dyn ConsensusDataProvider>, /// block import object pub block_import: &'a mut BI, /// Something that can create the inherent data providers. 
@@ -60,7 +63,7 @@ pub struct SealBlockParams<'a, B: BlockT, BI, SC, C: ProvideRuntimeApi, E, TP } /// seals a new block with the given params -pub async fn seal_block( +pub async fn seal_block( SealBlockParams { create_empty, finalize, @@ -73,17 +76,16 @@ pub async fn seal_block( create_inherent_data_providers, consensus_data_provider: digest_provider, mut sender, - }: SealBlockParams<'_, B, BI, SC, C, E, TP, CIDP, P>, + }: SealBlockParams<'_, B, BI, SC, C, E, TP, CIDP>, ) where B: BlockT, BI: BlockImport + Send + Sync + 'static, C: HeaderBackend + ProvideRuntimeApi, E: Environment, - E::Proposer: Proposer, + E::Proposer: Proposer, TP: TransactionPool, SC: SelectChain, CIDP: CreateInherentDataProviders, - P: codec::Encode + Send + Sync + 'static, { let future = async { if pool.status().ready == 0 && !create_empty { @@ -109,19 +111,29 @@ pub async fn seal_block( let proposer = env.init(&parent).map_err(|err| Error::StringError(err.to_string())).await?; let inherents_len = inherent_data.len(); - let digest = if let Some(digest_provider) = digest_provider { + let inherent_digests = if let Some(digest_provider) = digest_provider { digest_provider.create_digest(&parent, &inherent_data)? 
} else { Default::default() }; + let storage_proof_recorder = ProofRecorder::::default(); + + let mut extra_extensions = Extensions::default(); + // Required by parachains + extra_extensions.register(ProofSizeExt::new(storage_proof_recorder.clone())); + + let propose_args = ProposeArgs { + inherent_data: inherent_data.clone(), + inherent_digests, + max_duration: Duration::from_secs(MAX_PROPOSAL_DURATION), + storage_proof_recorder: Some(storage_proof_recorder.clone()), + extra_extensions, + ..Default::default() + }; + let proposal = proposer - .propose( - inherent_data.clone(), - digest, - Duration::from_secs(MAX_PROPOSAL_DURATION), - None, - ) + .propose(propose_args) .map_err(|err| Error::StringError(err.to_string())) .await?; @@ -129,8 +141,9 @@ pub async fn seal_block( return Err(Error::EmptyTransactionPool) } + let proof = storage_proof_recorder.drain_storage_proof(); + let (header, body) = proposal.block.deconstruct(); - let proof = proposal.proof; let proof_size = proof.encoded_size(); let mut params = BlockImportParams::new(BlockOrigin::Own, header.clone()); params.body = Some(body); @@ -145,7 +158,7 @@ pub async fn seal_block( } // Make sure we return the same post-hash that will be calculated when importing the block - // This is important in case the digest_provider added any signature, seal, ect. + // This is important in case the digest_provider added any signature, seal, etc. 
let mut post_header = header.clone(); post_header.digest_mut().logs.extend(params.post_digests.iter().cloned()); diff --git a/substrate/client/consensus/pow/src/lib.rs b/substrate/client/consensus/pow/src/lib.rs index 0a32332426f47..cc68ab70a438d 100644 --- a/substrate/client/consensus/pow/src/lib.rs +++ b/substrate/client/consensus/pow/src/lib.rs @@ -56,7 +56,9 @@ use sc_consensus::{ use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder as BlockBuilderApi; use sp_blockchain::HeaderBackend; -use sp_consensus::{Environment, Error as ConsensusError, Proposer, SelectChain, SyncOracle}; +use sp_consensus::{ + Environment, Error as ConsensusError, ProposeArgs, Proposer, SelectChain, SyncOracle, +}; use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; use sp_runtime::{ @@ -493,10 +495,7 @@ pub fn start_mining_worker( create_inherent_data_providers: CIDP, timeout: Duration, build_time: Duration, -) -> ( - MiningHandle>::Proof>, - impl Future, -) +) -> (MiningHandle, impl Future) where Block: BlockT, C: BlockchainEvents + 'static, @@ -589,9 +588,9 @@ where }, }; - let mut inherent_digest = Digest::default(); + let mut inherent_digests = Digest::default(); if let Some(pre_runtime) = &pre_runtime { - inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, pre_runtime.to_vec())); + inherent_digests.push(DigestItem::PreRuntime(POW_ENGINE_ID, pre_runtime.to_vec())); } let pre_runtime = pre_runtime.clone(); @@ -609,21 +608,29 @@ where }, }; - let proposal = - match proposer.propose(inherent_data, inherent_digest, build_time, None).await { - Ok(x) => x, - Err(err) => { - warn!( - target: LOG_TARGET, - "Unable to propose new block for authoring. 
\ - Creating proposal failed: {}", - err, - ); - continue - }, - }; + let propose_args = ProposeArgs { + inherent_data, + inherent_digests, + max_duration: build_time, + block_size_limit: None, + storage_proof_recorder: None, + extra_extensions: Default::default(), + }; + + let proposal = match proposer.propose(propose_args).await { + Ok(x) => x, + Err(err) => { + warn!( + target: LOG_TARGET, + "Unable to propose new block for authoring. \ + Creating proposal failed: {}", + err, + ); + continue + }, + }; - let build = MiningBuild:: { + let build = MiningBuild:: { metadata: MiningMetadata { best_hash, pre_hash: proposal.block.header().hash(), diff --git a/substrate/client/consensus/pow/src/worker.rs b/substrate/client/consensus/pow/src/worker.rs index 73400136483a7..f5700d91caef3 100644 --- a/substrate/client/consensus/pow/src/worker.rs +++ b/substrate/client/consensus/pow/src/worker.rs @@ -56,11 +56,11 @@ pub struct MiningMetadata { } /// A build of mining, containing the metadata and the block proposal. -pub struct MiningBuild, Proof> { +pub struct MiningBuild> { /// Mining metadata. pub metadata: MiningMetadata, /// Mining proposal. - pub proposal: Proposal, + pub proposal: Proposal, } /// Version of the mining worker. 
@@ -72,16 +72,15 @@ pub struct MiningHandle< Block: BlockT, Algorithm: PowAlgorithm, L: sc_consensus::JustificationSyncLink, - Proof, > { version: Arc, algorithm: Arc, justification_sync_link: Arc, - build: Arc>>>, + build: Arc>>>, block_import: Arc>>, } -impl MiningHandle +impl MiningHandle where Block: BlockT, Algorithm: PowAlgorithm, @@ -112,7 +111,7 @@ where self.increment_version(); } - pub(crate) fn on_build(&self, value: MiningBuild) { + pub(crate) fn on_build(&self, value: MiningBuild) { let mut build = self.build.lock(); *build = Some(value); self.increment_version(); @@ -216,7 +215,7 @@ where } } -impl Clone for MiningHandle +impl Clone for MiningHandle where Block: BlockT, Algorithm: PowAlgorithm, diff --git a/substrate/client/consensus/slots/Cargo.toml b/substrate/client/consensus/slots/Cargo.toml index cc39575efe828..b286a64956962 100644 --- a/substrate/client/consensus/slots/Cargo.toml +++ b/substrate/client/consensus/slots/Cargo.toml @@ -33,6 +33,7 @@ sp-core = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } [dev-dependencies] substrate-test-runtime-client = { workspace = true } diff --git a/substrate/client/consensus/slots/src/lib.rs b/substrate/client/consensus/slots/src/lib.rs index 4f7e85541777a..a69d2d68e3bbc 100644 --- a/substrate/client/consensus/slots/src/lib.rs +++ b/substrate/client/consensus/slots/src/lib.rs @@ -38,12 +38,11 @@ use log::{debug, info, warn}; use sc_consensus::{BlockImport, JustificationSyncLink}; use sc_telemetry::{telemetry, TelemetryHandle, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_WARN}; use sp_arithmetic::traits::BaseArithmetic; -use sp_consensus::{Proposal, Proposer, SelectChain, SyncOracle}; +use sp_consensus::{Proposal, ProposeArgs, Proposer, SelectChain, SyncOracle}; use 
sp_consensus_slots::{Slot, SlotDuration}; use sp_inherents::CreateInherentDataProviders; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT}; use std::{ - fmt::Debug, ops::Deref, time::{Duration, Instant}, }; @@ -55,26 +54,18 @@ const LOG_TARGET: &str = "slots"; /// See [`sp_state_machine::StorageChanges`] for more information. pub type StorageChanges = sp_state_machine::StorageChanges>; -/// The result of [`SlotWorker::on_slot`]. -#[derive(Debug, Clone)] -pub struct SlotResult { - /// The block that was built. - pub block: Block, - /// The storage proof that was recorded while building the block. - pub storage_proof: Proof, -} - /// A worker that should be invoked at every new slot. /// /// The implementation should not make any assumptions of the slot being bound to the time or /// similar. The only valid assumption is that the slot number is always increasing. #[async_trait::async_trait] -pub trait SlotWorker { +pub trait SlotWorker { /// Called when a new slot is triggered. /// - /// Returns a future that resolves to a [`SlotResult`] iff a block was successfully built in - /// the slot. Otherwise `None` is returned. - async fn on_slot(&mut self, slot_info: SlotInfo) -> Option>; + /// Returns a future that resolves to a block. + /// + /// If block production failed, `None` is returned. + async fn on_slot(&mut self, slot_info: SlotInfo) -> Option; } /// A skeleton implementation for `SlotWorker` which tries to claim a slot at @@ -185,7 +176,7 @@ pub trait SimpleSlotWorker { claim: &Self::Claim, slot_info: SlotInfo, end_proposing_at: Instant, - ) -> Option>::Proof>> { + ) -> Option> { let slot = slot_info.slot; let telemetry = self.telemetry(); let log_target = self.logging_target(); @@ -200,13 +191,17 @@ pub trait SimpleSlotWorker { // deadline our production to 98% of the total time left for proposing. As we deadline // the proposing below to the same total time left, the 2% margin should be enough for // the result to be returned. 
+ let propose_args = ProposeArgs { + inherent_data, + inherent_digests: sp_runtime::generic::Digest { logs }, + max_duration: proposing_remaining_duration.mul_f32(0.98), + block_size_limit: slot_info.block_size_limit, + storage_proof_recorder: slot_info.storage_proof_recorder, + ..Default::default() + }; + let proposing = proposer - .propose( - inherent_data, - sp_runtime::generic::Digest { logs }, - proposing_remaining_duration.mul_f32(0.98), - slot_info.block_size_limit, - ) + .propose(propose_args) .map_err(|e| sp_consensus::Error::ClientImport(e.to_string())); let proposal = match futures::future::select( @@ -283,10 +278,7 @@ pub trait SimpleSlotWorker { } /// Implements [`SlotWorker::on_slot`]. - async fn on_slot( - &mut self, - slot_info: SlotInfo, - ) -> Option>::Proof>> + async fn on_slot(&mut self, slot_info: SlotInfo) -> Option where Self: Sync, { @@ -377,7 +369,7 @@ pub trait SimpleSlotWorker { let proposal = self.propose(proposer, &claim, slot_info, end_proposing_at).await?; - let (block, storage_proof) = (proposal.block, proposal.proof); + let block = proposal.block; let (header, body) = block.deconstruct(); let header_num = *header.number(); let header_hash = header.hash(); @@ -444,7 +436,7 @@ pub trait SimpleSlotWorker { }, } - Some(SlotResult { block: B::new(header, body), storage_proof }) + Some(B::new(header, body)) } } @@ -456,13 +448,10 @@ pub trait SimpleSlotWorker { pub struct SimpleSlotWorkerToSlotWorker(pub T); #[async_trait::async_trait] -impl + Send + Sync, B: BlockT> - SlotWorker>::Proof> for SimpleSlotWorkerToSlotWorker +impl + Send + Sync, B: BlockT> SlotWorker + for SimpleSlotWorkerToSlotWorker { - async fn on_slot( - &mut self, - slot_info: SlotInfo, - ) -> Option>::Proof>> { + async fn on_slot(&mut self, slot_info: SlotInfo) -> Option { self.0.on_slot(slot_info).await } } @@ -503,7 +492,7 @@ impl_inherent_data_provider_ext_tuple!(S, A, B, C, D, E, F, G, H, I, J); /// /// Every time a new slot is triggered, `worker.on_slot` is called 
and the future it returns is /// polled until completion, unless we are major syncing. -pub async fn start_slot_worker( +pub async fn start_slot_worker( slot_duration: SlotDuration, client: C, mut worker: W, @@ -512,7 +501,7 @@ pub async fn start_slot_worker( ) where B: BlockT, C: SelectChain, - W: SlotWorker, + W: SlotWorker, SO: SyncOracle + Send, CIDP: CreateInherentDataProviders + Send + 'static, CIDP::InherentDataProviders: InherentDataProviderExt + Send, @@ -829,6 +818,7 @@ mod test { Default::default(), ), block_size_limit: None, + storage_proof_recorder: None, } } diff --git a/substrate/client/consensus/slots/src/slots.rs b/substrate/client/consensus/slots/src/slots.rs index c0b412e8ad5b0..0d19108d1d2f2 100644 --- a/substrate/client/consensus/slots/src/slots.rs +++ b/substrate/client/consensus/slots/src/slots.rs @@ -23,7 +23,8 @@ use super::{InherentDataProviderExt, Slot, LOG_TARGET}; use sp_consensus::{SelectChain, SyncOracle}; use sp_inherents::{CreateInherentDataProviders, InherentDataProvider}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT}; +use sp_trie::recorder::Recorder; use futures_timer::Delay; use std::time::{Duration, Instant}; @@ -62,6 +63,8 @@ pub struct SlotInfo { /// /// For more information see [`Proposer::propose`](sp_consensus::Proposer::propose). pub block_size_limit: Option, + /// Optional [`Recorder`] to use when build the block. + pub storage_proof_recorder: Option>>, } impl SlotInfo { @@ -82,6 +85,29 @@ impl SlotInfo { chain_head, block_size_limit, ends_at: Instant::now() + time_until_next_slot(duration), + storage_proof_recorder: None, + } + } + + /// Create a new [`SlotInfo`] with a storage proof recorder. + /// + /// `ends_at` is calculated using `timestamp` and `duration`. 
+ pub fn with_storage_proof_recorder( + slot: Slot, + create_inherent_data: Box, + duration: Duration, + chain_head: B::Header, + block_size_limit: Option, + storage_proof_recorder: Recorder>, + ) -> Self { + Self { + slot, + create_inherent_data, + duration, + chain_head, + block_size_limit, + ends_at: Instant::now() + time_until_next_slot(duration), + storage_proof_recorder: Some(storage_proof_recorder), } } } diff --git a/substrate/primitives/api/test/tests/runtime_calls.rs b/substrate/primitives/api/test/tests/runtime_calls.rs index 33cb72249a826..4f663dcd4f99a 100644 --- a/substrate/primitives/api/test/tests/runtime_calls.rs +++ b/substrate/primitives/api/test/tests/runtime_calls.rs @@ -24,7 +24,7 @@ use std::{ }; use sc_block_builder::BlockBuilderBuilder; -use sp_api::{ApiExt, Core, ProvideRuntimeApi}; +use sp_api::{ApiExt, Core, ProofRecorder, ProvideRuntimeApi}; use sp_externalities::{decl_extension, TransactionType}; use sp_runtime::{ traits::{HashingFor, Header as HeaderT}, @@ -111,21 +111,23 @@ fn record_proof_works() { } .into_unchecked_extrinsic(); + let storage_proof_recorder = ProofRecorder::::default(); + // Build the block and record proof let mut builder = BlockBuilderBuilder::new(&client) .on_parent_block(client.chain_info().best_hash) .with_parent_block_number(client.chain_info().best_number) - .enable_proof_recording() + .with_proof_recorder(storage_proof_recorder.clone()) .build() .unwrap(); builder.push(transaction.clone()).unwrap(); - let (block, _, proof) = builder.build().expect("Bake block").into_inner(); - let backend = create_proof_check_backend::>( - storage_root, - proof.expect("Proof was generated"), - ) - .expect("Creates proof backend."); + let (block, _) = builder.build().expect("Bake block").into_inner(); + + let proof = storage_proof_recorder.drain_storage_proof(); + + let backend = create_proof_check_backend::>(storage_root, proof) + .expect("Creates proof backend."); // Use the proof backend to execute `execute_block`. 
let mut overlay = Default::default(); diff --git a/substrate/primitives/consensus/common/Cargo.toml b/substrate/primitives/consensus/common/Cargo.toml index 376ef8c04c231..caf995ed98070 100644 --- a/substrate/primitives/consensus/common/Cargo.toml +++ b/substrate/primitives/consensus/common/Cargo.toml @@ -20,9 +20,12 @@ targets = ["x86_64-unknown-linux-gnu"] async-trait = { workspace = true } futures = { features = ["thread-pool"], workspace = true } log = { workspace = true, default-features = true } +sp-api = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-runtime = { workspace = true, default-features = true } sp-state-machine = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } thiserror = { workspace = true } [dev-dependencies] diff --git a/substrate/primitives/consensus/common/src/lib.rs b/substrate/primitives/consensus/common/src/lib.rs index 37636b34b03df..dc40257f5fcb8 100644 --- a/substrate/primitives/consensus/common/src/lib.rs +++ b/substrate/primitives/consensus/common/src/lib.rs @@ -24,11 +24,12 @@ use std::{sync::Arc, time::Duration}; use futures::prelude::*; +use sp_api::ProofRecorder; +use sp_externalities::Extensions; use sp_runtime::{ traits::{Block as BlockT, HashingFor}, Digest, }; -use sp_state_machine::StorageProof; pub mod block_validation; pub mod error; @@ -83,7 +84,7 @@ pub trait Environment { + Unpin + 'static; /// Error which can occur upon creation. - type Error: From + std::error::Error + 'static; + type Error: From + Send + Sync + std::error::Error + 'static; /// Initialize the proposal logic on top of a specific header. Provide /// the authorities at that header. @@ -91,83 +92,47 @@ pub trait Environment { } /// A proposal that is created by a [`Proposer`]. -pub struct Proposal { +pub struct Proposal { /// The block that was build. 
pub block: Block, - /// Proof that was recorded while building the block. - pub proof: Proof, /// The storage changes while building this block. pub storage_changes: sp_state_machine::StorageChanges>, } -/// Error that is returned when [`ProofRecording`] requested to record a proof, -/// but no proof was recorded. -#[derive(Debug, thiserror::Error)] -#[error("Proof should be recorded, but no proof was provided.")] -pub struct NoProofRecorded; - -/// A trait to express the state of proof recording on type system level. -/// -/// This is used by [`Proposer`] to signal if proof recording is enabled. This can be used by -/// downstream users of the [`Proposer`] trait to enforce that proof recording is activated when -/// required. The only two implementations of this trait are [`DisableProofRecording`] and -/// [`EnableProofRecording`]. -/// -/// This trait is sealed and can not be implemented outside of this crate! -pub trait ProofRecording: Send + Sync + private::Sealed + 'static { - /// The proof type that will be used internally. - type Proof: Send + Sync + 'static; - /// Is proof recording enabled? - const ENABLED: bool; - /// Convert the given `storage_proof` into [`Self::Proof`]. +/// Arguments for [`Proposer::propose`]. +pub struct ProposeArgs { + /// The inherent data to pass to the block production. + pub inherent_data: InherentData, + /// The inherent digests to include in the produced block. + pub inherent_digests: Digest, + /// Max duration for building the block. + pub max_duration: Duration, + /// Optional size limit for the produced block. /// - /// Internally Substrate uses `Option` to express the both states of proof - /// recording (for now) and as [`Self::Proof`] is some different type, we need to provide a - /// function to convert this value. + /// When set, block production ends before hitting this limit. The limit includes the storage + /// proof, when proof recording is activated. 
+ pub block_size_limit: Option, + /// Optional proof recorder for recording storage proofs during block production. /// - /// If the proof recording was requested, but `None` is given, this will return - /// `Err(NoProofRecorded)`. - fn into_proof(storage_proof: Option) -> Result; + /// When `Some`, the recorder will be used on block production to record all storage accesses. + pub storage_proof_recorder: Option>, + /// Extra extensions for the runtime environment. + pub extra_extensions: Extensions, } -/// Express that proof recording is disabled. -/// -/// For more information see [`ProofRecording`]. -pub struct DisableProofRecording; - -impl ProofRecording for DisableProofRecording { - type Proof = (); - const ENABLED: bool = false; - - fn into_proof(_: Option) -> Result { - Ok(()) +impl Default for ProposeArgs { + fn default() -> Self { + Self { + inherent_data: Default::default(), + inherent_digests: Default::default(), + max_duration: Default::default(), + block_size_limit: Default::default(), + storage_proof_recorder: Default::default(), + extra_extensions: Default::default(), + } } } -/// Express that proof recording is enabled. -/// -/// For more information see [`ProofRecording`]. -pub struct EnableProofRecording; - -impl ProofRecording for EnableProofRecording { - type Proof = sp_state_machine::StorageProof; - const ENABLED: bool = true; - - fn into_proof(proof: Option) -> Result { - proof.ok_or(NoProofRecorded) - } -} - -/// Provides `Sealed` trait to prevent implementing trait [`ProofRecording`] outside of this crate. -mod private { - /// Special trait that prevents the implementation of [`super::ProofRecording`] outside of this - /// crate. - pub trait Sealed {} - - impl Sealed for super::DisableProofRecording {} - impl Sealed for super::EnableProofRecording {} -} - /// Logic for a proposer. 
/// /// This will encapsulate creation and evaluation of proposals at a specific @@ -176,41 +141,19 @@ mod private { /// Proposers are generic over bits of "consensus data" which are engine-specific. pub trait Proposer { /// Error type which can occur when proposing or evaluating. - type Error: From + std::error::Error + 'static; + type Error: From + Send + Sync + std::error::Error + 'static; /// Future that resolves to a committed proposal with an optional proof. - type Proposal: Future, Self::Error>> - + Send - + Unpin - + 'static; - /// The supported proof recording by the implementor of this trait. See [`ProofRecording`] - /// for more information. - type ProofRecording: self::ProofRecording + Send + Sync + 'static; - /// The proof type used by [`Self::ProofRecording`]. - type Proof: Send + Sync + 'static; + type Proposal: Future, Self::Error>> + Send + Unpin + 'static; /// Create a proposal. /// - /// Gets the `inherent_data` and `inherent_digests` as input for the proposal. Additionally - /// a maximum duration for building this proposal is given. If building the proposal takes - /// longer than this maximum, the proposal will be very likely discarded. - /// - /// If `block_size_limit` is given, the proposer should push transactions until the block size - /// limit is hit. Depending on the `finalize_block` implementation of the runtime, it probably - /// incorporates other operations (that are happening after the block limit is hit). So, - /// when the block size estimation also includes a proof that is recorded alongside the block - /// production, the proof can still grow. This means that the `block_size_limit` should not be - /// the hard limit of what is actually allowed. + /// Takes a [`ProposeArgs`] struct containing all the necessary parameters for block production + /// including inherent data, digests, duration limits, storage proof recorder, and extensions. /// /// # Return /// /// Returns a future that resolves to a [`Proposal`] or to [`Error`]. 
- fn propose( - self, - inherent_data: InherentData, - inherent_digests: Digest, - max_duration: Duration, - block_size_limit: Option, - ) -> Self::Proposal; + fn propose(self, args: ProposeArgs) -> Self::Proposal; } /// An oracle for when major synchronization work is being undertaken. diff --git a/substrate/primitives/externalities/src/extensions.rs b/substrate/primitives/externalities/src/extensions.rs index 48891efcf03d6..6b9bda9874603 100644 --- a/substrate/primitives/externalities/src/extensions.rs +++ b/substrate/primitives/externalities/src/extensions.rs @@ -29,6 +29,7 @@ use alloc::{ }; use core::{ any::{Any, TypeId}, + iter::FromIterator, ops::DerefMut, }; @@ -169,6 +170,14 @@ macro_rules! decl_extension { )* } + impl $ext_name { + /// Returns the `TypeId` of this extension. + #[allow(dead_code)] + pub fn type_id() -> core::any::TypeId { + core::any::TypeId::of::() + } + } + impl core::ops::Deref for $ext_name { type Target = $inner; @@ -205,6 +214,14 @@ macro_rules! decl_extension { core::any::Any::type_id(self) } } + + impl $ext_name { + /// Returns the `TypeId` of this extension. + #[allow(dead_code)] + pub fn type_id() -> core::any::TypeId { + core::any::TypeId::of::() + } + } } } @@ -258,6 +275,11 @@ impl Extensions { self.extensions.insert(type_id, Box::new(ext)); } + /// Returns `true` if an extension for the given `type_id` is already registered. + pub fn is_registered(&self, type_id: TypeId) -> bool { + self.extensions.contains_key(&type_id) + } + /// Register extension `extension` using the given `type_id`. pub fn register_with_type_id( &mut self, @@ -315,6 +337,11 @@ impl Extensions { pub fn rollback_transaction(&mut self, ty: TransactionType) { self.extensions.values_mut().for_each(|e| e.rollback_transaction(ty)); } + + /// Returns an iterator that returns all stored extensions. 
+ pub fn into_extensions(self) -> impl Iterator> { + self.extensions.into_values() + } } impl Extend for Extensions { @@ -324,6 +351,45 @@ impl Extend for Extensions { } } +impl From for Extensions { + fn from(ext: A) -> Self { + Self { + extensions: FromIterator::from_iter( + [(Extension::type_id(&ext), Box::new(ext) as Box)].into_iter(), + ), + } + } +} + +impl From<(A, B)> for Extensions { + fn from((ext, ext2): (A, B)) -> Self { + Self { + extensions: FromIterator::from_iter( + [ + (Extension::type_id(&ext), Box::new(ext) as Box), + (Extension::type_id(&ext2), Box::new(ext2) as Box), + ] + .into_iter(), + ), + } + } +} + +impl From<(A, B, C)> for Extensions { + fn from((ext, ext2, ext3): (A, B, C)) -> Self { + Self { + extensions: FromIterator::from_iter( + [ + (Extension::type_id(&ext), Box::new(ext) as Box), + (Extension::type_id(&ext2), Box::new(ext2) as Box), + (Extension::type_id(&ext3), Box::new(ext3) as Box), + ] + .into_iter(), + ), + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -367,4 +433,12 @@ mod tests { assert_eq!(ext_ty2.0, 2); } } + + #[test] + fn from_boxed_extensions() { + let exts = Extensions::from((DummyExt(1), DummyExt2(2))); + + assert!(exts.is_registered(DummyExt::type_id())); + assert!(exts.is_registered(DummyExt2::type_id())); + } } diff --git a/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs b/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs index 9086f86c24390..dc00b1f2104fe 100644 --- a/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs +++ b/substrate/utils/frame/benchmarking-cli/src/extrinsic/bench.rs @@ -20,7 +20,7 @@ use sc_block_builder::{BlockBuilderApi, BlockBuilderBuilder, BuiltBlock}; use sc_cli::{Error, Result}; use sc_client_api::UsageProvider; -use sp_api::{ApiExt, CallApiAt, Core, ProvideRuntimeApi}; +use sp_api::{ApiExt, CallApiAt, Core, ProofRecorder, ProvideRuntimeApi}; use sp_blockchain::{ ApplyExtrinsicFailed::Validity, Error::{ApplyExtrinsicFailed, RuntimeApiError}, @@ 
-137,12 +137,22 @@ where ext_builder: Option<&dyn ExtrinsicBuilder>, ) -> Result<(Block, Option, u64)> { let chain = self.client.usage_info().chain; - let mut builder = BlockBuilderBuilder::new(&*self.client) + + let storage_proof_recorder = self.record_proof.then(|| ProofRecorder::::default()); + + let builder = BlockBuilderBuilder::new(&*self.client) .on_parent_block(chain.best_hash) .with_parent_block_number(chain.best_number) .with_inherent_digests(Digest { logs: self.digest_items.clone() }) - .with_proof_recording(self.record_proof) - .build()?; + .with_proof_recorder(storage_proof_recorder.clone()); + + let builder = if let Some(proof_recorder) = &storage_proof_recorder { + builder.with_extra_extensions(ProofSizeExt::new(proof_recorder.clone())) + } else { + builder + }; + + let mut builder = builder.build()?; // Create and insert the inherents. let inherents = builder.create_inherents(self.inherent_data.clone())?; @@ -175,7 +185,9 @@ where None => None, }; - let BuiltBlock { block, proof, .. } = builder.build()?; + let BuiltBlock { block, .. 
} = builder.build()?; + + let proof = storage_proof_recorder.map(|r| r.drain_storage_proof()); Ok(( block, diff --git a/templates/parachain/node/src/service.rs b/templates/parachain/node/src/service.rs index 1509241e7692b..8bf058bbebde1 100644 --- a/templates/parachain/node/src/service.rs +++ b/templates/parachain/node/src/service.rs @@ -188,7 +188,7 @@ fn start_consensus( overseer_handle: OverseerHandle, announce_block: Arc>) + Send + Sync>, ) -> Result<(), sc_service::Error> { - let proposer = sc_basic_authorship::ProposerFactory::with_proof_recording( + let proposer = sc_basic_authorship::ProposerFactory::new( task_manager.spawn_handle(), client.clone(), transaction_pool, diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index 3a0d98065453d..5814cd70c40b4 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -862,7 +862,6 @@ node = [ "cumulus-client-collator", "cumulus-client-consensus-aura", "cumulus-client-consensus-common", - "cumulus-client-consensus-proposer", "cumulus-client-consensus-relay-chain", "cumulus-client-network", "cumulus-client-parachain-inherent", @@ -2226,11 +2225,6 @@ default-features = false optional = true path = "../cumulus/client/consensus/common" -[dependencies.cumulus-client-consensus-proposer] -default-features = false -optional = true -path = "../cumulus/client/consensus/proposer" - [dependencies.cumulus-client-consensus-relay-chain] default-features = false optional = true diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 7c85a238c5b69..125939c2edf76 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -92,10 +92,6 @@ pub use cumulus_client_consensus_aura; #[cfg(feature = "cumulus-client-consensus-common")] pub use cumulus_client_consensus_common; -/// A Substrate `Proposer` for building parachain blocks. -#[cfg(feature = "cumulus-client-consensus-proposer")] -pub use cumulus_client_consensus_proposer; - /// The relay-chain provided consensus algorithm. 
#[cfg(feature = "cumulus-client-consensus-relay-chain")] pub use cumulus_client_consensus_relay_chain;