diff --git a/.github/zombienet-tests/zombienet_cumulus_tests.yml b/.github/zombienet-tests/zombienet_cumulus_tests.yml index b4a290dc491b2..384108cb3953a 100644 --- a/.github/zombienet-tests/zombienet_cumulus_tests.yml +++ b/.github/zombienet-tests/zombienet_cumulus_tests.yml @@ -3,13 +3,11 @@ runner-type: "default" cumulus-image: "test-parachain" - - job-name: "zombienet-cumulus-0002-pov_recovery" test-filter: "zombie_ci::pov_recovery::pov_recovery" runner-type: "default" cumulus-image: "test-parachain" - - job-name: "zombienet-cumulus-0003-full_node_catching_up" test-filter: "zombie_ci::full_node_catching_up::full_node_catching_up" runner-type: "default" @@ -17,35 +15,30 @@ - job-name: "zombienet-cumulus-0004-runtime_upgrade" test-filter: "zombie_ci::runtime_upgrade::runtime_upgrade" - runner-type: "default" + runner-type: "large" cumulus-image: "test-parachain" needs-wasm-binary: true - - job-name: "zombienet-cumulus-0005-migrate_solo_to_para" test-filter: "zombie_ci::migrate_solo::migrate_solo_to_para" runner-type: "default" cumulus-image: "test-parachain" - - job-name: "zombienet-cumulus-0006-rpc_collator_builds_blocks" test-filter: "zombie_ci::rpc_collator_build_blocks::rpc_collator_builds_blocks" runner-type: "large" cumulus-image: "test-parachain" - - job-name: "zombienet-cumulus-0007-full_node_warp_sync" test-filter: "zombie_ci::full_node_warp_sync::full_node_warp_sync" runner-type: "large" cumulus-image: "test-parachain" - - job-name: "zombienet-cumulus-0008-elastic_authoring" test-filter: "zombie_ci::elastic_scaling::slot_based_authoring::elastic_scaling_slot_based_authoring" runner-type: "default" cumulus-image: "test-parachain" - # Disabled, occasionally fails # See https://github.com/paritytech/polkadot-sdk/issues/8986 - job-name: "zombienet-cumulus-0009-elastic_scaling_pov_recovery" @@ -53,7 +46,6 @@ runner-type: "default" cumulus-image: "test-parachain" - # Disabled, occasionally fails. 
# See https://github.com/paritytech/polkadot-sdk/issues/8999 - job-name: "zombienet-cumulus-0010-elastic_scaling_multiple_block_per_slot" @@ -61,33 +53,79 @@ runner-type: "default" cumulus-image: "test-parachain" - - job-name: "zombienet-cumulus-0011-dht-bootnodes" test-filter: "zombie_ci::bootnodes::dht_bootnodes_test" runner-type: "default" cumulus-image: "polkadot-parachain-debug" - - job-name: "zombienet-cumulus-0013-elastic_scaling_slot_based_rp_offset" test-filter: "zombie_ci::elastic_scaling::slot_based_rp_offset::elastic_scaling_slot_based_relay_parent_offset_test" runner-type: "default" cumulus-image: "test-parachain" - - job-name: "zombienet-cumulus-0014-elastic_scaling_upgrade_to_3_cores" test-filter: "zombie_ci::elastic_scaling::upgrade_to_3_cores::elastic_scaling_upgrade_to_3_cores" - runner-type: "default" + runner-type: "large" cumulus-image: "test-parachain" - + use-zombienet-sdk: true needs-wasm-binary: true - job-name: "zombienet-cumulus-0015-parachain-runtime-upgrade" test-filter: "zombie_ci::parachain_runtime_upgrade_slot_duration_18s::parachain_runtime_upgrade_slot_duration_18s" + runner-type: "large" + cumulus-image: "test-parachain" + use-zombienet-sdk: true + needs-wasm-binary: true + +- job-name: "zombienet-cumulus-0016-block_bundling_basic" + test-filter: "zombie_ci::block_bundling::basic::block_bundling_basic" + runner-type: "large" + cumulus-image: "test-parachain" + use-zombienet-sdk: true + +- job-name: "zombienet-cumulus-0017-block_bundling_pov_recovery" + test-filter: "zombie_ci::block_bundling::pov_recovery::block_bundling_pov_recovery" runner-type: "default" cumulus-image: "test-parachain" + use-zombienet-sdk: true +- job-name: "zombienet-cumulus-0018-block_bundling_full_core_usage_scenarios" + test-filter: "zombie_ci::block_bundling::full_core_usage_scenarios::block_bundling_full_core_usage_scenarios" + runner-type: "large" + cumulus-image: "test-parachain" + use-zombienet-sdk: true + +- job-name: 
"zombienet-cumulus-0019-block_bundling_tracing_block" + test-filter: "zombie_ci::block_bundling::tracing_block::block_bundling_tracing_block" + runner-type: "default" + cumulus-image: "test-parachain" + use-zombienet-sdk: true + +- job-name: "zombienet-cumulus-0020-block_bundling_relay_parent_offset" + test-filter: "zombie_ci::block_bundling::relay_parent_offset::block_bundling_relay_parent_offset" + runner-type: "default" + cumulus-image: "test-parachain" + use-zombienet-sdk: true + +- job-name: "zombienet-cumulus-0021-block_bundling_runtime_upgrade" + test-filter: "zombie_ci::block_bundling::runtime_upgrade::block_bundling_runtime_upgrade" + runner-type: "large" + cumulus-image: "test-parachain" + use-zombienet-sdk: true needs-wasm-binary: true +- job-name: "zombienet-cumulus-0022-block_bundling_three_cores_glutton" + test-filter: "zombie_ci::block_bundling::three_cores_glutton::block_bundling_three_cores_glutton" + runner-type: "default" + cumulus-image: "test-parachain" + use-zombienet-sdk: true + +- job-name: "zombienet-cumulus-0023-block_bundling_warp_sync" + test-filter: "zombie_ci::block_bundling::warp_sync::warp_sync_with_bundled_blocks" + runner-type: "large" + cumulus-image: "test-parachain" + use-zombienet-sdk: true + - job-name: "zombienet-cumulus-0016-statement_store_basic_propagation" test-filter: "zombie_ci::statement_store::integration::statement_store_basic_propagation" runner-type: "default" diff --git a/Cargo.lock b/Cargo.lock index 60dd0d1295162..df1ed4cce2ecd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4720,6 +4720,7 @@ dependencies = [ "cumulus-client-collator", "cumulus-client-consensus-common", "cumulus-client-parachain-inherent", + "cumulus-client-proof-size-recording", "cumulus-primitives-aura", "cumulus-primitives-core", "cumulus-relay-chain-interface", @@ -4914,6 +4915,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "cumulus-client-proof-size-recording" +version = "0.1.0" +dependencies = [ + "parity-scale-codec", + 
"sc-client-api 28.0.0", + "sp-blockchain 28.0.0", + "sp-runtime 31.0.1", + "sp-trie 29.0.0", +] + [[package]] name = "cumulus-client-service" version = "0.7.0" @@ -4924,6 +4936,7 @@ dependencies = [ "cumulus-client-consensus-common", "cumulus-client-network", "cumulus-client-pov-recovery", + "cumulus-client-proof-size-recording", "cumulus-primitives-core", "cumulus-primitives-proof-size-hostfunction", "cumulus-relay-chain-inprocess-interface", @@ -5413,6 +5426,7 @@ dependencies = [ name = "cumulus-test-client" version = "0.1.0" dependencies = [ + "cumulus-pallet-parachain-system", "cumulus-pallet-weight-reclaim", "cumulus-primitives-core", "cumulus-primitives-parachain-inherent", @@ -5437,6 +5451,7 @@ dependencies = [ "sp-blockchain 28.0.0", "sp-consensus-aura", "sp-core 28.0.0", + "sp-externalities 0.25.0", "sp-inherents 26.0.0", "sp-io 30.0.0", "sp-keyring", @@ -5473,6 +5488,7 @@ dependencies = [ "cumulus-pallet-weight-reclaim", "cumulus-primitives-aura", "cumulus-primitives-core", + "cumulus-primitives-storage-weight-reclaim", "frame-executive", "frame-support", "frame-system", @@ -5486,6 +5502,7 @@ dependencies = [ "pallet-sudo", "pallet-timestamp", "pallet-transaction-payment", + "pallet-utility", "parity-scale-codec", "polkadot-primitives", "scale-info", @@ -5505,6 +5522,7 @@ dependencies = [ "sp-version 29.0.0", "staging-parachain-info", "substrate-wasm-builder", + "tracing", ] [[package]] @@ -5604,14 +5622,19 @@ name = "cumulus-zombienet-sdk-tests" version = "0.1.0" dependencies = [ "anyhow", + "cumulus-primitives-core", "cumulus-test-runtime", "cumulus-zombienet-sdk-helpers", "env_logger 0.11.3", + "frame-support", "futures", "log", "parity-scale-codec", + "parity-wasm", "polkadot-primitives", "rstest", + "sc-executor 0.32.0", + "sc-executor-common 0.29.0", "sc-statement-store", "scale-info", "serde", @@ -5620,8 +5643,11 @@ dependencies = [ "sp-consensus-slots", "sp-core 28.0.0", "sp-keyring", + "sp-maybe-compressed-blob 11.0.0", + "sp-rpc", "sp-runtime 
31.0.1", "sp-statement-store", + "sp-version 29.0.0", "subxt 0.50.0", "subxt-signer 0.50.0", "tokio", @@ -17111,6 +17137,7 @@ dependencies = [ "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", + "cumulus-client-proof-size-recording", "cumulus-client-service", "cumulus-pallet-aura-ext", "cumulus-pallet-dmp-queue", @@ -24253,6 +24280,7 @@ dependencies = [ name = "sp-block-builder" version = "26.0.0" dependencies = [ + "parity-scale-codec", "sp-api 26.0.0", "sp-inherents 26.0.0", "sp-runtime 31.0.1", diff --git a/Cargo.toml b/Cargo.toml index 4d1ff120a6e60..c399f0f961ff6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,6 +70,7 @@ members = [ "cumulus/client/network", "cumulus/client/parachain-inherent", "cumulus/client/pov-recovery", + "cumulus/client/proof-size-recording", "cumulus/client/relay-chain-inprocess-interface", "cumulus/client/relay-chain-interface", "cumulus/client/relay-chain-minimal-node", @@ -765,6 +766,7 @@ cumulus-client-consensus-relay-chain = { path = "cumulus/client/consensus/relay- cumulus-client-network = { path = "cumulus/client/network", default-features = false } cumulus-client-parachain-inherent = { path = "cumulus/client/parachain-inherent", default-features = false } cumulus-client-pov-recovery = { path = "cumulus/client/pov-recovery", default-features = false } +cumulus-client-proof-size-recording = { path = "cumulus/client/proof-size-recording", default-features = false } cumulus-client-service = { path = "cumulus/client/service", default-features = false } cumulus-pallet-aura-ext = { path = "cumulus/pallets/aura-ext", default-features = false } cumulus-pallet-parachain-system = { path = "cumulus/pallets/parachain-system", default-features = false } diff --git a/cumulus/client/collator/src/lib.rs b/cumulus/client/collator/src/lib.rs index 8a07c3a86337c..ca84fb8275ffb 100644 --- a/cumulus/client/collator/src/lib.rs +++ b/cumulus/client/collator/src/lib.rs @@ -16,7 +16,6 @@ // along with Cumulus. 
If not, see . //! Cumulus Collator implementation for Substrate. - use polkadot_node_primitives::CollationGenerationConfig; use polkadot_node_subsystem::messages::{CollationGenerationMessage, CollatorProtocolMessage}; use polkadot_overseer::Handle as OverseerHandle; diff --git a/cumulus/client/collator/src/service.rs b/cumulus/client/collator/src/service.rs index 831686246aed3..38755a706774a 100644 --- a/cumulus/client/collator/src/service.rs +++ b/cumulus/client/collator/src/service.rs @@ -21,8 +21,9 @@ use cumulus_client_network::WaitToAnnounce; use cumulus_primitives_core::{CollationInfo, CollectCollationInfo, ParachainBlockData}; +use polkadot_primitives::UMP_SEPARATOR; use sc_client_api::BlockBackend; -use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_api::{ApiExt, ProvideRuntimeApi, StorageProof}; use sp_consensus::BlockStatus; use sp_core::traits::SpawnNamed; use sp_runtime::traits::{Block as BlockT, HashingFor, Header as HeaderT, Zero}; @@ -36,7 +37,6 @@ use codec::Encode; use futures::channel::oneshot; use parking_lot::Mutex; use std::sync::Arc; - /// The logging target. const LOG_TARGET: &str = "cumulus-collator"; @@ -59,6 +59,17 @@ pub trait ServiceInterface { candidate: ParachainCandidate, ) -> Option<(Collation, ParachainBlockData)>; + /// Build a multi-block collation. + /// + /// Does the same as [`Self::build_collation`], but includes multiple blocks into one collation. + /// The given `parent_header` should be the header from the parent of the first block. + fn build_multi_block_collation( + &self, + parent_header: &Block::Header, + blocks: Vec, + proof: StorageProof, + ) -> Option<(Collation, ParachainBlockData)>; + /// Inform networking systems that the block should be announced after a signal has /// been received to indicate the block has been seconded by a relay-chain validator. 
/// @@ -103,6 +114,11 @@ where RA: ProvideRuntimeApi, RA::Api: CollectCollationInfo, { + fn split_at_separator(messages: Vec>) -> (Vec>, Vec>) { + let mut parts = messages.splitn(2, |m: &Vec| m.is_empty()); + (parts.next().unwrap_or(&[]).to_vec(), parts.next().unwrap_or(&[]).to_vec()) + } + /// Create a new instance. pub fn new( block_status: Arc, @@ -215,53 +231,80 @@ where /// as it fetches underlying runtime API data. /// /// This also returns the unencoded parachain block data, in case that is desired. - pub fn build_collation( + fn build_multi_block_collation( &self, parent_header: &Block::Header, - block_hash: Block::Hash, - candidate: ParachainCandidate, + blocks: Vec, + proof: StorageProof, ) -> Option<(Collation, ParachainBlockData)> { - let block = candidate.block; + let compact_proof = + match proof.into_compact_proof::>(*parent_header.state_root()) { + Ok(proof) => proof, + Err(e) => { + tracing::error!(target: "cumulus-collator", "Failed to compact proof: {:?}", e); + return None; + }, + }; - let compact_proof = match candidate - .proof - .into_compact_proof::>(*parent_header.state_root()) - { - Ok(proof) => proof, - Err(e) => { - tracing::error!(target: "cumulus-collator", "Failed to compact proof: {:?}", e); - return None; - }, - }; + let mut api_version = 0; + let mut upward_messages = Vec::new(); + let mut upward_message_signals = Vec::>::with_capacity(4); + let mut horizontal_messages = Vec::new(); + let mut new_validation_code = None; + let mut processed_downward_messages = 0; + let mut hrmp_watermark = None; + let mut head_data = None; + + for block in &blocks { + // Create the parachain block data for the validators. + let (collation_info, _api_version) = self + .fetch_collation_info(block.hash(), block.header()) + .map_err(|e| { + tracing::error!( + target: LOG_TARGET, + error = ?e, + "Failed to collect collation info.", + ) + }) + .ok() + .flatten()?; + + // We are always using the `api_version` of the parent block. 
The `api_version` can only + // change with a runtime upgrade and this is when we want to observe the old + // `api_version`. Because this old `api_version` is the one used to validate this + // block. Otherwise, we already assume the `api_version` is higher than what the relay + // chain will use and this will lead to validation errors. + api_version = self + .runtime_api + .runtime_api() + .api_version::>(parent_header.hash()) + .ok() + .flatten()?; + + let (messages, signals) = Self::split_at_separator(collation_info.upward_messages); + + upward_messages.extend(messages); + upward_message_signals.extend(signals); + horizontal_messages.extend(collation_info.horizontal_messages); + + if let Some(new_code) = collation_info.new_validation_code { + if new_validation_code.replace(new_code).is_some() { + tracing::warn!( + target: LOG_TARGET, + block = ?block.hash(), + "Overwriting validation code from an earlier block in the bundle.", + ); + } + } + processed_downward_messages += collation_info.processed_downward_messages; + hrmp_watermark = Some(collation_info.hrmp_watermark); + head_data = Some(collation_info.head_data); + } - // Create the parachain block data for the validators. - let (collation_info, _api_version) = self - .fetch_collation_info(block_hash, block.header()) - .map_err(|e| { - tracing::error!( - target: LOG_TARGET, - error = ?e, - "Failed to collect collation info.", - ) - }) - .ok() - .flatten()?; - - // Workaround for: https://github.com/paritytech/polkadot-sdk/issues/64 - // - // We are always using the `api_version` of the parent block. The `api_version` can only - // change with a runtime upgrade and this is when we want to observe the old `api_version`. - // Because this old `api_version` is the one used to validate this block. Otherwise we - // already assume the `api_version` is higher than what the relay chain will use and this - // will lead to validation errors. 
- let api_version = self - .runtime_api - .runtime_api() - .api_version::>(parent_header.hash()) - .ok() - .flatten()?; - - let block_data = ParachainBlockData::::new(vec![block], compact_proof); + // Sort by recipient as required by the relay chain rules. + horizontal_messages.sort_by(|a, b| a.recipient.cmp(&b.recipient)); + + let block_data = ParachainBlockData::::new(blocks, compact_proof); let pov = polkadot_node_primitives::maybe_compress_pov(PoV { block_data: BlockData(if api_version >= 3 { @@ -280,8 +323,13 @@ where }), }); - let upward_messages = collation_info - .upward_messages + // If we got some signals, push them now. + if !upward_message_signals.is_empty() { + upward_messages.push(UMP_SEPARATOR); + upward_messages.extend(upward_message_signals.into_iter()); + } + + let upward_messages = upward_messages .try_into() .map_err(|e| { tracing::error!( @@ -291,8 +339,7 @@ where ) }) .ok()?; - let horizontal_messages = collation_info - .horizontal_messages + let horizontal_messages = horizontal_messages .try_into() .map_err(|e| { tracing::error!( @@ -305,11 +352,12 @@ where let collation = Collation { upward_messages, - new_validation_code: collation_info.new_validation_code, - processed_downward_messages: collation_info.processed_downward_messages, + new_validation_code, + processed_downward_messages, horizontal_messages, - hrmp_watermark: collation_info.hrmp_watermark, - head_data: collation_info.head_data, + // If these are `None`, there was no block. 
+ hrmp_watermark: hrmp_watermark?, + head_data: head_data?, proof_of_validity: MaybeCompressedPoV::Compressed(pov), }; @@ -342,10 +390,15 @@ where fn build_collation( &self, parent_header: &Block::Header, - block_hash: Block::Hash, + _: Block::Hash, candidate: ParachainCandidate, ) -> Option<(Collation, ParachainBlockData)> { - CollatorService::build_collation(self, parent_header, block_hash, candidate) + CollatorService::build_multi_block_collation( + self, + parent_header, + vec![candidate.block], + candidate.proof, + ) } fn announce_with_barrier( @@ -358,4 +411,13 @@ where fn announce_block(&self, block_hash: Block::Hash, data: Option>) { (self.announce_block)(block_hash, data) } + + fn build_multi_block_collation( + &self, + parent_header: &::Header, + blocks: Vec, + proof: StorageProof, + ) -> Option<(Collation, ParachainBlockData)> { + CollatorService::build_multi_block_collation(self, parent_header, blocks, proof) + } } diff --git a/cumulus/client/consensus/aura/Cargo.toml b/cumulus/client/consensus/aura/Cargo.toml index c64aa5455b323..053577b0aeefc 100644 --- a/cumulus/client/consensus/aura/Cargo.toml +++ b/cumulus/client/consensus/aura/Cargo.toml @@ -49,6 +49,7 @@ sp-trie = { workspace = true, default-features = true } cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } cumulus-client-parachain-inherent = { workspace = true, default-features = true } +cumulus-client-proof-size-recording = { workspace = true, default-features = true } cumulus-primitives-aura = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } cumulus-relay-chain-interface = { workspace = true, default-features = true } diff --git a/cumulus/client/consensus/aura/src/collator.rs b/cumulus/client/consensus/aura/src/collator.rs index c5435c2309242..1fbe1f2bb424d 100644 --- a/cumulus/client/consensus/aura/src/collator.rs +++ 
b/cumulus/client/consensus/aura/src/collator.rs @@ -25,6 +25,7 @@ //! This module also exposes some standalone functions for common operations when building //! aura-based collators. +use crate::collators::RelayParentData; use codec::Codec; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{ @@ -36,24 +37,19 @@ use cumulus_primitives_core::{ RelayProofRequest, }; use cumulus_relay_chain_interface::RelayChainInterface; -use sc_client_api::BackendTransaction; -use sp_consensus::{Environment, ProposeArgs, Proposer}; - +use futures::prelude::*; use polkadot_node_primitives::{Collation, MaybeCompressedPoV}; use polkadot_primitives::{Header as PHeader, Id as ParaId}; -use sp_externalities::Extensions; -use sp_trie::proof_size_extension::ProofSizeExt; - -use crate::collators::RelayParentData; -use futures::prelude::*; +use sc_client_api::BackendTransaction; use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy, StateAction}; use sc_consensus_aura::standalone as aura_internal; use sc_network_types::PeerId; use sp_api::{ApiExt, ProofRecorder, ProvideRuntimeApi, StorageProof}; use sp_application_crypto::AppPublic; -use sp_consensus::BlockOrigin; +use sp_consensus::{BlockOrigin, Environment, ProposeArgs, Proposer}; use sp_consensus_aura::{AuraApi, Slot, SlotDuration}; use sp_core::crypto::Pair; +use sp_externalities::Extensions; use sp_inherents::{CreateInherentDataProviders, InherentData, InherentDataProvider}; use sp_keystore::KeystorePtr; use sp_runtime::{ @@ -62,6 +58,7 @@ use sp_runtime::{ }; use sp_state_machine::StorageChanges; use sp_timestamp::Timestamp; +use sp_trie::proof_size_extension::ProofSizeExt; use std::{error::Error, time::Duration}; /// Parameters for instantiating a [`Collator`]. @@ -103,7 +100,7 @@ pub struct BuildBlockAndImportParams<'a, Block: BlockT, P: Pair> { pub max_pov_size: usize, /// Optional [`ProofRecorder`] to use. 
/// - /// If not set, a default recorder will be used internally and [`ProofSizeExt`] will be + /// If not set, one will be initialized internally and [`ProofSizeExt`] will be /// registered. pub storage_proof_recorder: Option>, /// Extra extensions to forward to the block production. @@ -168,7 +165,7 @@ where } /// Explicitly creates the inherent data for parachain block authoring and overrides - /// the timestamp inherent data with the one provided, if any. Additionally allows to specify + /// the timestamp inherent data with the one provided, if any. Additionally, allows to specify /// relay parent descendants that can be used to prevent authoring at the tip of the relay /// chain. pub async fn create_inherent_data_with_rp_offset( @@ -245,8 +242,25 @@ where /// Build and import a parachain block using the given parameters. pub async fn build_block_and_import( &mut self, - mut params: BuildBlockAndImportParams<'_, Block, P>, + params: BuildBlockAndImportParams<'_, Block, P>, ) -> Result>, Box> { + let Some((built_block, import_block)) = self.build_block(params).await? else { + return Ok(None); + }; + + self.import_block(import_block).await?; + + Ok(Some(built_block)) + } + + /// Build a parachain block using the given parameters. + pub async fn build_block( + &mut self, + mut params: BuildBlockAndImportParams<'_, Block, P>, + ) -> Result< + Option<(BuiltBlock, BlockImportParams)>, + Box, + > { let mut digest = params.additional_pre_digest; digest.push(params.slot_claim.pre_digest.clone()); @@ -274,7 +288,7 @@ where params .extra_extensions .register(ProofSizeExt::new(storage_proof_recorder.clone())); - } else if proof_size_ext_registered && !recorder_passed { + } else if !recorder_passed { return Err( Box::from("`ProofSizeExt` registered, but no `storage_proof_recorder` provided. 
This is a bug.") as Box @@ -297,8 +311,6 @@ where .await .map_err(|e| Box::new(e) as Box)?; - let backend_transaction = proposal.storage_changes.transaction.clone(); - let sealed_importable = seal::<_, P>( proposal.block, proposal.storage_changes, @@ -316,14 +328,31 @@ where .clone(), ); - self.block_import - .import_block(sealed_importable) - .map_err(|e| Box::new(e) as Box) - .await?; + let Some(backend_transaction) = sealed_importable + .state_action + .as_storage_changes() + .map(|c| c.transaction.clone()) + else { + tracing::error!(target: crate::LOG_TARGET, "Building a block should return storage changes!"); + + return Ok(None); + }; let proof = storage_proof_recorder.drain_storage_proof(); - Ok(Some(BuiltBlock { block, proof, backend_transaction })) + Ok(Some((BuiltBlock { block, proof, backend_transaction }, sealed_importable))) + } + + /// Import the given `import_block`. + pub async fn import_block( + &mut self, + import_block: BlockImportParams, + ) -> Result<(), Box> { + self.block_import + .import_block(import_block) + .map_err(|e| Box::new(e) as Box) + .await + .map(drop) } /// Propose, seal, import a block and packaging it into a collation. 
diff --git a/cumulus/client/consensus/aura/src/collators/lookahead.rs b/cumulus/client/consensus/aura/src/collators/lookahead.rs index 23405e93d9b62..e8756cdacc076 100644 --- a/cumulus/client/consensus/aura/src/collators/lookahead.rs +++ b/cumulus/client/consensus/aura/src/collators/lookahead.rs @@ -336,6 +336,7 @@ where params.para_id, &*params.para_backend, ¶ms.relay_client, + |_| true, ) .await { @@ -346,24 +347,7 @@ where let included_header = &parent_search_result.included_header; let para_client = &*params.para_client; let keystore = ¶ms.keystore; - let can_build_upon = |block_hash| { - let (slot_now, relay_slot, timestamp) = get_parachain_slot::<_, _, P::Public>( - para_client, - block_hash, - &relay_parent_header, - params.relay_chain_slot_duration, - )?; - - Some(super::can_build_upon::<_, _, P>( - slot_now, - relay_slot, - timestamp, - block_hash, - included_header.hash(), - para_client, - &keystore, - )) - }; + let included_block_hash = included_header.hash(); // Build in a loop until not allowed. Note that the authorities can change // at any block, so we need to re-claim our slot every time. @@ -397,14 +381,39 @@ where // This needs to change to support elastic scaling, but for continuously // scheduled chains this ensures that the backlog will grow steadily. 
for n_built in 0..2u32 { - let slot_claim = match can_build_upon(parent_hash) { - Some(fut) => match fut.await { - None => break, - Some(c) => c, - }, - None => break, + let Some((slot_now, relay_slot, timestamp)) = get_parachain_slot::<_, _, P::Public>( + para_client, + parent_hash, + &relay_parent_header, + params.relay_chain_slot_duration, + ) else { + break; + }; + + let Some(slot_claim) = super::claim_slot::<_, _, P>( + slot_now, + timestamp, + parent_hash, + para_client, + &keystore, + ) + .await + else { + break; }; + if !super::can_build_upon::<_, _>( + parent_hash, + included_block_hash, + relay_slot, + slot_now, + para_client, + ) + .await + { + break; + } + tracing::debug!( target: crate::LOG_TARGET, ?relay_parent, diff --git a/cumulus/client/consensus/aura/src/collators/mod.rs b/cumulus/client/consensus/aura/src/collators/mod.rs index 37afe105e1ef3..f5c7fc535aca2 100644 --- a/cumulus/client/consensus/aura/src/collators/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/mod.rs @@ -35,10 +35,12 @@ use polkadot_primitives::{ Hash as RelayHash, Id as ParaId, OccupiedCoreAssumption, ValidationCodeHash, DEFAULT_SCHEDULING_LOOKAHEAD, }; +use sc_client_api::HeaderBackend; use sc_consensus_aura::{standalone as aura_internal, AuraApi}; use sp_api::{ApiExt, ProvideRuntimeApi, RuntimeApiInfo}; use sp_core::Pair; use sp_keystore::KeystorePtr; +use sp_runtime::traits::Header; use sp_timestamp::Timestamp; pub mod basic; @@ -213,20 +215,17 @@ async fn claim_queue_at( } } -// Checks if we own the slot at the given block and whether there -// is space in the unincluded segment. -async fn can_build_upon( +// Checks if we own the slot at the given block. 
+async fn claim_slot( para_slot: Slot, - relay_slot: Slot, timestamp: Timestamp, parent_hash: Block::Hash, - included_block: Block::Hash, client: &Client, keystore: &KeystorePtr, ) -> Option> where Client: ProvideRuntimeApi, - Client::Api: AuraApi + AuraUnincludedSegmentApi + ApiExt, + Client::Api: AuraApi + ApiExt, P: Pair, P::Public: Codec, P::Signature: Codec, @@ -235,36 +234,58 @@ where runtime_api.set_call_context(sp_core::traits::CallContext::Onchain { import: false }); let authorities = runtime_api.authorities(parent_hash).ok()?; let author_pub = aura_internal::claim_slot::

(para_slot, &authorities, keystore).await?; + Some(SlotClaim::unchecked::

(author_pub, para_slot, timestamp)) +} +// Checks if there is space in the unincluded segment. +async fn can_build_upon( + parent_hash: Block::Hash, + included_block: Block::Hash, + relay_slot: Slot, + para_slot: Slot, + client: &Client, +) -> bool +where + Client: ProvideRuntimeApi, + Client::Api: AuraUnincludedSegmentApi + ApiExt, +{ // This function is typically called when we want to build block N. At that point, the // unincluded segment in the runtime is unaware of the hash of block N-1. If the unincluded // segment in the runtime is full, but block N-1 is the included block, the unincluded segment // should have length 0 and we can build. Since the hash is not available to the runtime // however, we need this extra check here. if parent_hash == included_block { - return Some(SlotClaim::unchecked::

(author_pub, para_slot, timestamp)); + return true; } - let api_version = runtime_api + let runtime_api = client.runtime_api(); + let Some(api_version) = runtime_api .api_version::>(parent_hash) .ok() - .flatten()?; + .flatten() + else { + return false; + }; let slot = if api_version > 1 { relay_slot } else { para_slot }; runtime_api .can_build_upon(parent_hash, included_block, slot) - .ok()? - .then(|| SlotClaim::unchecked::

(author_pub, para_slot, timestamp)) + .ok() + .unwrap_or(false) } /// Use [`cumulus_client_consensus_common::find_parent_for_building`] to find the best parachain /// block to build on. +/// +/// If the best parent does not pass `filter_parent`, walks backwards through ancestors +/// until finding one that does, or reaching the included block. async fn find_parent( relay_parent: RelayHash, para_id: ParaId, para_backend: &impl sc_client_api::Backend, relay_client: &impl RelayChainInterface, + filter_parent: impl Fn(&Block::Header) -> bool, ) -> Option> where Block: BlockT, @@ -278,21 +299,21 @@ where .saturating_sub(1) as usize, }; - match cumulus_client_consensus_common::find_parent_for_building::( + let mut result = match cumulus_client_consensus_common::find_parent_for_building::( parent_search_params, para_backend, relay_client, ) .await { - Ok(Some(result)) => Some(result), + Ok(Some(result)) => result, Ok(None) => { tracing::warn!( target: crate::LOG_TARGET, ?relay_parent, "Could not find parent to build upon.", ); - None + return None; }, Err(e) => { tracing::error!( @@ -301,22 +322,44 @@ where err = ?e, "Could not find parent to build upon" ); - None + return None; }, + }; + + // If the best parent doesn't pass the filter (e.g. it's a middle block in a bundle), + // walk backwards towards the included block until we find one that does. + // This avoids falling all the way back to the included block when there are valid + // last-in-core ancestors closer to the chain tip. 
+ while !filter_parent(&result.best_parent_header) { + let parent_hash = *result.best_parent_header.parent_hash(); + match para_backend.blockchain().header(parent_hash) { + Ok(Some(header)) => { + result.best_parent_header = header; + if parent_hash == result.included_header.hash() { + break; + } + }, + _ => { + result.best_parent_header = result.included_header.clone(); + break; + }, + } } + + Some(result) } #[cfg(test)] mod tests { use super::*; - use crate::collators::{can_build_upon, BackingGroupConnectionHelper}; + use crate::collators::BackingGroupConnectionHelper; use codec::Encode; use cumulus_primitives_aura::Slot; use cumulus_primitives_core::BlockT; use cumulus_relay_chain_interface::PHash; use cumulus_test_client::{ runtime::{Block, Hash}, - Client, DefaultTestClientBuilderExt, InitBlockBuilder, TestClientBuilder, + BuildBlockBuilder, Client, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; @@ -326,7 +369,6 @@ mod tests { use sc_consensus::{BlockImport, BlockImportParams, ForkChoiceStrategy}; use sp_consensus::BlockOrigin; use sp_keystore::{Keystore, KeystorePtr}; - use sp_timestamp::Timestamp; use std::sync::{Arc, Mutex}; async fn import_block>( @@ -355,7 +397,11 @@ mod tests { async fn build_and_import_block(client: &Client, included: Hash) -> Block { let sproof = sproof_with_parent_by_hash(client, included); - let block_builder = client.init_block_builder(None, sproof).block_builder; + let block_builder = client + .init_block_builder_builder() + .with_relay_sproof_builder(sproof) + .build() + .block_builder; let block = block_builder.build().unwrap().block; @@ -384,23 +430,22 @@ mod tests { /// we are ensuring on the node side that we are are always able to build on the included block. 
#[tokio::test] async fn test_can_build_upon() { - let (client, keystore) = set_up_components(6); + sp_tracing::try_init_simple(); + + let (client, _keystore) = set_up_components(6); let genesis_hash = client.chain_info().genesis_hash; let mut last_hash = genesis_hash; // Fill up the unincluded segment tracker in the runtime. - while can_build_upon::<_, _, sp_consensus_aura::sr25519::AuthorityPair>( - Slot::from(u64::MAX), - Slot::from(u64::MAX), - Timestamp::default(), + while can_build_upon::<_, _>( last_hash, genesis_hash, + Slot::from(u64::MAX), + Slot::from(u64::MAX), &*client, - &keystore, ) .await - .is_some() { let block = build_and_import_block(&client, genesis_hash).await; last_hash = block.header().hash(); @@ -408,17 +453,15 @@ mod tests { // Blocks were built with the genesis hash set as included block. // We call `can_build_upon` with the last built block as the included block. - let result = can_build_upon::<_, _, sp_consensus_aura::sr25519::AuthorityPair>( - Slot::from(u64::MAX), - Slot::from(u64::MAX), - Timestamp::default(), + let result = can_build_upon::<_, _>( last_hash, last_hash, + Slot::from(u64::MAX), + Slot::from(u64::MAX), &*client, - &keystore, ) .await; - assert!(result.is_some()); + assert!(result); } /// Helper to create a mock overseer handle and message recorder @@ -656,7 +699,7 @@ mod tests { /// (both top-level and child trie keys) should be included in the relay chain state proof. /// /// Falls back to an empty request if the runtime API call fails or is not implemented. -fn get_relay_proof_request( +pub(crate) fn get_relay_proof_request( client: &Client, parent_hash: Block::Hash, ) -> RelayProofRequest @@ -676,6 +719,7 @@ where } /// Holds a relay parent and its descendants. 
+#[derive(Clone)] pub struct RelayParentData { /// The relay parent block header relay_parent: RelayHeader, diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs index 248167e6d995c..7f6b4b4142ccc 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_builder_task.rs @@ -15,27 +15,27 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . -use codec::{Codec, Encode}; - use super::CollatorMessage; use crate::{ - collator::{self as collator_util, BuildBlockAndImportParams}, + collator::{self as collator_util, BuildBlockAndImportParams, Collator, SlotClaim}, collators::{ check_validation_code_or_log, slot_based::{ - relay_chain_data_cache::{RelayChainData, RelayChainDataCache}, + relay_chain_data_cache::RelayChainDataCache, slot_timer::{SlotInfo, SlotTimer}, }, - BackingGroupConnectionHelper, RelayParentData, + BackingGroupConnectionHelper, RelayHash, RelayParentData, }, LOG_TARGET, }; +use codec::{Codec, Encode}; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; +use cumulus_client_proof_size_recording::prepare_proof_size_recording_aux_data; use cumulus_primitives_aura::{AuraUnincludedSegmentApi, Slot}; use cumulus_primitives_core::{ - extract_relay_parent, rpsr_digest, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, - KeyToIncludeInRelayProof, PersistedValidationData, RelayParentOffsetApi, + BlockBundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, + PersistedValidationData, RelayParentOffsetApi, TargetBlockRate, }; use cumulus_relay_chain_interface::RelayChainInterface; use futures::prelude::*; @@ -44,19 +44,29 @@ use 
sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; use sc_consensus::BlockImport; use sc_consensus_aura::SlotDuration; use sc_network_types::PeerId; -use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_api::{ApiExt, ProofRecorder, ProvideRuntimeApi, StorageProof}; use sp_application_crypto::AppPublic; +use sp_block_builder::BlockBuilder; use sp_blockchain::HeaderBackend; use sp_consensus::Environment; use sp_consensus_aura::AuraApi; use sp_core::crypto::Pair; +use sp_externalities::Extensions; use sp_inherents::CreateInherentDataProviders; use sp_keystore::KeystorePtr; use sp_runtime::{ - traits::{Block as BlockT, Header as HeaderT, Member, Zero}, + traits::{Block as BlockT, HashingFor, Header as HeaderT, Member}, Saturating, }; -use std::{collections::VecDeque, sync::Arc, time::Duration}; +use sp_trie::{ + proof_size_extension::{ProofSizeExt, RecordingProofSizeProvider}, + recorder::IgnoredNodes, +}; +use std::{ + collections::VecDeque, + sync::Arc, + time::{Duration, Instant}, +}; /// Parameters for [`run_block_builder`]. pub struct BuilderTaskParams< @@ -94,8 +104,6 @@ pub struct BuilderTaskParams< pub proposer: Proposer, /// The generic collator service used to plug into this consensus engine. pub collator_service: CS, - /// The amount of time to spend authoring each block. - pub authoring_duration: Duration, /// Channel to send built blocks to the collation task. pub collator_sender: sc_utils::mpsc::TracingUnboundedSender>, /// Slot duration of the relay chain. 
@@ -131,7 +139,9 @@ where Client::Api: AuraApi + RelayParentOffsetApi + AuraUnincludedSegmentApi - + KeyToIncludeInRelayProof, + + TargetBlockRate + + BlockBuilder + + cumulus_primitives_core::KeyToIncludeInRelayProof, Backend: sc_client_api::Backend + 'static, RelayClient: RelayChainInterface + Clone + 'static, CIDP: CreateInherentDataProviders + 'static, @@ -139,7 +149,7 @@ where BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, Proposer: Environment + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + 'static, - CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, + CHP: consensus_common::ValidationCodeHashProvider + Send + Sync + 'static, P: Pair + Send + Sync + 'static, P::Public: AppPublic + Member + Codec, P::Signature: TryFrom> + Member + Codec, @@ -158,18 +168,13 @@ where collator_service, collator_sender, code_hash_provider, - authoring_duration, relay_chain_slot_duration, para_backend, slot_offset, max_pov_percentage, } = params; - let mut slot_timer = SlotTimer::<_, _, P>::new_with_offset( - para_client.clone(), - slot_offset, - relay_chain_slot_duration, - ); + let mut slot_timer = SlotTimer::new_with_offset(slot_offset, relay_chain_slot_duration); let mut collator = { let params = collator_util::Params { @@ -210,7 +215,7 @@ where loop { // We wait here until the next slot arrives. 
- if slot_timer.wait_until_next_slot().await.is_err() { + let Ok(slot_time) = slot_timer.wait_until_next_slot().await else { tracing::error!(target: LOG_TARGET, "Unable to wait for next slot."); return; }; @@ -251,6 +256,7 @@ where continue; }; + // Use the slot calculated from relay parent let Some(para_slot) = adjust_para_to_relay_parent_slot( rp_data.relay_parent(), relay_chain_slot_duration, @@ -262,72 +268,53 @@ where let relay_parent = rp_data.relay_parent().hash(); let relay_parent_header = rp_data.relay_parent().clone(); - let Some(parent_search_result) = - crate::collators::find_parent(relay_parent, para_id, &*para_backend, &relay_client) - .await + let Some(parent_search_result) = crate::collators::find_parent( + relay_parent, + para_id, + &*para_backend, + &relay_client, + |parent| { + // We never want to build on any "middle block" that isn't the last block in a + // core. + // When the digest item doesn't exist, we are running in compatibility + // mode and all parents are valid. + CumulusDigestItem::is_last_block_in_core(parent.digest()).unwrap_or(true) + }, + ) + .await else { continue; }; - let parent_hash = parent_search_result.best_parent_header.hash(); let included_header = parent_search_result.included_header; - let parent_header = &parent_search_result.best_parent_header; - // Distance from included block to best parent (unincluded segment length). + let initial_parent_hash = parent_search_result.best_parent_header.hash(); + let initial_parent_header = parent_search_result.best_parent_header; let unincluded_segment_len = - parent_header.number().saturating_sub(*included_header.number()); - - // Retrieve the core. 
- let core = match determine_core( - &mut relay_chain_data_cache, - &relay_parent_header, - para_id, - parent_header, - relay_parent_offset, - ) - .await - { - Err(()) => { - tracing::debug!( - target: LOG_TARGET, - ?relay_parent, - "Failed to determine core" - ); + initial_parent_header.number().saturating_sub(*included_header.number()); - continue; - }, - Ok(Some(cores)) => { - tracing::debug!( - target: LOG_TARGET, - ?relay_parent, - core_selector = ?cores.selector, - claim_queue_offset = ?cores.claim_queue_offset, - "Going to claim core", - ); - - cores - }, - Ok(None) => { - tracing::debug!( - target: LOG_TARGET, - ?relay_parent, - "No core scheduled" - ); - - continue; - }, - }; - - let Ok(RelayChainData { max_pov_size, last_claimed_core_selector, .. }) = - relay_chain_data_cache.get_mut_relay_chain_data(relay_parent).await + let Ok(max_pov_size) = relay_chain_data_cache + .get_mut_relay_chain_data(relay_parent) + .await + .map(|d| d.max_pov_size) else { continue; }; - slot_timer.update_scheduling(core.total_cores().into()); + let allowed_pov_size = if let Some(max_pov_percentage) = max_pov_percentage { + max_pov_size * max_pov_percentage / 100 + } else { + // Set the block limit to 85% of the maximum PoV size. + // + // Once https://github.com/paritytech/polkadot-sdk/issues/6020 issue is + // fixed, this should be removed. + max_pov_size * 85 / 100 + } as usize; // We mainly call this to inform users at genesis if there is a mismatch with the // on-chain data. 
- collator.collator_service().check_block_status(parent_hash, parent_header); + collator + .collator_service() + .check_block_status(initial_parent_hash, &initial_parent_header); let Ok(relay_slot) = sc_consensus_babe::find_pre_digest::(&relay_parent_header) @@ -343,167 +330,518 @@ where let mut runtime_api = para_client.runtime_api(); runtime_api .set_call_context(sp_core::traits::CallContext::Onchain { import: false }); - if let Ok(authorities) = runtime_api.authorities(parent_hash) { + if let Ok(authorities) = runtime_api.authorities(initial_parent_hash) { connection_helper.update::

(para_slot.slot, &authorities).await; } } - let slot_claim = match crate::collators::can_build_upon::<_, _, P>( + let Some(slot_claim) = crate::collators::claim_slot::<_, _, P>( para_slot.slot, - relay_slot, para_slot.timestamp, - parent_hash, - included_header_hash, + initial_parent_hash, &*para_client, &keystore, ) .await - { - Some(slot) => slot, - None => { - tracing::debug!( - target: crate::LOG_TARGET, - ?unincluded_segment_len, - relay_parent = ?relay_parent, - relay_parent_num = %relay_parent_header.number(), - included_hash = ?included_header_hash, - included_num = %included_header.number(), - parent = ?parent_hash, - slot = ?para_slot.slot, - "Not building block." - ); - continue; - }, + else { + tracing::debug!( + target: crate::LOG_TARGET, + ?unincluded_segment_len, + relay_parent = ?relay_parent, + relay_parent_num = %relay_parent_header.number(), + included_hash = ?included_header_hash, + included_num = %included_header.number(), + initial_parent = ?initial_parent_hash, + slot = ?para_slot.slot, + "Not eligible to claim slot." + ); + continue; }; tracing::debug!( target: crate::LOG_TARGET, ?unincluded_segment_len, - relay_parent = %relay_parent, + relay_parent = ?relay_parent, relay_parent_num = %relay_parent_header.number(), relay_parent_offset, - included_hash = %included_header_hash, + included_hash = ?included_header_hash, included_num = %included_header.number(), - parent = %parent_hash, + initial_parent = ?initial_parent_hash, slot = ?para_slot.slot, - "Building block." + "Claiming slot." 
); - let validation_data = PersistedValidationData { - parent_head: parent_header.encode().into(), - relay_parent_number: *relay_parent_header.number(), - relay_parent_storage_root: *relay_parent_header.state_root(), - max_pov_size: *max_pov_size, - }; - - let relay_proof_request = - super::super::get_relay_proof_request(&*para_client, parent_hash); - - let (parachain_inherent_data, other_inherent_data) = match collator - .create_inherent_data_with_rp_offset( - relay_parent, - &validation_data, - parent_hash, - slot_claim.timestamp(), - Some(rp_data), - relay_proof_request, - collator_peer_id, - ) - .await + let mut cores = match determine_cores( + &mut relay_chain_data_cache, + &relay_parent_header, + para_id, + relay_parent_offset, + ) + .await { - Err(err) => { - tracing::error!(target: crate::LOG_TARGET, ?err); - break; + Ok(Some(core)) => core, + Ok(None) => { + tracing::debug!( + target: crate::LOG_TARGET, + relay_parent = ?relay_parent, + "No cores scheduled." + ); + continue; }, - Ok(x) => x, - }; + Err(()) => { + tracing::error!( + target: crate::LOG_TARGET, + relay_parent = ?relay_parent, + "Failed to determine cores." + ); - let validation_code_hash = match code_hash_provider.code_hash_at(parent_hash) { - None => { - tracing::error!(target: crate::LOG_TARGET, ?parent_hash, "Could not fetch validation code hash"); break; }, - Some(v) => v, }; - check_validation_code_or_log( - &validation_code_hash, - para_id, - &relay_client, - relay_parent, + let number_of_blocks = + match para_client.runtime_api().target_block_rate(initial_parent_hash) { + Ok(interval) => interval, + Err(error) => { + tracing::debug!( + target: crate::LOG_TARGET, + block = ?initial_parent_hash, + ?error, + "Failed to fetch `slot_schedule`, assuming one block per core" + ); + + // Backwards compatible we use the number of cores as number of blocks. + cores.total_cores() + }, + }; + + // In total we want to have at max `number_of_blocks` cores to use. 
+ cores.truncate_cores(number_of_blocks); + let raw_blocks_per_core = (number_of_blocks / cores.total_cores()).max(1); + let left_over_blocks = number_of_blocks % cores.total_cores(); + let blocks_per_cores = (0..cores.total_cores()) + .map(|i| { + // We distribute the left over blocks across the cores. + raw_blocks_per_core + u32::from(i < left_over_blocks) + }) + .collect::>(); + + tracing::debug!( + target: crate::LOG_TARGET, + ?blocks_per_cores, + core_indices = ?cores.core_indices(), + "Core configuration", + ); + + let mut pov_parent_header = initial_parent_header; + let mut pov_parent_hash = initial_parent_hash; + let block_time = relay_chain_slot_duration / number_of_blocks; + + for blocks_per_core in blocks_per_cores { + let time_for_core = slot_time.time_left() / cores.cores_left(); + + match build_collation_for_core(BuildCollationParams { + pov_parent_header, + pov_parent_hash, + relay_parent_header: &relay_parent_header, + relay_parent_hash: relay_parent, + max_pov_size, + para_id, + relay_client: &relay_client, + code_hash_provider: &code_hash_provider, + slot_claim: &slot_claim, + collator_sender: &collator_sender, + collator: &mut collator, + allowed_pov_size, + core_info: cores.core_info(), + core_index: cores.core_index(), + block_time, + blocks_per_core, + time_for_core, + is_last_core_in_parachain_slot: cores.is_last_core() && + slot_time.is_parachain_slot_ending(para_slot_duration.as_duration()), + collator_peer_id, + relay_parent_data: rp_data.clone(), + total_number_of_blocks: number_of_blocks, + included_header_hash, + relay_slot, + para_slot: para_slot.slot, + para_client: &*para_client, + }) + .await + { + Ok(Some(header)) => { + pov_parent_header = header; + pov_parent_hash = pov_parent_header.hash(); + }, + // Let's wait for the next slot + Ok(None) => break, + Err(()) => return, + } + + if !cores.advance() { + break; + } + } + } + } +} + +/// Parameters for [`build_collation_for_core`]. 
+struct BuildCollationParams< + 'a, + Block: BlockT, + P: Pair, + RelayClient, + BI, + CIDP, + Proposer, + CS, + CHP, + Client, +> { + pov_parent_header: Block::Header, + pov_parent_hash: Block::Hash, + relay_parent_header: &'a RelayHeader, + relay_parent_hash: RelayHash, + max_pov_size: u32, + para_id: ParaId, + relay_client: &'a RelayClient, + code_hash_provider: &'a CHP, + slot_claim: &'a SlotClaim, + collator_sender: &'a sc_utils::mpsc::TracingUnboundedSender>, + collator: &'a mut Collator, + allowed_pov_size: usize, + core_info: CoreInfo, + core_index: CoreIndex, + block_time: Duration, + blocks_per_core: u32, + /// Time allocated for the core. + time_for_core: Duration, + is_last_core_in_parachain_slot: bool, + collator_peer_id: PeerId, + relay_parent_data: RelayParentData, + total_number_of_blocks: u32, + included_header_hash: Block::Hash, + relay_slot: cumulus_primitives_aura::Slot, + para_slot: cumulus_primitives_aura::Slot, + para_client: &'a Client, +} + +/// Build a collation for one core. +/// +/// One collation can be composed of multiple blocks. 
+async fn build_collation_for_core< + Block: BlockT, + P, + RelayClient, + BI, + CIDP, + Proposer, + CS, + CHP, + Client, +>( + BuildCollationParams { + pov_parent_header, + pov_parent_hash, + relay_parent_header, + relay_parent_hash, + max_pov_size, + para_id, + relay_client, + code_hash_provider, + slot_claim, + collator_sender, + collator, + allowed_pov_size, + core_info, + core_index, + block_time, + blocks_per_core, + time_for_core: slot_time_for_core, + is_last_core_in_parachain_slot, + collator_peer_id, + relay_parent_data, + total_number_of_blocks, + included_header_hash, + relay_slot, + para_slot, + para_client, + }: BuildCollationParams<'_, Block, P, RelayClient, BI, CIDP, Proposer, CS, CHP, Client>, +) -> Result, ()> +where + RelayClient: RelayChainInterface + 'static, + P: Pair, + P::Public: AppPublic + Member + Codec, + P::Signature: TryFrom> + Member + Codec, + CIDP: CreateInherentDataProviders + 'static, + CIDP::InherentDataProviders: Send, + BI: BlockImport + ParachainBlockImportMarker + Send + Sync + 'static, + Proposer: Environment + Send + Sync + 'static, + CS: CollatorServiceInterface + Send + Sync + 'static, + CHP: consensus_common::ValidationCodeHashProvider + Send + Sync + 'static, + Client: ProvideRuntimeApi, + Client::Api: AuraUnincludedSegmentApi + + ApiExt + + cumulus_primitives_core::KeyToIncludeInRelayProof, +{ + let core_start = Instant::now(); + + let validation_data = PersistedValidationData { + parent_head: pov_parent_header.encode().into(), + relay_parent_number: *relay_parent_header.number(), + relay_parent_storage_root: *relay_parent_header.state_root(), + max_pov_size, + }; + + let Some(validation_code_hash) = code_hash_provider.code_hash_at(pov_parent_hash) else { + tracing::error!( + target: crate::LOG_TARGET, + ?pov_parent_hash, + "Could not fetch validation code hash", + ); + + return Err(()); + }; + + check_validation_code_or_log(&validation_code_hash, para_id, relay_client, relay_parent_hash) + .await; + + let mut blocks = 
Vec::new(); + let mut proofs = Vec::new(); + let mut ignored_nodes = IgnoredNodes::default(); + + let mut parent_hash = pov_parent_hash; + let mut parent_header = pov_parent_header.clone(); + + for block_index in 0..blocks_per_core { + // Check if we can build the next block + if !crate::collators::can_build_upon::( + parent_hash, + included_header_hash, + relay_slot, + para_slot, + para_client, + ) + .await + { + tracing::debug!( + target: LOG_TARGET, + ?parent_hash, + ?included_header_hash, + "Cannot build next block due to unincluded segment constraints, skipping entire bundle. Will continue at the next slot." + ); + + return Ok(None); + } + + // Create schedule for this block to determine timing decisions + let schedule = BlockProductionSchedule::new( + block_index, + blocks_per_core, + total_number_of_blocks, + is_last_core_in_parachain_slot, + ); + + if schedule.should_skip_production() { + tracing::debug!( + target: LOG_TARGET, + "Skipping block production so that the next node is able to import all blocks before its slot." + ); + break; + } + + tracing::trace!( + target: LOG_TARGET, + %block_index, + core_index = %core_index.0, + "Preparing to build block" + ); + + let relay_proof_request = + crate::collators::get_relay_proof_request::(para_client, parent_hash); + + let (parachain_inherent_data, other_inherent_data) = match collator + .create_inherent_data_with_rp_offset( + relay_parent_hash, + &validation_data, + parent_hash, + slot_claim.timestamp(), + Some(relay_parent_data.clone()), + relay_proof_request, + collator_peer_id, ) - .await; + .await + { + Err(err) => { + tracing::error!(target: crate::LOG_TARGET, ?err, "Failed to create inherent data."); + return Ok(None); + }, + Ok(x) => x, + }; - let allowed_pov_size = if let Some(max_pov_percentage) = max_pov_percentage { - validation_data.max_pov_size * max_pov_percentage / 100 - } else { - // Set the block limit to 85% of the maximum PoV size. 
- // - // Once https://github.com/paritytech/polkadot-sdk/issues/6020 issue is - // fixed, this should be removed. - validation_data.max_pov_size * 85 / 100 - } as usize; + let storage_proof_recorder = + ProofRecorder::::with_ignored_nodes(ignored_nodes.clone()); + + let proof_size_recorder = RecordingProofSizeProvider::new(storage_proof_recorder.clone()); + + let mut extra_extensions = Extensions::default(); + extra_extensions.register(ProofSizeExt::new(proof_size_recorder.clone())); + + let block_production_start = Instant::now(); + // The time we have left to spent for the block. + let time_left_for_block = slot_time_for_core.saturating_sub(core_start.elapsed()) / + (blocks_per_core - block_index) as u32; + + // The first block on a multi-block core gets the full remaining core time so that the + // runtime's `FullCore` weight mode can actually be utilized. Subsequent blocks are + // capped at `block_time` because they only carry fractional weight. + // + // Single-block cores (blocks_per_core == 1) go through schedule.authoring_duration() + // so that slot handover adjustments (e.g., Shorten) are applied on the last core. 
+ let authoring_duration = if block_index == 0 && blocks_per_core > 1 { + slot_time_for_core.saturating_sub(core_start.elapsed()) + } else { + schedule.authoring_duration(time_left_for_block, block_time) + }; - let adjusted_authoring_duration = - slot_timer.adjust_authoring_duration(authoring_duration); - tracing::debug!(target: crate::LOG_TARGET, duration = ?adjusted_authoring_duration, "Adjusted proposal duration."); + tracing::trace!( + target: LOG_TARGET, + ?authoring_duration, + "Building block" + ); - let Some(adjusted_authoring_duration) = adjusted_authoring_duration else { - tracing::debug!( - target: crate::LOG_TARGET, - ?unincluded_segment_len, - relay_parent = ?relay_parent, - relay_parent_num = %relay_parent_header.number(), - included_hash = ?included_header_hash, - included_num = %included_header.number(), - parent = ?parent_hash, - slot = ?para_slot.slot, - "Not building block due to insufficient authoring duration." - ); + let Ok(Some((built_block, mut import_block))) = collator + .build_block(BuildBlockAndImportParams { + parent_header: &parent_header, + slot_claim, + additional_pre_digest: vec![ + CumulusDigestItem::CoreInfo(core_info.clone()).to_digest_item(), + CumulusDigestItem::BlockBundleInfo(BlockBundleInfo { + index: block_index as u8, + is_last: schedule.block_ends_bundle(), + }) + .to_digest_item(), + ], + parachain_inherent_data, + extra_inherent_data: other_inherent_data, + proposal_duration: authoring_duration, + max_pov_size: allowed_pov_size, + storage_proof_recorder: storage_proof_recorder.into(), + extra_extensions, + }) + .await + else { + tracing::error!(target: crate::LOG_TARGET, "Unable to build block at slot."); + return Ok(None); + }; - continue; - }; + parent_hash = built_block.block.header().hash(); + parent_header = built_block.block.header().clone(); - let Ok(Some(candidate)) = collator - .build_block_and_import(BuildBlockAndImportParams { - parent_header: &parent_header, - slot_claim: &slot_claim, - 
additional_pre_digest: vec![ - CumulusDigestItem::CoreInfo(core.core_info()).to_digest_item() - ], - parachain_inherent_data, - extra_inherent_data: other_inherent_data, - proposal_duration: adjusted_authoring_duration, - max_pov_size: allowed_pov_size, - storage_proof_recorder: None, - extra_extensions: Default::default(), - }) - .await - else { - tracing::error!(target: crate::LOG_TARGET, "Unable to build block at slot."); - continue; - }; + // Extract and add proof size recordings to the import block + let recorded_sizes = proof_size_recorder + .recorded_estimations() + .into_iter() + .map(|size| size as u32) + .collect::>(); - let new_block_hash = candidate.block.header().hash(); + if !recorded_sizes.is_empty() { + prepare_proof_size_recording_aux_data(parent_hash, recorded_sizes).for_each( + |(k, v)| { + import_block.auxiliary.push((k, Some(v))); + }, + ); + } - // Announce the newly built block to our peers. - collator.collator_service().announce_block(new_block_hash, None); + if let Err(error) = collator.import_block(import_block).await { + tracing::error!(target: crate::LOG_TARGET, ?error, "Failed to import built block."); + return Ok(None); + } - *last_claimed_core_selector = Some(core.core_selector()); + // Announce the newly built block to our peers. 
+ collator.collator_service().announce_block(parent_hash, None); - if let Err(err) = collator_sender.unbounded_send(CollatorMessage { - relay_parent, - parent_header: parent_header.clone(), - parachain_candidate: candidate.into(), - validation_code_hash, - core_index: core.core_index(), - validation_data, - }) { - tracing::error!(target: crate::LOG_TARGET, ?err, "Unable to send block to collation task."); - return; - } + blocks.push(built_block.block); + proofs.push(built_block.proof); + + let full_core_digest = CumulusDigestItem::contains_use_full_core(parent_header.digest()); + let runtime_upgrade_digest = parent_header + .digest() + .logs + .iter() + .any(|it| matches!(it, sp_runtime::DigestItem::RuntimeEnvironmentUpdated)); + + if full_core_digest || runtime_upgrade_digest { + tracing::trace!( + target: crate::LOG_TARGET, + block_hash = ?parent_hash, + time_used_by_block_in_secs = %block_production_start.elapsed().as_secs_f32(), + %full_core_digest, + %runtime_upgrade_digest, + "Stopping block production for core", + ); + break; + } + + ignored_nodes.extend(IgnoredNodes::from_storage_proof::>( + proofs.last().expect("We just pushed the proof into the vector; qed"), + )); + ignored_nodes.extend(IgnoredNodes::from_memory_db(built_block.backend_transaction)); + + // If there is still time left for the block in the slot, we sleep the rest of the time. + // This ensures that we have some steady block rate. + if let Some(sleep) = time_left_for_block + .checked_sub(block_production_start.elapsed()) + // Let's not sleep for the last block here, to send out the collation as early as + // possible. 
+ .filter(|_| !schedule.is_effective_last_block()) + { + tokio::time::sleep(sleep).await; } } + + if blocks.is_empty() { + tracing::debug!( + target: LOG_TARGET, + ?core_index, + relay_parent = ?relay_parent_hash, + "Did not build any blocks, returning" + ); + + return Ok(None); + } + + let proof = StorageProof::merge(proofs); + + tracing::trace!( + target: LOG_TARGET, + ?core_index, + relay_parent = ?relay_parent_hash, + blocks = ?blocks.iter().map(|b| b.hash()).collect::>(), + "Sending out PoV" + ); + + if let Err(err) = collator_sender.unbounded_send(CollatorMessage { + relay_parent: relay_parent_hash, + parent_header: pov_parent_header.clone(), + blocks, + proof, + validation_code_hash, + core_index, + validation_data, + }) { + tracing::error!(target: crate::LOG_TARGET, ?err, "Unable to send block to collation task."); + Err(()) + } else { + // Now let's sleep for the rest of the core. + if let Some(sleep) = slot_time_for_core.checked_sub(core_start.elapsed()) { + tokio::time::sleep(sleep).await; + } + + Ok(Some(parent_header)) + } } /// Translate the slot of the relay parent to the slot of the parachain. @@ -617,7 +955,7 @@ where /// /// The function traverses backwards from the best block until it finds the block at the specified /// offset, collecting all blocks in between to maintain the chain of ancestry. 
-pub(crate) async fn offset_relay_parent_find_descendants( +pub async fn offset_relay_parent_find_descendants( relay_chain_data_cache: &mut RelayChainDataCache, mut relay_header: RelayHeader, relay_parent_offset: u32, @@ -631,7 +969,12 @@ where } if sc_consensus_babe::contains_epoch_change::(&relay_header) { - tracing::debug!(target: LOG_TARGET, ?relay_best_block, relay_best_block_number = relay_header.number(), "Relay parent is in previous session."); + tracing::debug!( + target: LOG_TARGET, + ?relay_best_block, + relay_best_block_number = relay_header.number(), + "Relay parent is in previous session.", + ); return Ok(None); } @@ -644,7 +987,13 @@ where .relay_parent_header .clone(); if sc_consensus_babe::contains_epoch_change::(&next_header) { - tracing::debug!(target: LOG_TARGET, ?relay_best_block, ancestor = %next_header.hash(), ancestor_block_number = next_header.number(), "Ancestor of best block is in previous session."); + tracing::debug!( + target: LOG_TARGET, + ?relay_best_block, ancestor = %next_header.hash(), + ancestor_block_number = next_header.number(), + "Ancestor of best block is in previous session.", + ); + return Ok(None); } required_ancestors.push_front(next_header.clone()); @@ -668,98 +1017,449 @@ where Ok(Some(RelayParentData::new_with_descendants(relay_parent, required_ancestors.into()))) } -/// Return value of [`determine_core`]. -pub(crate) struct Core { +/// Return value of [`determine_cores`]. +pub struct Cores { selector: CoreSelector, claim_queue_offset: ClaimQueueOffset, - core_index: CoreIndex, - number_of_cores: u16, + core_indices: Vec, } -impl Core { +impl Cores { /// Returns the current [`CoreInfo`]. - fn core_info(&self) -> CoreInfo { + pub fn core_info(&self) -> CoreInfo { CoreInfo { selector: self.selector, claim_queue_offset: self.claim_queue_offset, - number_of_cores: self.number_of_cores.into(), + number_of_cores: (self.core_indices.len() as u16).into(), } } - /// Returns the current [`CoreSelector`]. 
- pub(crate) fn core_selector(&self) -> CoreSelector { - self.selector + /// Returns the core indices. + fn core_indices(&self) -> &[CoreIndex] { + &self.core_indices } /// Returns the current [`CoreIndex`]. - pub(crate) fn core_index(&self) -> CoreIndex { - self.core_index + pub fn core_index(&self) -> CoreIndex { + self.core_indices[self.selector.0 as usize] + } + + /// Advance to the next available core. + /// + /// Returns `false` if there is no core left. + fn advance(&mut self) -> bool { + if self.selector.0 as usize + 1 < self.core_indices.len() { + self.selector.0 += 1; + true + } else { + false + } } /// Returns the total number of cores. - pub(crate) fn total_cores(&self) -> u16 { - self.number_of_cores + pub fn total_cores(&self) -> u32 { + self.core_indices.len() as u32 + } + + /// Truncate `cores` to `max_cores`. + pub fn truncate_cores(&mut self, max_cores: u32) { + self.core_indices.truncate(max_cores as usize); + } + + /// Returns the number of cores left. + fn cores_left(&self) -> u32 { + self.total_cores() - self.selector.0 as u32 + } + + /// Returns if the current core is the last core. + fn is_last_core(&self) -> bool { + self.cores_left() == 1 } } -/// Determine the core for the given `para_id`. -pub(crate) async fn determine_core( +/// Slot handover adjustment strategy based on total block rate. +/// +/// These adjustments exist because without transaction streaming, the next author +/// must sequentially import all blocks before building their own. Each variant +/// uses a different strategy to provide import buffer time. +// TODO: Once transaction streaming is implemented, this can be removed. +#[derive(Debug, Clone, Copy)] +enum SlotHandoverAdjustment { + /// 0-1 blocks per slot - no adjustment needed. + /// The next author has plenty of time to import. + None, + + /// 2-3 blocks per slot (~2-3s block time) - shorten authoring time. + Shorten { + /// Time adjustment factor of last block authoring time. 
+ time_factor: f32, + }, + + /// >3 blocks per slot (<2s block time) - skip last block. + /// + /// Block time is too fast for time reduction alone, so we skip + /// producing the last block in each parachain slot entirely. + Skip, +} + +impl SlotHandoverAdjustment { + /// Determine the appropriate adjustment based on total blocks per relay slot and blocks per + /// core. + fn from_total_blocks(total_blocks: u32, blocks_per_core: u32) -> Self { + match total_blocks { + 0..=1 => Self::None, + 2..=3 if blocks_per_core == 1 || blocks_per_core == total_blocks => { + Self::Shorten { time_factor: 0.5 } + }, + _ => Self::Skip, + } + } + + /// Whether this adjustment skips the last block (vs adjusting time). + fn skips_last_block(&self) -> bool { + matches!(self, Self::Skip) + } +} + +/// Policy object that determines block production timing decisions. +/// +/// Encapsulates the complex timing logic for block production, making decisions +/// about when to skip blocks, how long to spend authoring, and when to sleep. +#[derive(Debug, Clone, Copy)] +struct BlockProductionSchedule { + mode: SlotHandoverAdjustment, + block_index: u32, + blocks_per_core: u32, + is_last_core_in_parachain_slot: bool, +} + +impl BlockProductionSchedule { + fn new( + block_index: u32, + blocks_per_core: u32, + total_blocks: u32, + is_last_core_in_parachain_slot: bool, + ) -> Self { + Self { + mode: SlotHandoverAdjustment::from_total_blocks(total_blocks, blocks_per_core), + block_index, + blocks_per_core, + is_last_core_in_parachain_slot, + } + } + + /// Whether this is the actual last block index in the core. + fn is_last_block_in_core(&self) -> bool { + self.block_index + 1 == self.blocks_per_core + } + + /// Whether this is the second-to-last block index. + fn is_second_to_last(&self) -> bool { + self.block_index + 2 == self.blocks_per_core + } + + /// Whether to skip producing this block entirely. 
+ /// + /// In Bundling mode, we skip the last block in the parachain slot + /// to give the next author time to import all previous blocks. + fn should_skip_production(&self) -> bool { + self.mode.skips_last_block() && + self.is_last_block_in_core() && + self.is_last_core_in_parachain_slot + } + + /// Whether this is effectively the last block we'll produce for this core. + /// + /// Used for `BundleInfo { is_last }` - validators need to know which + /// block might be final. Also used for sleep decisions - we don't sleep + /// after the last or second-to-last block to speed up the final stretch. + /// + /// The second-to-last block is always included because: + /// 1. In Bundling mode on the last core, we skip the actual last block + /// 2. Even when not skipping, avoiding sleep on the last two blocks speeds things up + fn is_effective_last_block(&self) -> bool { + self.is_last_block_in_core() || self.is_second_to_last() + } + + /// Whether the node stops block production after this block for this bundle. + /// + /// Returns `true` when: + /// - This is the last block in the core, OR + /// - This is the second-to-last and the actual last will be skipped (Skip mode on the last core + /// of the parachain slot). + fn block_ends_bundle(&self) -> bool { + self.is_last_block_in_core() || + (self.is_second_to_last() && + self.mode.skips_last_block() && + self.is_last_core_in_parachain_slot) + } + + /// Compute the authoring duration given available time. + fn authoring_duration(&self, time_left: Duration, block_time: Duration) -> Duration { + let adjusted = match &self.mode { + SlotHandoverAdjustment::Shorten { time_factor } + if self.is_last_core_in_parachain_slot => + { + time_left.mul_f32(*time_factor) + }, + _ => time_left, + }; + + block_time.min(adjusted) + } +} + +/// Determine the cores for the given `para_id`. +/// +/// Takes into account the `parent` core to find the next available cores. 
+pub async fn determine_cores( relay_chain_data_cache: &mut RelayChainDataCache, relay_parent: &RelayHeader, para_id: ParaId, - para_parent: &H, relay_parent_offset: u32, -) -> Result, ()> { - let cores_at_offset = &relay_chain_data_cache +) -> Result, ()> { + let claim_queue = &relay_chain_data_cache .get_mut_relay_chain_data(relay_parent.hash()) .await? - .claim_queue - .iter_claims_at_depth_for_para(relay_parent_offset as usize, para_id) + .claim_queue; + + let core_indices = claim_queue + .iter_claims_at_depth_for_para(relay_parent_offset as _, para_id) .collect::>(); - let is_new_relay_parent = if para_parent.number().is_zero() { - true + Ok(if core_indices.is_empty() { + None } else { - match extract_relay_parent(para_parent.digest()) { - Some(last_relay_parent) => last_relay_parent != relay_parent.hash(), - None => { - rpsr_digest::extract_relay_parent_storage_root(para_parent.digest()) - .ok_or(())? - .0 != *relay_parent.state_root() - }, + Some(Cores { + selector: CoreSelector(0), + claim_queue_offset: ClaimQueueOffset(relay_parent_offset as u8), + core_indices, + }) + }) +} + +#[cfg(test)] +mod block_production_schedule_tests { + use super::*; + + mod mode_tests { + use super::*; + + #[test] + fn mode_selection_from_total_blocks() { + // 0-1 blocks = None + assert!(matches!( + SlotHandoverAdjustment::from_total_blocks(0, 1), + SlotHandoverAdjustment::None + )); + assert!(matches!( + SlotHandoverAdjustment::from_total_blocks(1, 1), + SlotHandoverAdjustment::None + )); + + // 2-3 blocks = Shorten with half time + assert!(matches!( + SlotHandoverAdjustment::from_total_blocks(2, 1), + SlotHandoverAdjustment::Shorten { time_factor: 0.5 } + )); + assert!(matches!( + SlotHandoverAdjustment::from_total_blocks(3, 1), + SlotHandoverAdjustment::Shorten { time_factor: 0.5 } + )); + + assert!(matches!( + SlotHandoverAdjustment::from_total_blocks(3, 2), + SlotHandoverAdjustment::Skip + )); + + // >3 blocks = Skip + assert!(matches!( + 
SlotHandoverAdjustment::from_total_blocks(4, 2), + SlotHandoverAdjustment::Skip + )); + assert!(matches!( + SlotHandoverAdjustment::from_total_blocks(12, 4), + SlotHandoverAdjustment::Skip + )); } - }; - let core_info = CumulusDigestItem::find_core_info(para_parent.digest()); + #[test] + fn mode_behavior_flags() { + assert!(!SlotHandoverAdjustment::None.skips_last_block()); - // If we are using a new relay parent, we can start over from the start. - let (selector, core_index) = if is_new_relay_parent { - let Some(core_index) = cores_at_offset.get(0) else { return Ok(None) }; + let shorten = SlotHandoverAdjustment::Shorten { time_factor: 0.5 }; + assert!(!shorten.skips_last_block()); - (0, *core_index) - } else if let Some(core_info) = core_info { - let selector = core_info.selector.0 as usize + 1; - let Some(core_index) = cores_at_offset.get(selector) else { return Ok(None) }; + assert!(SlotHandoverAdjustment::Skip.skips_last_block()); + } + } - (selector, *core_index) - } else { - let last_claimed_core_selector = relay_chain_data_cache - .get_mut_relay_chain_data(relay_parent.hash()) - .await? 
-        .last_claimed_core_selector;
+    mod schedule_tests {
+        use super::*;
 
-    let selector = last_claimed_core_selector.map_or(0, |cs| cs.0 as usize) + 1;
-    let Some(core_index) = cores_at_offset.get(selector) else { return Ok(None) };
+        #[test]
+        fn skip_production_only_in_fast_mode_last_core_last_block() {
+            // Should skip: Fast mode, last core, last block
+            assert!(BlockProductionSchedule::new(0, 1, 4, true).should_skip_production());
 
-    (selector, *core_index)
-    };
+            // Should NOT skip: not last core in parachain slot
+            assert!(!BlockProductionSchedule::new(0, 1, 4, false).should_skip_production());
 
-    Ok(Some(Core {
-        selector: CoreSelector(selector as u8),
-        core_index,
-        claim_queue_offset: ClaimQueueOffset(relay_parent_offset as u8),
-        number_of_cores: cores_at_offset.len() as u16,
-    }))
+            // Should NOT skip: Medium mode (uses time adjustment instead)
+            assert!(!BlockProductionSchedule::new(0, 1, 3, true).should_skip_production());
+
+            // Should NOT skip: not last block in core
+            assert!(!BlockProductionSchedule::new(0, 2, 4, true).should_skip_production());
+
+            // Should skip: Fast mode, last core, last block
+            assert!(BlockProductionSchedule::new(3, 4, 12, true).should_skip_production());
+            // Should NOT skip: Fast mode, last core, second-to-last block (only the
+            // actual last block is skipped)
+            assert!(!BlockProductionSchedule::new(2, 4, 12, true).should_skip_production());
+
+            // Should NOT skip: Fast mode, not last core, last block
+            assert!(!BlockProductionSchedule::new(3, 4, 12, false).should_skip_production());
+            assert!(!BlockProductionSchedule::new(2, 4, 12, false).should_skip_production());
+        }
+
+        #[test]
+        fn effective_last_block_includes_second_to_last() {
+            // block_index 2 is second-to-last (2+2 == 4), always effective last
+            let schedule = BlockProductionSchedule::new(2, 4, 12, true);
+            assert!(schedule.is_effective_last_block());
+            assert!(!schedule.is_last_block_in_core());
+            assert!(schedule.is_second_to_last());
+
+            // Same config but not last core - second-to-last is STILL effective last
+            // (original logic doesn't gate on is_last_core_in_parachain_slot)
+            let schedule = BlockProductionSchedule::new(2, 4, 12, false);
+            assert!(schedule.is_effective_last_block());
+
+            let schedule = BlockProductionSchedule::new(3, 4, 12, false);
+            assert!(schedule.is_effective_last_block());
+
+            // First block is not effective last
+            let schedule = BlockProductionSchedule::new(0, 4, 12, true);
+            assert!(!schedule.is_effective_last_block());
+
+            // With only 1 block per core, there's no second-to-last
+            let schedule = BlockProductionSchedule::new(0, 1, 3, true);
+            assert!(schedule.is_effective_last_block()); // actual last
+            assert!(!schedule.is_second_to_last());
+        }
+
+        #[test]
+        fn authoring_duration_halved_in_medium_mode() {
+            let time_left = Duration::from_millis(2000);
+            let block_time = Duration::from_millis(3000);
+
+            // Medium mode, last block, 1 block per core -> halved
+            let schedule = BlockProductionSchedule::new(0, 1, 2, true);
+            assert_eq!(
+                schedule.authoring_duration(time_left, block_time),
+                Duration::from_millis(1000) // halved, capped by time_left/2
+            );
+
+            // Medium mode, NOT last block -> still halved (Shorten applies per core)
+            let schedule = BlockProductionSchedule::new(0, 2, 2, true);
+            assert_eq!(
+                schedule.authoring_duration(time_left, block_time),
+                Duration::from_millis(1000) // halved
+            );
+
+            // Fast mode -> no time adjustment (uses skip instead)
+            let schedule = BlockProductionSchedule::new(0, 1, 4, true);
+            assert_eq!(
+                schedule.authoring_duration(time_left, block_time),
+                Duration::from_millis(2000)
+            );
+        }
+
+        #[test]
+        fn block_ends_bundle_only_on_true_last_block() {
+            // 6 blocks per core, Skip mode, last core:
+            // only the actual last (index 5) and second-to-last (index 4, because last
+            // will be skipped) should return true.
+ assert!(!BlockProductionSchedule::new(0, 6, 12, true).block_ends_bundle()); + assert!(!BlockProductionSchedule::new(3, 6, 12, true).block_ends_bundle()); + assert!(BlockProductionSchedule::new(4, 6, 12, true).block_ends_bundle()); + assert!(BlockProductionSchedule::new(5, 6, 12, true).block_ends_bundle()); + + // Same config but NOT last core: second-to-last must NOT end the bundle + // (skip only applies on last core). + assert!(!BlockProductionSchedule::new(4, 6, 12, false).block_ends_bundle()); + assert!(BlockProductionSchedule::new(5, 6, 12, false).block_ends_bundle()); + + // Shorten mode (2 blocks, 1 per core, last core): no skipping, so only the + // actual last block ends the bundle. + assert!(BlockProductionSchedule::new(0, 1, 2, true).block_ends_bundle()); + + // None mode (1 block total): trivially the last. + assert!(BlockProductionSchedule::new(0, 1, 1, true).block_ends_bundle()); + assert!(BlockProductionSchedule::new(0, 1, 1, false).block_ends_bundle()); + + // 2 blocks on 1 core (Shorten mode): only index 1 ends the bundle. + assert!(!BlockProductionSchedule::new(0, 2, 2, true).block_ends_bundle()); + assert!(BlockProductionSchedule::new(1, 2, 2, true).block_ends_bundle()); + } + + /// This test verifies that the new schedule logic matches the original inline logic + /// for various block/core configurations. 
+ #[test] + fn schedule_matches_original_logic() { + // Test various configurations to ensure schedule matches original behavior + let test_cases = [ + // (block_index, blocks_per_core, total_blocks, is_last_core) + (0, 1, 1, false), // Normal: 1 block, not last core + (0, 1, 1, true), // Normal: 1 block, last core + (0, 1, 2, true), // Medium: 2 blocks, last core + (0, 1, 3, true), // Medium: 3 blocks, last core + (0, 1, 4, true), // Fast: 4 blocks, last core (should skip) + (0, 1, 4, false), // Fast: 4 blocks, not last core + (0, 2, 6, true), // Fast: 6 blocks, 2 per core, block 0 + (1, 2, 6, true), // Fast: 6 blocks, 2 per core, block 1 (last) + (0, 4, 12, true), // Fast: 12 blocks, 4 per core, block 0 + (2, 4, 12, true), // Fast: 12 blocks, 4 per core, block 2 (second-to-last) + (3, 4, 12, true), // Fast: 12 blocks, 4 per core, block 3 (last, should skip) + ]; + + for (block_index, blocks_per_core, total_blocks, is_last_core) in test_cases { + let schedule = BlockProductionSchedule::new( + block_index, + blocks_per_core, + total_blocks, + is_last_core, + ); + + // Original is_last_block_in_core logic + let original_is_last = block_index + 1 == blocks_per_core || + (block_index + 2 == blocks_per_core && blocks_per_core > 1); + + // Original skip logic + let original_skip = + block_index + 1 == blocks_per_core && total_blocks > 3 && is_last_core; + + assert_eq!( + schedule.is_effective_last_block(), + original_is_last, + "is_effective_last_block mismatch for ({}, {}, {}, {})", + block_index, + blocks_per_core, + total_blocks, + is_last_core + ); + + assert_eq!( + schedule.should_skip_production(), + original_skip, + "should_skip_production mismatch for ({}, {}, {}, {})", + block_index, + blocks_per_core, + total_blocks, + is_last_core + ); + } + } + } } #[cfg(test)] diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs index 29cf59a6fadd7..399beae88f2d8 100644 
--- a/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/block_import.rs @@ -15,14 +15,59 @@ // You should have received a copy of the GNU General Public License // along with Cumulus. If not, see . +use crate::LOG_TARGET; +use codec::{Decode, Encode}; +use cumulus_client_proof_size_recording::prepare_proof_size_recording_aux_data; +use cumulus_primitives_core::{BlockBundleInfo, CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; use futures::{stream::FusedStream, StreamExt}; +use sc_client_api::{ + backend::AuxStore, + client::{AuxDataOperations, FinalityNotification, PreCommitActions}, + HeaderBackend, +}; use sc_consensus::{BlockImport, StateAction}; use sc_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use sp_api::{ApiExt, CallApiAt, CallContext, Core, ProvideRuntimeApi, StorageProof}; -use sp_runtime::traits::{Block as BlockT, Header as _}; -use sp_trie::proof_size_extension::ProofSizeExt; +use sp_api::{ + ApiExt, CallApiAt, CallContext, Core, ProofRecorder, ProofRecorderIgnoredNodes, + ProvideRuntimeApi, StorageProof, +}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_consensus::BlockOrigin; +use sp_runtime::traits::{Block as BlockT, HashingFor, Header as _}; +use sp_trie::proof_size_extension::{ProofSizeExt, RecordingProofSizeProvider}; use std::sync::Arc; +/// The aux storage key used to store the ignored nodes for the given block hash. +fn ignored_nodes_key(block_hash: H) -> Vec { + (b"cumulus_slot_based_nodes_to_ignore", block_hash).encode() +} + +/// Prepare a transaction to write the ignored nodes to the aux storage. +/// +/// Returns the key-value pairs that need to be written to the aux storage. 
+fn prepare_ignored_nodes_transaction( + block_hash: Block::Hash, + ignored_nodes: ProofRecorderIgnoredNodes, +) -> impl Iterator, Vec)> { + let key = ignored_nodes_key(block_hash); + let encoded_nodes = as Encode>::encode(&ignored_nodes); + + [(key, encoded_nodes)].into_iter() +} + +/// Load the ignored nodes associated with a block. +fn load_ignored_nodes( + backend: &B, + block_hash: Block::Hash, +) -> ClientResult>> { + match backend.get_aux(&ignored_nodes_key(block_hash))? { + None => Ok(None), + Some(t) => ProofRecorderIgnoredNodes::::decode(&mut &t[..]) + .map(Some) + .map_err(|e| ClientError::Backend(format!("Failed to decode ignored nodes: {}", e))), + } +} + /// Handle for receiving the block and the storage proof from the [`SlotBasedBlockImport`]. /// /// This handle should be passed to [`Params`](super::Params) or can also be dropped if the node is @@ -46,27 +91,238 @@ impl SlotBasedBlockImportHandle { } } +/// Register the clean up method for cleaning ignored nodes from blocks on which no further blocks +/// will be imported. +fn register_ignored_nodes_cleanup(client: Arc) +where + C: PreCommitActions + HeaderBackend + 'static, + Block: BlockT, +{ + let client_for_closure = client.clone(); + let on_finality = move |notification: &FinalityNotification| -> AuxDataOperations { + // The old finalized block is the parent of the first block in the tree route, + // or the parent of the finalized block if the tree route is empty. + let old_finalized_hash = notification + .tree_route + .first() + .and_then(|hash| client_for_closure.header(*hash).ok().flatten()) + .map(|h| *h.parent_hash()) + .unwrap_or_else(|| *notification.header.parent_hash()); + + notification + .stale_blocks + .iter() + // Delete the ignored nodes for all stale blocks. + .map(|b| (ignored_nodes_key(b.hash), None)) + // We can not delete the ignored nodes for the finalized block, because blocks can still + // be imported on top of this block. 
However, once multiple blocks are finalized at + // once, blocks on the route to the finalized parent can no longer become parents + // either. + .chain( + notification + .tree_route + .iter() + .copied() + .map(|hash| (ignored_nodes_key(hash), None)), + ) + .chain(std::iter::once((ignored_nodes_key(old_finalized_hash), None))) + .collect() + }; + + client.register_finality_action(Box::new(on_finality)); +} + /// Special block import for the slot based collator. -pub struct SlotBasedBlockImport { +pub struct SlotBasedBlockImport { inner: BI, client: Arc, sender: TracingUnboundedSender<(Block, StorageProof)>, } -impl SlotBasedBlockImport { +impl SlotBasedBlockImport { /// Create a new instance. /// /// The returned [`SlotBasedBlockImportHandle`] needs to be passed to the /// [`Params`](super::Params), so that this block import instance can communicate with the /// collation task. If the node is not running as a collator, just dropping the handle is fine. - pub fn new(inner: BI, client: Arc) -> (Self, SlotBasedBlockImportHandle) { + pub fn new(inner: BI, client: Arc) -> (Self, SlotBasedBlockImportHandle) + where + Client: PreCommitActions + HeaderBackend + 'static, + { let (sender, receiver) = tracing_unbounded("SlotBasedBlockImportChannel", 1000); + register_ignored_nodes_cleanup(client.clone()); + (Self { sender, client, inner }, SlotBasedBlockImportHandle { receiver }) } + + /// Get the [`ProofRecorderIgnoredNodes`] for `parent`. + /// + /// If `parent` was not part of the same block bundle, the [`ProofRecorderIgnoredNodes`] are not + /// required and `None` will be returned. 
+ fn get_ignored_nodes( + &self, + parent: Block::Hash, + core_info: &CoreInfo, + bundle_info: &BlockBundleInfo, + relay_block_identifier: &RelayBlockIdentifier, + ) -> Option> + where + Client: AuxStore + HeaderBackend + Send + Sync, + { + let parent_header = self.client.header(parent).ok().flatten()?; + let parent_core_info = CumulusDigestItem::find_core_info(parent_header.digest())?; + let parent_bundle_info = CumulusDigestItem::find_block_bundle_info(parent_header.digest())?; + let parent_relay_block_identifier = + CumulusDigestItem::find_relay_block_identifier(parent_header.digest())?; + + if parent_relay_block_identifier != *relay_block_identifier { + tracing::trace!(target: LOG_TARGET, ?parent_relay_block_identifier, ?relay_block_identifier, "Relay block identifier doesn't match"); + return None; + } + + if parent_core_info != *core_info { + tracing::trace!(target: LOG_TARGET, ?parent_core_info, ?core_info, "Core info doesn't match"); + return None; + } + + if parent_bundle_info.index.saturating_add(1) != bundle_info.index { + tracing::trace!(target: LOG_TARGET, ?parent_bundle_info, ?bundle_info, "Block is not a child, based on the index"); + return None; + } + + match load_ignored_nodes::(&*self.client, parent) { + Ok(nodes) => nodes, + Err(error) => { + tracing::trace!(target: LOG_TARGET, ?parent, ?error, "Failed to load `IgnoredNodes` from aux store"); + None + }, + } + } + + /// Execute the given block and collect the storage proof. + /// + /// We need to execute the block on this level here, because we are collecting the storage + /// proofs and combining them for blocks on the same core. So, blocks on the same core do not + /// need to include the same trie nodes multiple times and thus, not wasting storage proof size. + /// + /// The proof must be recorded in exactly the same manner as during block building, because the + /// proof size is tracked via `ProofSizeExt` and affects runtime state. 
Without identical proof + /// recording, the computed state root would differ and block import would fail. + fn execute_block_and_collect_storage_proof( + &self, + params: &mut sc_consensus::BlockImportParams, + ) -> Result<(), sp_consensus::Error> + where + Client: ProvideRuntimeApi + + CallApiAt + + AuxStore + + HeaderBackend + + Send + + Sync, + Client::StateBackend: Send, + Client::Api: Core, + { + let core_info = CumulusDigestItem::find_core_info(params.header.digest()); + let bundle_info = CumulusDigestItem::find_block_bundle_info(params.header.digest()); + let relay_block_identifier = + CumulusDigestItem::find_relay_block_identifier(params.header.digest()); + + let (Some(core_info), Some(bundle_info), Some(relay_block_identifier)) = + (core_info, bundle_info, relay_block_identifier) + else { + tracing::debug!( + target: LOG_TARGET, + number = ?params.header.number(), + "no bundle digests, skipping execute_block_and_collect_storage_proof", + ); + return Ok(()); + }; + + let parent_hash = *params.header.parent_hash(); + + let mut nodes_to_ignore = self + .get_ignored_nodes(parent_hash, &core_info, &bundle_info, &relay_block_identifier) + .unwrap_or_default(); + + let recorder = ProofRecorder::::with_ignored_nodes(nodes_to_ignore.clone()); + let proof_size_recorder = RecordingProofSizeProvider::new(recorder.clone()); + + let mut runtime_api = self.client.runtime_api(); + + // `record_proof_with_recorder` captures trie accesses, while `ProofSizeExt` replays the + // proof-size estimations in the same order they were observed during block building. 
+ runtime_api.set_call_context(CallContext::Onchain { import: true }); + runtime_api.record_proof_with_recorder(recorder.clone()); + runtime_api.register_extension(ProofSizeExt::new(proof_size_recorder.clone())); + + let block = Block::new(params.header.clone(), params.body.clone().unwrap_or_default()); + + tracing::debug!( + target: LOG_TARGET, + ?parent_hash, + number = ?params.header.number(), + ?core_info, + ?bundle_info, + "execute_block_and_collect_storage_proof: calling runtime_api.execute_block", + ); + + runtime_api + .execute_block(parent_hash, block.into()) + .map_err(|e| Box::new(e) as Box<_>)?; + + let storage_proof = + runtime_api.extract_proof().expect("Proof recording was enabled above; qed"); + + let state = self.client.state_at(parent_hash).map_err(|e| Box::new(e) as Box<_>)?; + let gen_storage_changes = runtime_api + .into_storage_changes(&state, parent_hash) + .map_err(sp_consensus::Error::ChainLookup)?; + + if params.header.state_root() != &gen_storage_changes.transaction_storage_root { + return Err(sp_consensus::Error::Other(Box::new( + sp_blockchain::Error::InvalidStateRoot, + ))); + } + + // Extend the ignored nodes with the nodes from the storage proof and the generated + // storage changes. This ensures that subsequent blocks in the same bundle don't + // redundantly include the same trie nodes in their proof. 
+ nodes_to_ignore.extend(ProofRecorderIgnoredNodes::::from_storage_proof::< + HashingFor, + >(&storage_proof)); + nodes_to_ignore.extend(ProofRecorderIgnoredNodes::::from_memory_db( + gen_storage_changes.transaction.clone(), + )); + + let block_hash = params.post_hash(); + prepare_ignored_nodes_transaction::(block_hash, nodes_to_ignore).for_each( + |(k, v)| { + params.auxiliary.push((k, Some(v))); + }, + ); + + // Extract and store proof size recordings + let recorded_sizes = proof_size_recorder + .recorded_estimations() + .into_iter() + .map(|size| size as u32) + .collect::>(); + + if !recorded_sizes.is_empty() { + prepare_proof_size_recording_aux_data(block_hash, recorded_sizes).for_each(|(k, v)| { + params.auxiliary.push((k, Some(v))); + }); + } + + params.state_action = + StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes(gen_storage_changes)); + + Ok(()) + } } -impl Clone for SlotBasedBlockImport { +impl Clone for SlotBasedBlockImport { fn clone(&self) -> Self { Self { inner: self.inner.clone(), client: self.client.clone(), sender: self.sender.clone() } } @@ -78,7 +334,8 @@ where Block: BlockT, BI: BlockImport + Send + Sync, BI::Error: Into, - Client: ProvideRuntimeApi + CallApiAt + Send + Sync, + Client: + ProvideRuntimeApi + CallApiAt + AuxStore + HeaderBackend + Send + Sync, Client::StateBackend: Send, Client::Api: Core, { @@ -95,191 +352,13 @@ where &self, mut params: sc_consensus::BlockImportParams, ) -> Result { - // If the channel exists and it is required to execute the block, we will execute the block - // here. This is done to collect the storage proof and to prevent re-execution, we push - // downwards the state changes. - // - // The following states are ignored: - // - `StateAction::ApplyChanges`: means that the node produced the block itself or the - // block was imported via state sync. - // - `StateAction::Skip`: means that the block should be skipped. 
This is evident in the - // context of gap-sync with collators running in non-archive modes. The state of the - // parent block has already been discarded and therefore any import would fail. - if !self.sender.is_closed() && - !matches!(params.state_action, StateAction::ApplyChanges(_) | StateAction::Skip) + if !(params.origin == BlockOrigin::Own || + params.with_state() || + params.state_action.skip_execution_checks()) { - let mut runtime_api = self.client.runtime_api(); - - runtime_api.set_call_context(CallContext::Onchain { import: true }); - - runtime_api.record_proof(); - let recorder = runtime_api - .proof_recorder() - .expect("Proof recording is enabled in the line above; qed."); - runtime_api.register_extension(ProofSizeExt::new(recorder)); - - let parent_hash = *params.header.parent_hash(); - - let block = Block::new(params.header.clone(), params.body.clone().unwrap_or_default()); - - runtime_api - .execute_block(parent_hash, block.clone().into()) - .map_err(|e| Box::new(e) as Box<_>)?; - - let storage_proof = - runtime_api.extract_proof().expect("Proof recording was enabled above; qed"); - - let state = self.client.state_at(parent_hash).map_err(|e| Box::new(e) as Box<_>)?; - let gen_storage_changes = runtime_api - .into_storage_changes(&state, parent_hash) - .map_err(sp_consensus::Error::ChainLookup)?; - - if params.header.state_root() != &gen_storage_changes.transaction_storage_root { - return Err(sp_consensus::Error::Other(Box::new( - sp_blockchain::Error::InvalidStateRoot, - ))); - } - - params.state_action = StateAction::ApplyChanges(sc_consensus::StorageChanges::Changes( - gen_storage_changes, - )); - - let _ = self.sender.unbounded_send((block, storage_proof)); + self.execute_block_and_collect_storage_proof(&mut params)?; } self.inner.import_block(params).await.map_err(Into::into) } } - -#[cfg(test)] -mod tests { - use super::*; - use codec::Encode; - use cumulus_test_client::{ - runtime::Block, DefaultTestClientBuilderExt, InitBlockBuilder, 
TestClientBuilder, - TestClientBuilderExt, - }; - use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; - use polkadot_primitives::HeadData; - use sc_consensus::{BlockImportParams, ImportResult, StateAction}; - use sp_blockchain::HeaderBackend; - use sp_consensus::BlockOrigin; - - fn sproof_with_best_parent(client: &cumulus_test_client::Client) -> RelayStateSproofBuilder { - let best_hash = client.info().best_hash; - let header = client.header(best_hash).ok().flatten().expect("No header for best block"); - let mut builder = RelayStateSproofBuilder::default(); - builder.para_id = cumulus_test_client::runtime::PARACHAIN_ID.into(); - builder.included_para_head = Some(HeadData(header.encode())); - builder - } - - /// Mock inner block import that always succeeds. - #[derive(Clone)] - struct MockBlockImport; - - #[async_trait::async_trait] - impl BlockImport for MockBlockImport { - type Error = sp_consensus::Error; - - async fn check_block( - &self, - _block: sc_consensus::BlockCheckParams, - ) -> Result { - Ok(ImportResult::imported(false)) - } - - async fn import_block( - &self, - _block: BlockImportParams, - ) -> Result { - Ok(ImportResult::imported(true)) - } - } - - /// Regression test for the gap-sync infinite loop issue. - /// - /// When a non-archive collator has a block gap of size 1, gap-sync downloads - /// the block and marks it with `skip_execution: true` (which translates to - /// `StateAction::Skip`). Before the fix, `SlotBasedBlockImport` would attempt - /// to execute such blocks, fail with a consensus error ("State already - /// discarded for parent"), and trigger a chain-sync restart that re-creates - /// the same gap — leading to an infinite retry loop. - /// - /// This test verifies that `StateAction::Skip` blocks are forwarded to the - /// inner block import without attempting runtime execution. 
- #[tokio::test] - async fn gap_sync_block_with_skip_execution_does_not_attempt_runtime_call() { - sp_tracing::try_init_simple(); - - let client = Arc::new(TestClientBuilder::new().build()); - - // Build a valid block so we have realistic headers/bodies. - let sproof = sproof_with_best_parent(&client); - let block_builder_data = client.init_block_builder(None, sproof); - let block = block_builder_data.block_builder.build().unwrap().block; - - let (slot_based_import, mut handle) = - SlotBasedBlockImport::new(MockBlockImport, client.clone()); - - // Simulate the gap-sync scenario: a block arrives with StateAction::Skip - // because the parent state has been pruned. - let mut params = BlockImportParams::new(BlockOrigin::NetworkInitialSync, block.header); - params.body = Some(block.extrinsics); - params.state_action = StateAction::Skip; - params.import_existing = true; - - // Before the fix, this would fail with a consensus error because - // SlotBasedBlockImport would try to call `runtime_api.execute_block()` - // on the parent hash whose state is no longer available. - // - // After the fix, StateAction::Skip is recognized and the block is - // forwarded directly to the inner import without execution. - let result = slot_based_import.import_block(params).await; - assert!(result.is_ok(), "Gap-sync block with StateAction::Skip must not fail: {result:?}"); - - // The channel must be empty — execution should have been skipped entirely, - // so no (block, proof) was sent. This is the key assertion: without the - // StateAction::Skip guard, execute_block() would run and send a message. - // - // Drop the sender side so the channel closes, then verify no message was queued. - drop(slot_based_import); - assert!( - handle.receiver.next().await.is_none(), - "No block+proof should be sent through the channel for StateAction::Skip" - ); - } - - /// Verify that `StateAction::Execute` still triggers runtime execution. 
- /// - /// This complements the gap-sync regression test by ensuring we did not - /// accidentally disable execution for normal blocks. - #[tokio::test] - async fn normal_block_with_execute_action_triggers_runtime_execution() { - sp_tracing::try_init_simple(); - - let client = Arc::new(TestClientBuilder::new().build()); - - let sproof = sproof_with_best_parent(&client); - let block_builder_data = client.init_block_builder(None, sproof); - let block = block_builder_data.block_builder.build().unwrap().block; - - let (slot_based_import, mut handle) = - SlotBasedBlockImport::new(MockBlockImport, client.clone()); - - // Normal import with StateAction::Execute should trigger execution - // and send the block + proof through the channel. - let mut params = - BlockImportParams::new(BlockOrigin::NetworkInitialSync, block.header.clone()); - params.body = Some(block.extrinsics.clone()); - params.state_action = StateAction::Execute; - - let result = slot_based_import.import_block(params).await; - assert!(result.is_ok(), "Normal block import should succeed: {result:?}"); - - // The block and proof should have been sent through the channel, - // confirming that execution actually happened. 
- let (received_block, _proof) = handle.next().await; - assert_eq!(*received_block.header(), block.header); - } -} diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs index c0fa0846de001..a77fb2b89531a 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/collation_task.rs @@ -121,20 +121,19 @@ async fn handle_collation_message collation, None => { - tracing::warn!(target: LOG_TARGET, %hash, ?number, ?core_index, "Unable to build collation."); + tracing::warn!(target: LOG_TARGET, ?core_index, "Unable to build collation."); return; }, }; @@ -165,6 +164,7 @@ async fn handle_collation_message>(), "Compressed PoV size: {}kb", pov.block_data.0.len() as f64 / 1024f64, ); @@ -183,7 +183,12 @@ async fn handle_collation_message>(), + "Submitting collation for core.", + ); overseer_handle .send_msg( diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs index c9ce5404db03f..38eb099e7532d 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/mod.rs @@ -43,7 +43,9 @@ //! //! - Parachain slot duration //! - Number of assigned parachain cores -//! - Parachain runtime configuration +//! - The `target_block_rate` runtime API, which determines how many blocks to produce per relay +//! chain slot. When this API is unavailable, the block builder falls back to one block per core. +//! When the target exceeds the number of cores, multiple blocks are bundled per core. //! //! ## Timing Examples //! 
@@ -69,23 +71,26 @@ use self::{block_builder_task::run_block_builder, collation_task::run_collation_task}; pub use block_import::{SlotBasedBlockImport, SlotBasedBlockImportHandle}; use codec::Codec; -use consensus_common::ParachainCandidate; use cumulus_client_collator::service::ServiceInterface as CollatorServiceInterface; use cumulus_client_consensus_common::{self as consensus_common, ParachainBlockImportMarker}; +use cumulus_client_proof_size_recording::register_proof_size_recording_cleanup; use cumulus_primitives_aura::AuraUnincludedSegmentApi; -use cumulus_primitives_core::{KeyToIncludeInRelayProof, RelayParentOffsetApi}; +use cumulus_primitives_core::{KeyToIncludeInRelayProof, RelayParentOffsetApi, TargetBlockRate}; use cumulus_relay_chain_interface::RelayChainInterface; use futures::FutureExt; use polkadot_primitives::{ CollatorPair, CoreIndex, Hash as RelayHash, Id as ParaId, PersistedValidationData, ValidationCodeHash, }; -use sc_client_api::{backend::AuxStore, BlockBackend, BlockOf, UsageProvider}; +use sc_client_api::{ + backend::AuxStore, client::PreCommitActions, BlockBackend, BlockOf, UsageProvider, +}; use sc_consensus::BlockImport; use sc_network_types::PeerId; use sc_utils::mpsc::tracing_unbounded; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProvideRuntimeApi, StorageProof}; use sp_application_crypto::AppPublic; +use sp_block_builder::BlockBuilder; use sp_blockchain::HeaderBackend; use sp_consensus::Environment; use sp_consensus_aura::AuraApi; @@ -132,8 +137,6 @@ pub struct Params + BlockBackend + UsageProvider + + PreCommitActions + Send + Sync + 'static, Client::Api: AuraApi + AuraUnincludedSegmentApi + RelayParentOffsetApi + + TargetBlockRate + + BlockBuilder + KeyToIncludeInRelayProof, Backend: sc_client_api::Backend + 'static, RClient: RelayChainInterface + Clone + 'static, @@ -177,7 +183,7 @@ pub fn run + ParachainBlockImportMarker + Send + Sync + 'static, Proposer: Environment + Send + Sync + 'static, CS: CollatorServiceInterface + Send + 
Sync + Clone + 'static, - CHP: consensus_common::ValidationCodeHashProvider + Send + 'static, + CHP: consensus_common::ValidationCodeHashProvider + Send + Sync + 'static, P: Pair + Send + Sync + 'static, P::Public: AppPublic + Member + Codec, P::Signature: TryFrom> + Member + Codec, @@ -196,7 +202,6 @@ pub fn run { pub relay_parent: RelayHash, /// The header of the parent block. pub parent_header: Block::Header, - /// The parachain block candidate. - pub parachain_candidate: ParachainCandidate, + /// The built blocks. + pub blocks: Vec, + /// The storage proof that was collected while building all the blocks. + pub proof: StorageProof, /// The validation code hash at the parent block. pub validation_code_hash: ValidationCodeHash, /// Core index that this block should be submitted on diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs b/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs index fb22251e4f5c4..17e49479a2648 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/relay_chain_data_cache.rs @@ -18,7 +18,6 @@ //! Utility for caching [`RelayChainData`] for different relay blocks. use crate::collators::claim_queue_at; -use cumulus_primitives_core::CoreSelector; use cumulus_relay_chain_interface::RelayChainInterface; use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot; use polkadot_primitives::{ @@ -35,8 +34,6 @@ pub struct RelayChainData { pub claim_queue: ClaimQueueSnapshot, /// Maximum configured PoV size on the relay chain. pub max_pov_size: u32, - /// The last [`CoreSelector`] we used. 
- pub last_claimed_core_selector: Option, } /// Simple helper to fetch relay chain data and cache it based on the current relay chain best block @@ -107,16 +104,11 @@ where }, }; - Ok(RelayChainData { - relay_parent_header, - claim_queue, - max_pov_size, - last_claimed_core_selector: None, - }) + Ok(RelayChainData { relay_parent_header, claim_queue, max_pov_size }) } #[cfg(test)] - pub(crate) fn insert_test_data(&mut self, relay_parent: RelayHash, data: RelayChainData) { - self.cached_data.insert(relay_parent, data); + pub fn insert_test_data(&mut self, relay_parent_hash: RelayHash, data: RelayChainData) { + self.cached_data.insert(relay_parent_hash, data); } } diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs index a5357dc8d4ef5..810ab247f5e06 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/slot_timer.rs @@ -16,44 +16,11 @@ // along with Cumulus. If not, see . use crate::LOG_TARGET; -use codec::Codec; use cumulus_primitives_aura::Slot; -use cumulus_primitives_core::BlockT; -use sc_client_api::UsageProvider; use sc_consensus_aura::SlotDuration; -use sp_api::{ApiExt, ProvideRuntimeApi}; -use sp_application_crypto::AppPublic; -use sp_consensus_aura::AuraApi; -use sp_core::Pair; -use sp_runtime::traits::Member; + use sp_timestamp::Timestamp; -use std::{ - cmp::{max, min}, - sync::Arc, - time::Duration, -}; - -/// Lower limits of allowed block production interval. -/// Defensive mechanism, corresponds to 12 cores at 6 second block time. -const BLOCK_PRODUCTION_MINIMUM_INTERVAL_MS: Duration = Duration::from_millis(500); - -/// Theoretically, the block production is capped at `BLOCK_PRODUCTION_MINIMUM_INTERVAL_MS`. -/// In practice, there might be slight deviations due to timing inaccuracies and delays. 
-/// -/// This constant is taken into account while adjusting the authoring duration to fit into the slot. -/// Therefore, it will only reduce the authoring duration if we are within the -/// `BLOCK_PRODUCTION_ADJUSTMENT_MS` threshold of the next slot. -/// -/// ### 12 cores 500ms blocks -/// -/// For example, for 12 cores 500ms blocks: the next slot is scheduled in 490ms due to delays. -/// In that case, we still want to attempt producing the block, as missing the slot would be worse -/// than producing slightly too fast. -const BLOCK_PRODUCTION_THRESHOLD_MS: Duration = Duration::from_millis(100); - -/// The amount of time the authoring duration of the last block production attempt -/// should be reduced by to fit into the slot timing. -const BLOCK_PRODUCTION_ADJUSTMENT_MS: Duration = Duration::from_millis(1000); +use std::time::Duration; #[derive(Debug)] pub(crate) struct SlotInfo { @@ -61,82 +28,70 @@ pub(crate) struct SlotInfo { pub slot: Slot, } -/// Manages block-production timings based on chain parameters and assigned cores. -#[derive(Debug)] -pub(crate) struct SlotTimer { - /// Parachain client that is used for runtime calls - client: Arc, - /// Offset the current time by this duration. - time_offset: Duration, - /// Last reported core count. - last_reported_core_num: Option, - /// Slot duration of the relay chain. This is used to compute how man block-production - /// attempts we should trigger per relay chain block. +/// Information about a slot timing, including the relay chain slot duration and exact start +/// timestamp. +#[derive(Debug, Clone)] +pub(crate) struct SlotTime { + /// The relay chain slot duration used for this timing relay_slot_duration: Duration, - /// Stores the latest slot that was reported by [`Self::wait_until_next_slot`]. 
- last_reported_slot: Option, - _marker: std::marker::PhantomData<(Block, Box)>, + /// The exact timestamp when this relay chain slot started + slot_start_timestamp: Timestamp, + /// Time offset to apply when calculating time remaining + time_offset: Duration, } -/// Compute when to try block-authoring next. -/// The exact time point is determined by the slot duration of relay- and parachain as -/// well as the last observed core count. If more cores are available, we attempt to author blocks -/// for them. -/// -/// Returns a tuple with: -/// - `Duration`: How long to wait until the next slot. -/// - `Slot`: The AURA slot used for authoring -fn compute_next_wake_up_time( - para_slot_duration: SlotDuration, - relay_slot_duration: Duration, - core_count: Option, - time_now: Duration, - time_offset: Duration, -) -> (Duration, Slot) { - let para_slots_per_relay_block = - (relay_slot_duration.as_millis() / para_slot_duration.as_millis() as u128) as u32; - let assigned_core_num = core_count.unwrap_or(1); - - // Trigger at least once per relay block, if we have for example 12 second slot duration, - // we should still produce two blocks if we are scheduled on every relay block. - let mut block_production_interval = min(para_slot_duration.as_duration(), relay_slot_duration); - - if assigned_core_num > para_slots_per_relay_block && - para_slot_duration.as_duration() >= relay_slot_duration - { - block_production_interval = - max(relay_slot_duration / assigned_core_num, BLOCK_PRODUCTION_MINIMUM_INTERVAL_MS); - tracing::debug!( - target: LOG_TARGET, - ?block_production_interval, - "Expected to produce for {assigned_core_num} cores but only have {para_slots_per_relay_block} slots. Attempting to produce multiple blocks per slot." 
- ); +impl SlotTime { + /// Create a new SlotTime + pub fn new( + relay_slot_duration: Duration, + slot_start_timestamp: Timestamp, + time_offset: Duration, + ) -> Self { + Self { relay_slot_duration, slot_start_timestamp, time_offset } } - let (duration, timestamp) = - time_until_next_attempt(time_now, block_production_interval, time_offset); - let aura_slot = Slot::from_timestamp(timestamp, para_slot_duration); - (duration, aura_slot) -} + /// Get the time remaining in this slot + pub fn time_left(&self) -> Duration { + self.time_left_internal(duration_now()) + } -/// Compute the time until the next slot changes. -/// -/// Returns None if the next slot cannot be computed. -fn compute_time_until_next_slot_change( - para_slot_duration: SlotDuration, - time_now: Duration, - time_offset: Duration, - last_reported_slot: Slot, -) -> Option<(Duration, Slot)> { - let now = time_now.saturating_sub(time_offset); - let next_slot = last_reported_slot + Slot::from(1); + /// Internal implementation of [`Self::time_left`] that takes `now` as parameter. + fn time_left_internal(&self, now: Duration) -> Duration { + let now = now.saturating_sub(self.time_offset); + let slot_end_time_millis = + self.slot_start_timestamp.as_millis() + self.relay_slot_duration.as_millis() as u64; + let slot_end_time = Duration::from_millis(slot_end_time_millis); + + slot_end_time.saturating_sub(now) + } + + /// Check if the next relay chain slot would be in a different parachain slot. 
+ pub fn is_parachain_slot_ending(&self, parachain_slot_duration: Duration) -> bool { + let now = duration_now().saturating_sub(self.time_offset); + let next_relay_slot_start_time = + self.slot_start_timestamp.as_duration() + self.relay_slot_duration; - let Some(next_slot_timestamp) = next_slot.timestamp(para_slot_duration) else { - return None; - }; - let remaining_time = next_slot_timestamp.as_duration().saturating_sub(now); + // Calculate current parachain slot + let current_parachain_slot = now.as_millis() / parachain_slot_duration.as_millis(); - Some((remaining_time, next_slot)) + // Calculate parachain slot for next relay slot + let next_parachain_slot = + next_relay_slot_start_time.as_millis() / parachain_slot_duration.as_millis() as u128; + + current_parachain_slot != next_parachain_slot + } +} + +/// Manages block-production slots based on the relay chain slot duration. +#[derive(Debug)] +pub(crate) struct SlotTimer { + /// Offset the current time by this duration. + time_offset: Duration, + /// Slot duration of the relay chain. This is used to compute when to wake up for + /// block production attempts. + relay_slot_duration: Duration, + /// Stores the latest slot that was reported by [`Self::wait_until_next_slot`]. + last_reported_slot: Option, } /// Returns current duration since Unix epoch. @@ -148,90 +103,13 @@ pub(super) fn duration_now() -> Duration { }) } -/// Adjust the authoring duration. -fn adjust_authoring_duration( - mut authoring_duration: Duration, - next_block: (Duration, Slot), - next_slot_change: (Duration, Slot), - different_authors: bool, -) -> Option { - let (duration, next_block_slot) = next_block; - let (duration_until_next_slot, next_slot) = next_slot_change; - - // The authoring of blocks must stop 1 second before the slot ends. 
- let duration_until_deadline = - duration_until_next_slot.saturating_sub(BLOCK_PRODUCTION_ADJUSTMENT_MS); - tracing::debug!( - target: LOG_TARGET, - ?authoring_duration, - ?duration, - ?next_block_slot, - ?duration_until_next_slot, - ?next_slot, - ?duration_until_deadline, - ?different_authors, - "Adjusting authoring duration for slot.", - ); - - // Ensure no blocks are produced in the last second of the slot, - // regardless of authoring duration. - if duration_until_deadline == Duration::ZERO { - if different_authors { - tracing::debug!( - target: LOG_TARGET, - ?duration_until_next_slot, - ?next_slot, - "Not enough time left in the slot to adjust authoring duration. Skipping block production for the slot." - ); - - return None; - } - - // If authors are the same, we can still attempt producing the block - // considering the next block duration. - return Some(authoring_duration.min(duration)); - } - - // Clamp the authoring duration to fit into the slot deadline only if authors are different. - // For most cases, the deadline is farther in the future than the authoring duration. - if different_authors && authoring_duration >= duration_until_deadline { - authoring_duration = duration_until_deadline; - - // Ensure we are not going below the minimum interval within a reasonable threshold. - // For 12 cores, we might have a scenario where the last 3 blocks are skipped: - // - Block 10: next slot change in 1.493s: - // - After adjusting the deadline: 1.493s - 1s = 0.493s the block could be produced - // without issues. - // - Block 11: next slot change in 0.993s - skipped by the deadline - // - Block 12: next slot change in 0.493s - skipped by the deadline - if authoring_duration < - BLOCK_PRODUCTION_MINIMUM_INTERVAL_MS.saturating_sub(BLOCK_PRODUCTION_THRESHOLD_MS) - { - tracing::debug!( - target: LOG_TARGET, - ?authoring_duration, - ?next_slot, - "Authoring duration is below minimum. Skipping block production for the slot." 
- ); - return None; - } - } - - // The `duration` intends to slightly adjust when then block production - // attempt happens. This goes slightly below the `BLOCK_PRODUCTION_MINIMUM_INTERVAL_MS` - // threshold. - Some(authoring_duration.min(duration)) -} - -/// Returns the duration until the next block production should be attempted. -/// Returns: -/// - Duration: The duration until the next attempt. -fn time_until_next_attempt( +/// Returns the duration until the next block production slot and the timestamp at this slot. +fn time_until_next_slot( now: Duration, block_production_interval: Duration, offset: Duration, ) -> (Duration, Timestamp) { - let now = now.as_millis().saturating_sub(offset.as_millis()); + let now = now.saturating_sub(offset).as_millis(); let next_slot_time = ((now + block_production_interval.as_millis()) / block_production_interval.as_millis()) * @@ -240,158 +118,68 @@ fn time_until_next_attempt( (Duration::from_millis(remaining_millis as u64), Timestamp::from(next_slot_time as u64)) } -impl SlotTimer -where - Block: BlockT, - Client: ProvideRuntimeApi + UsageProvider + Send + Sync + 'static, - Client::Api: AuraApi, - P: Pair, - P::Public: AppPublic + Member + Codec, - P::Signature: TryFrom> + Member + Codec, -{ +impl SlotTimer { /// Create a new slot timer. - pub fn new_with_offset( - client: Arc, - time_offset: Duration, - relay_slot_duration: Duration, - ) -> Self { - Self { - client, - time_offset, - last_reported_core_num: None, - relay_slot_duration, - last_reported_slot: Default::default(), - _marker: Default::default(), - } - } - - /// Inform the slot timer about the last seen number of cores. - pub fn update_scheduling(&mut self, num_cores_next_block: u32) { - self.last_reported_core_num = Some(num_cores_next_block); - } - - /// Returns the slot and how much time left until the next block production attempt. 
- pub fn time_until_next_block(&mut self, slot_duration: SlotDuration) -> (Duration, Slot) { - compute_next_wake_up_time( - slot_duration, - self.relay_slot_duration, - self.last_reported_core_num, - duration_now(), - self.time_offset, - ) - } - - /// Compute the time until the next slot changes. - fn time_until_next_slot_change( - &mut self, - slot_duration: SlotDuration, - ) -> Option<(Duration, Slot)> { - compute_time_until_next_slot_change( - slot_duration, - duration_now(), - self.time_offset, - self.last_reported_slot.unwrap_or_default(), - ) - } - - /// Check if two slots have different authors based on AURA round-robin algorithm. - /// - /// Returns true if the authors for the two slots are different. - fn check_different_slot_authors(&self, slot: Slot, next_slot: Slot) -> bool { - let best_hash = self.client.usage_info().chain.best_hash; - - let mut runtime_api = self.client.runtime_api(); - runtime_api.set_call_context(sp_core::traits::CallContext::Onchain { import: false }); - let Ok(authorities) = runtime_api.authorities(best_hash) else { - // Presume they are different, this will adjust the slot authoring duration more - // conservatively. - return true; - }; - - let authorities_len = authorities.len() as u64; - if authorities_len <= 1 { - return false; - } - - let author1_idx = *slot % authorities_len; - let author2_idx = *next_slot % authorities_len; - - author1_idx != author2_idx - } - - /// Adjust the authoring duration to fit into the slot timing. - /// - /// Returns the adjusted authoring duration and the slot that it corresponds to. 
- pub fn adjust_authoring_duration(&mut self, authoring_duration: Duration) -> Option { - let Ok(slot_duration) = crate::slot_duration(&*self.client) else { - tracing::error!(target: LOG_TARGET, "Failed to fetch slot duration from runtime."); - return None; - }; - - let next_block = self.time_until_next_block(slot_duration); - let Some(next_slot_change) = self.time_until_next_slot_change(slot_duration) else { - tracing::error!( - target: LOG_TARGET, - "Failed to compute time until next slot change. Using unadjusted authoring duration." - ); - return Some(authoring_duration); - }; - - // Check if authors at current and next slots are different - let current_slot = self.last_reported_slot.unwrap_or(next_block.1); - let different_authors = self.check_different_slot_authors(current_slot, next_slot_change.1); - - adjust_authoring_duration( - authoring_duration, - next_block, - next_slot_change, - different_authors, - ) + pub fn new_with_offset(time_offset: Duration, relay_slot_duration: Duration) -> Self { + Self { time_offset, relay_slot_duration, last_reported_slot: None } } /// Returns a future that resolves when the next block production should be attempted. 
- pub async fn wait_until_next_slot(&mut self) -> Result<(), ()> { - let slot_duration = match crate::slot_duration(&*self.client) { - Ok(d) => d, - Err(error) => { - tracing::error!(target: LOG_TARGET, %error, "Failed to fetch slot duration from runtime."); - return Err(()); - }, - }; + pub async fn wait_until_next_slot(&mut self) -> Result { + let (time_until_next_attempt, timestamp) = + time_until_next_slot(duration_now(), self.relay_slot_duration, self.time_offset); - let (time_until_next_attempt, mut next_aura_slot) = - self.time_until_next_block(slot_duration); + // Calculate the current slot using the relay chain slot duration + let relay_slot_duration_for_slot = SlotDuration::from(self.relay_slot_duration); + let mut next_slot = Slot::from_timestamp(timestamp, relay_slot_duration_for_slot); - tracing::trace!( - target: LOG_TARGET, - ?time_until_next_attempt, - aura_slot = ?next_aura_slot, - last_reported = ?self.last_reported_slot, - "Determined next block production opportunity." - ); + // Calculate the actual slot start timestamp (may be different if we're catching up) + let mut slot_start_timestamp = timestamp; match self.last_reported_slot { // If we already reported a slot, we don't want to skip a slot. But we also don't want // to go through all the slots if a node was halted for some reason. - Some(ls) if ls + 1 < next_aura_slot && next_aura_slot <= ls + 3 => { - next_aura_slot = ls + 1u64; + Some(ls) if ls + 1 < next_slot && next_slot <= ls + 3 => { + next_slot = ls + 1u64; + // Calculate the timestamp for the adjusted slot + slot_start_timestamp = + next_slot.timestamp(relay_slot_duration_for_slot).ok_or(())?; + // Don't sleep since we're catching up + tracing::debug!( + target: LOG_TARGET, + last_slot = ?ls, + next_slot = ?next_slot, + "Catching up on skipped slot." 
+ ); }, None | Some(_) => { - tracing::trace!(target: LOG_TARGET, ?time_until_next_attempt, "Sleeping until the next slot."); - tokio::time::sleep(time_until_next_attempt).await; + tracing::trace!( + target: LOG_TARGET, + time_to_sleep = ?time_until_next_attempt, + "Feeling sleepy 😴" + ); + + // Wake up slightly before the next slot to avoid noisy "catching up" logs caused by + // scheduler jitter right at the slot boundary. + tokio::time::sleep( + time_until_next_attempt.saturating_sub(Duration::from_millis(2)), + ) + .await; }, } tracing::debug!( target: LOG_TARGET, - ?slot_duration, - aura_slot = ?next_aura_slot, - "New block production opportunity." + relay_slot_duration = ?self.relay_slot_duration, + ?next_slot, + ?slot_start_timestamp, + "New block production slot." ); - self.last_reported_slot = Some(next_aura_slot); + // Update internal slot tracking + self.last_reported_slot = Some(next_slot); - Ok(()) + Ok(SlotTime::new(self.relay_slot_duration, slot_start_timestamp, self.time_offset)) } } @@ -399,250 +187,69 @@ where mod tests { use super::*; use rstest::rstest; - use sc_consensus_aura::SlotDuration; const RELAY_CHAIN_SLOT_DURATION: u64 = 6000; #[rstest] // Test that different now timestamps have correct impact - // |||| - #[case(6000, Some(1), 1000, 0, 5000)] - #[case(6000, Some(1), 0, 0, 6000)] - #[case(6000, Some(1), 6000, 0, 6000)] - #[case(6000, Some(0), 6000, 0, 6000)] - // Test that `None` core defaults to 1 - // |||| - #[case(6000, None, 1000, 0, 5000)] - #[case(6000, None, 0, 0, 6000)] - #[case(6000, None, 6000, 0, 6000)] + #[case(1000, 0, 5000)] + #[case(0, 0, 6000)] + #[case(6000, 0, 6000)] // Test that offset affects the current time correctly - // |||| - #[case(6000, Some(1), 1000, 1000, 6000)] - #[case(6000, Some(1), 12000, 2000, 2000)] - #[case(6000, Some(1), 12000, 6000, 6000)] - #[case(6000, Some(1), 12000, 7000, 1000)] - // Test that number of cores affects the block production interval - // ||||||| - #[case(6000, Some(3), 12000, 0, 
2000)] - #[case(6000, Some(2), 12000, 0, 3000)] - #[case(6000, Some(3), 11999, 0, 1)] - // High core count - // |||||||| - #[case(6000, Some(12), 0, 0, 500)] - /// Test that the minimum block interval is respected - /// at high core counts. - /// ||||||||| - #[case(6000, Some(100), 0, 0, 500)] - // Test that slot_duration works correctly - // |||| - #[case(2000, Some(1), 1000, 0, 1000)] - #[case(2000, Some(1), 3000, 0, 1000)] - #[case(2000, Some(1), 10000, 0, 2000)] - #[case(2000, Some(2), 1000, 0, 1000)] - // Cores are ignored if relay_slot_duration != para_slot_duration - // ||||||| - #[case(2000, Some(3), 3000, 0, 1000)] - // For long slot durations, we should still check - // every relay chain block for the slot. - // ||||| - #[case(12000, None, 0, 0, 6000)] - #[case(12000, None, 6100, 0, 5900)] - #[case(12000, None, 6000, 2000, 2000)] - #[case(12000, Some(2), 6000, 0, 3000)] - #[case(12000, Some(3), 6000, 0, 2000)] - #[case(12000, Some(3), 8100, 0, 1900)] + #[case(1000, 1000, 6000)] + #[case(12000, 2000, 2000)] + #[case(12000, 6000, 6000)] + #[case(12000, 7000, 1000)] + // Test basic timing with relay slot duration + #[case(11999, 0, 1)] fn test_get_next_slot( - #[case] para_slot_millis: u64, - #[case] core_count: Option, #[case] time_now: u64, #[case] offset_millis: u64, #[case] expected_wait_duration: u128, ) { - let para_slot_duration = SlotDuration::from_millis(para_slot_millis); // 6 second slots let relay_slot_duration = Duration::from_millis(RELAY_CHAIN_SLOT_DURATION); - let time_now = Duration::from_millis(time_now); // 1 second passed + let time_now = Duration::from_millis(time_now); let offset = Duration::from_millis(offset_millis); - let (wait_duration, _) = compute_next_wake_up_time( - para_slot_duration, - relay_slot_duration, - core_count, - time_now, - offset, - ); + let (wait_duration, _) = time_until_next_slot(time_now, relay_slot_duration, offset); assert_eq!(wait_duration.as_millis(), expected_wait_duration, "Wait time mismatch."); - // 
Should wait 5 seconds } #[rstest] // Basic slot change scenarios - #[case(6000, 0, 0, Slot::from(0), 6000, Slot::from(1))] - #[case(6000, 1000, 0, Slot::from(0), 5000, Slot::from(1))] - #[case(6000, 6000, 0, Slot::from(1), 6000, Slot::from(2))] - #[case(6000, 12000, 0, Slot::from(2), 6000, Slot::from(3))] + #[case(6000, 0, 0, Slot::from(0), 6000)] + #[case(6000, 1000, 0, Slot::from(0), 5000)] + #[case(6000, 6000, 0, Slot::from(1), 6000)] + #[case(6000, 12000, 0, Slot::from(2), 6000)] // Test with offset - #[case(6000, 1000, 1000, Slot::from(0), 6000, Slot::from(1))] - #[case(6000, 2000, 1000, Slot::from(0), 5000, Slot::from(1))] - #[case(6000, 6000, 3000, Slot::from(0), 3000, Slot::from(1))] + #[case(6000, 1000, 1000, Slot::from(0), 6000)] + #[case(6000, 2000, 1000, Slot::from(0), 5000)] + #[case(6000, 6000, 3000, Slot::from(0), 3000)] // Different slot durations - #[case(3000, 1000, 0, Slot::from(0), 2000, Slot::from(1))] - #[case(3000, 3000, 0, Slot::from(1), 3000, Slot::from(2))] - #[case(12000, 6000, 0, Slot::from(0), 6000, Slot::from(1))] - #[case(12000, 12000, 0, Slot::from(1), 12000, Slot::from(2))] + #[case(3000, 1000, 0, Slot::from(0), 2000)] + #[case(3000, 3000, 0, Slot::from(1), 3000)] + #[case(12000, 6000, 0, Slot::from(0), 6000)] + #[case(12000, 12000, 0, Slot::from(1), 12000)] // Edge cases - at slot boundary - #[case(6000, 5999, 0, Slot::from(0), 1, Slot::from(1))] - #[case(6000, 11999, 0, Slot::from(1), 1, Slot::from(2))] + #[case(6000, 5999, 0, Slot::from(0), 1)] + #[case(6000, 11999, 0, Slot::from(1), 1)] fn test_compute_time_until_next_slot_change( #[case] para_slot_millis: u64, #[case] time_now: u64, #[case] offset_millis: u64, #[case] last_reported_slot: Slot, #[case] expected_duration: u128, - #[case] expected_next_slot: Slot, ) { - let para_slot_duration = SlotDuration::from_millis(para_slot_millis); - let time_now = Duration::from_millis(time_now); - let offset = Duration::from_millis(offset_millis); - - let result = 
compute_time_until_next_slot_change( - para_slot_duration, - time_now, - offset, - last_reported_slot, - ); - - assert!(result.is_some(), "Expected result to be Some"); - let (duration, next_slot) = result.unwrap(); - assert_eq!(duration.as_millis(), expected_duration, "Duration mismatch"); - assert_eq!(next_slot, expected_next_slot, "Next slot mismatch"); - } + let slot_time = SlotTime { + relay_slot_duration: Duration::from_millis(para_slot_millis), + time_offset: Duration::from_millis(offset_millis), + slot_start_timestamp: Timestamp::new( + Duration::from_millis(para_slot_millis).as_millis() as u64 * *last_reported_slot, + ), + }; - #[rstest] - // Various scenarios for 2s block production adjustment. - #[case::blocks_2s_fits_next_block( - Duration::from_millis(2000), // Authoring duration - (Duration::from_millis(2000), Slot::from(1)), // Next block - (Duration::from_millis(4000), Slot::from(2)), // Next slot change - true, // Different authors - Some(Duration::from_millis(2000)), // Expected - )] - #[case::blocks_2s_closer_next_slot( - Duration::from_millis(2000), // Authoring duration - (Duration::from_millis(1950), Slot::from(1)), // Next block - (Duration::from_millis(4000), Slot::from(2)), // Next slot change - true, // Different authors - Some(Duration::from_millis(1950)), // Expected - )] - #[case::blocks_2s_closer_next_slot_bigger( - Duration::from_millis(2000), // Authoring duration - (Duration::from_millis(1500), Slot::from(1)), // Next block - (Duration::from_millis(4000), Slot::from(2)), // Next slot change - true, // Different authors - Some(Duration::from_millis(1500)), // Expected - )] - #[case::blocks_2s_reduce_by_1s( - Duration::from_millis(2000), // Authoring duration - (Duration::from_millis(2000), Slot::from(1)), // Next block - (Duration::from_millis(2000), Slot::from(2)), // Next slot change - true, // Different authors - Some(Duration::from_millis(1000)), // Expected - )] - #[case::blocks_2s_reduce_by_1s_plus_offset( - 
Duration::from_millis(2000), // Authoring duration - (Duration::from_millis(1950), Slot::from(1)), // Next block - (Duration::from_millis(1950), Slot::from(2)), // Next slot change - true, // Different authors - Some(Duration::from_millis(950)), // Expected - )] - #[case::blocks_2s_reduce_to_minimum( - Duration::from_millis(2000), // Authoring duration - (Duration::from_millis(1400), Slot::from(1)), // Next block - (Duration::from_millis(1400), Slot::from(2)), // Next slot change - true, // Different authors - Some(Duration::from_millis(400)), // Expected - )] - #[case::blocks_2s_reduce_below_minimum( - Duration::from_millis(2000), // Authoring duration - (Duration::from_millis(1300), Slot::from(1)), // Next block - (Duration::from_millis(1300), Slot::from(2)), // Next slot change - true, // Different authors - None, // Expected to reduce below minimum - )] - #[case::blocks_2s_same_author( - Duration::from_millis(2000), // Authoring duration - (Duration::from_millis(1400), Slot::from(1)), // Next block - (Duration::from_millis(1400), Slot::from(2)), // Next slot change - false, // Different authors - Some(Duration::from_millis(1400)), // Expected no adjustment for last second. - )] - // Various scenarios for 500ms block production adjustment. 
- #[case::blocks_500ms_fits_next_block( - Duration::from_millis(500), // Authoring duration - (Duration::from_millis(500), Slot::from(1)), // Next block - (Duration::from_millis(2000), Slot::from(2)), // Next slot change - true, // Different authors - Some(Duration::from_millis(500)), // Expected - )] - #[case::blocks_500ms_closer_next_slot( - Duration::from_millis(500), // Authoring duration - (Duration::from_millis(450), Slot::from(1)), // Next block - (Duration::from_millis(2000), Slot::from(2)), // Next slot change - true, // Different authors - Some(Duration::from_millis(450)), // Expected - )] - #[case::blocks_500ms_closer_next_slot_bigger( - Duration::from_millis(500), // Authoring duration - (Duration::from_millis(400), Slot::from(1)), // Next block - (Duration::from_millis(1500), Slot::from(2)), // Next slot change - true, // Different authors - Some(Duration::from_millis(400)), // Expected - )] - #[case::blocks_500ms_reduce_by_1s( - Duration::from_millis(500), // Authoring duration - (Duration::from_millis(500), Slot::from(1)), // Next block - (Duration::from_millis(1000), Slot::from(2)), // Next slot change - true, // Different authors - None, // Expected - )] - #[case::blocks_500ms_reduce_by_1s_closer( - Duration::from_millis(500), // Authoring duration - (Duration::from_millis(500), Slot::from(1)), // Next block - (Duration::from_millis(500), Slot::from(2)), // Next slot change - true, // Different authors - None, // Expected - )] - // If we are producing with 1 collator for 500ms authoring duration, - // we must produce the last two slots and ignore the 1s adjustment. - #[case::blocks_500ms_same_author( - Duration::from_millis(500), // Authoring duration - (Duration::from_millis(410), Slot::from(1)), // Next block - (Duration::from_millis(1000), Slot::from(2)), // Next slot change - false, // Different authors - Some(Duration::from_millis(410)), // Expected no adjustment for last second. 
- )] - #[case::blocks_500ms_same_author_closer( - Duration::from_millis(500), // Authoring duration - (Duration::from_millis(400), Slot::from(1)), // Next block - (Duration::from_millis(400), Slot::from(2)), // Next slot change - false, // Different authors - Some(Duration::from_millis(400)), // Expected no adjustment for last second. - )] - fn test_adjust_authoring_duration( - #[case] authoring_duration: Duration, - #[case] next_block: (Duration, Slot), - #[case] next_slot_change: (Duration, Slot), - #[case] different_authors: bool, - #[case] expected: Option, - ) { - sp_tracing::init_for_tests(); + let time_left = slot_time.time_left_internal(Duration::from_millis(time_now)); - let result = adjust_authoring_duration( - authoring_duration, - next_block, - next_slot_change, - different_authors, - ); - tracing::debug!("Adjusted authoring duration: {:?}", result); - assert_eq!(result, expected); + assert_eq!(time_left.as_millis(), expected_duration, "Duration mismatch"); } } diff --git a/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs b/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs index 51b48905c58e7..e9e2f8bd5b163 100644 --- a/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs +++ b/cumulus/client/consensus/aura/src/collators/slot_based/tests.rs @@ -17,13 +17,13 @@ use super::{ block_builder_task::{ - determine_core, offset_relay_parent_find_descendants, wait_for_current_relay_block, + determine_cores, offset_relay_parent_find_descendants, wait_for_current_relay_block, }, relay_chain_data_cache::{RelayChainData, RelayChainDataCache}, }; use async_trait::async_trait; use codec::Encode; -use cumulus_primitives_core::{ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem}; +use cumulus_primitives_core::CoreSelector; use cumulus_relay_chain_interface::*; use futures::Stream; use polkadot_node_subsystem_util::runtime::ClaimQueueSnapshot; @@ -36,7 +36,7 @@ use sc_consensus_babe::{ AuthorityId, ConsensusLog as 
BabeConsensusLog, NextEpochDescriptor, BABE_ENGINE_ID, }; use sp_core::sr25519; -use sp_runtime::{generic::BlockId, testing::Header as TestHeader, traits::Header}; +use sp_runtime::{generic::BlockId, traits::Header}; use sp_version::RuntimeVersion; use std::{ collections::{BTreeMap, HashMap, VecDeque}, @@ -156,75 +156,18 @@ async fn determine_core_new_relay_parent() { digest: Default::default(), }; - // Create a test para parent header at block 0 (genesis) - let para_parent = TestHeader::new_from_number(0); - // Setup claim queue data for the cache cache.set_test_data(relay_parent.clone(), vec![CoreIndex(0), CoreIndex(1)]); - let result = determine_core(&mut cache, &relay_parent, 1.into(), ¶_parent, 0).await; + let result = determine_cores(&mut cache, &relay_parent, 1.into(), 0).await; let core = result.unwrap(); let core = core.unwrap(); - assert_eq!(core.core_selector(), CoreSelector(0)); + assert_eq!(core.core_info().selector, CoreSelector(0)); assert_eq!(core.core_index(), CoreIndex(0)); assert_eq!(core.total_cores(), 2); } -#[tokio::test] -async fn determine_core_with_core_info() { - let (headers, best_header) = create_header_chain(); - let best_hash = best_header.hash(); - let client = TestRelayClient::new(headers); - let mut cache = RelayChainDataCache::new(client, 1.into()); - - // Create a test relay parent header - let relay_parent = RelayHeader { - parent_hash: best_hash, - number: 101, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - }; - - // Create a para parent header with core info in digest - let core_info = CoreInfo { - selector: CoreSelector(0), - claim_queue_offset: ClaimQueueOffset(0), - number_of_cores: 3.into(), - }; - let mut digest = sp_runtime::generic::Digest::default(); - digest.push(CumulusDigestItem::CoreInfo(core_info).to_digest_item()); - // Add relay parent storage root to make it a non-new relay parent - 
digest.push(cumulus_primitives_core::rpsr_digest::relay_parent_storage_root_item( - *relay_parent.state_root(), - *relay_parent.number(), - )); - - let para_parent = TestHeader { - parent_hash: best_hash.into(), - number: 1, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest, - }; - - // Setup claim queue data for the cache - cache.set_test_data(relay_parent.clone(), vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)]); - - let result = determine_core(&mut cache, &relay_parent, 1.into(), ¶_parent, 0).await; - - match result { - Ok(Some(core)) => { - assert_eq!(core.core_selector(), CoreSelector(1)); // Should be next selector (0 + 1) - assert_eq!(core.core_index(), CoreIndex(1)); - assert_eq!(core.total_cores(), 3); - }, - Ok(None) => panic!("Expected Some core, got None"), - Err(()) => panic!("determine_core returned error"), - } -} - #[tokio::test] async fn determine_core_no_cores_available() { let (headers, _best_hash) = create_header_chain(); @@ -240,221 +183,15 @@ async fn determine_core_no_cores_available() { digest: Default::default(), }; - // Create a test para parent header at block 0 (genesis) - let para_parent = TestHeader::new_from_number(0); - // Setup empty claim queue cache.set_test_data(relay_parent.clone(), vec![]); - let result = determine_core(&mut cache, &relay_parent, 1.into(), ¶_parent, 0).await; + let result = determine_cores(&mut cache, &relay_parent, 1.into(), 0).await; let core = result.unwrap(); assert!(core.is_none()); } -#[tokio::test] -async fn determine_core_selector_overflow() { - let (headers, best_header) = create_header_chain(); - let best_hash = best_header.hash(); - let client = TestRelayClient::new(headers); - let mut cache = RelayChainDataCache::new(client, 1.into()); - - // Create a test relay parent header - let relay_parent = RelayHeader { - parent_hash: best_hash, - number: 101, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - }; - - let 
core_info = CoreInfo { - selector: CoreSelector(1), - claim_queue_offset: ClaimQueueOffset(0), - number_of_cores: 2.into(), - }; - let mut digest = sp_runtime::generic::Digest::default(); - digest.push(CumulusDigestItem::CoreInfo(core_info).to_digest_item()); - // Add relay parent storage root to make it a non-new relay parent - digest.push(cumulus_primitives_core::rpsr_digest::relay_parent_storage_root_item( - *relay_parent.state_root(), - *relay_parent.number(), - )); - - let para_parent = TestHeader { - parent_hash: best_hash.into(), - number: 1, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest, - }; - - // Setup claim queue with only 2 cores - cache.set_test_data(relay_parent.clone(), vec![CoreIndex(0), CoreIndex(1)]); - - let result = determine_core(&mut cache, &relay_parent, 1.into(), ¶_parent, 0).await; - - let core = result.unwrap(); - assert!(core.is_none()); // Should return None when selector overflows -} - -#[tokio::test] -async fn determine_core_uses_last_claimed_core_selector() { - let (headers, best_header) = create_header_chain(); - let best_hash = best_header.hash(); - let client = TestRelayClient::new(headers); - let mut cache = RelayChainDataCache::new(client, 1.into()); - - // Create a test relay parent header - let relay_parent = RelayHeader { - parent_hash: best_hash, - number: 101, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - }; - - // Create a para parent header without core info in digest (non-genesis) - // Need to add relay parent storage root to digest to make it a non-new relay parent - let mut digest = sp_runtime::generic::Digest::default(); - digest.push(cumulus_primitives_core::rpsr_digest::relay_parent_storage_root_item( - *relay_parent.state_root(), - *relay_parent.number(), - )); - - let para_parent = TestHeader { - parent_hash: best_hash.into(), - number: 1, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest, 
- }; - - // Setup claim queue data with last_claimed_core_selector set to 1 - cache.set_test_data_with_last_selector( - relay_parent.clone(), - vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)], - Some(CoreSelector(1)), - ); - - let result = determine_core(&mut cache, &relay_parent, 1.into(), ¶_parent, 0).await; - - match result { - Ok(Some(core)) => { - // Should use last_claimed_core_selector (1) + 1 = 2 - assert_eq!(core.core_selector(), CoreSelector(2)); - assert_eq!(core.core_index(), CoreIndex(2)); - assert_eq!(core.total_cores(), 3); - }, - Ok(None) => panic!("Expected Some core, got None"), - Err(()) => panic!("determine_core returned error"), - } -} - -#[tokio::test] -async fn determine_core_uses_last_claimed_core_selector_wraps_around() { - let (headers, best_header) = create_header_chain(); - let best_hash = best_header.hash(); - let client = TestRelayClient::new(headers); - let mut cache = RelayChainDataCache::new(client, 1.into()); - - // Create a test relay parent header - let relay_parent = RelayHeader { - parent_hash: best_hash, - number: 101, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - }; - - // Create a para parent header without core info in digest (non-genesis) - // Need to add relay parent storage root to digest to make it a non-new relay parent - let mut digest = sp_runtime::generic::Digest::default(); - digest.push(cumulus_primitives_core::rpsr_digest::relay_parent_storage_root_item( - *relay_parent.state_root(), - *relay_parent.number(), - )); - - let para_parent = TestHeader { - parent_hash: best_hash.into(), - number: 1, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest, - }; - - // Setup claim queue data with last_claimed_core_selector set to 2 (last index) - // Next selector should wrap around to out of bounds and return None - cache.set_test_data_with_last_selector( - relay_parent.clone(), - vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)], - 
Some(CoreSelector(2)), - ); - - let result = determine_core(&mut cache, &relay_parent, 1.into(), ¶_parent, 0).await; - - match result { - Ok(Some(_)) => panic!("Expected None due to selector overflow"), - Ok(None) => { - // This is expected - selector 2 + 1 = 3, but only cores 0,1,2 available - }, - Err(()) => panic!("determine_core returned error"), - } -} - -#[tokio::test] -async fn determine_core_no_last_claimed_core_selector() { - let (headers, best_header) = create_header_chain(); - let best_hash = best_header.hash(); - let client = TestRelayClient::new(headers); - let mut cache = RelayChainDataCache::new(client, 1.into()); - - // Create a test relay parent header - let relay_parent = RelayHeader { - parent_hash: best_hash, - number: 101, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - }; - - // Create a para parent header without core info in digest (non-genesis) - // Need to add relay parent storage root to digest to make it a non-new relay parent - let mut digest = sp_runtime::generic::Digest::default(); - digest.push(cumulus_primitives_core::rpsr_digest::relay_parent_storage_root_item( - *relay_parent.state_root(), - *relay_parent.number(), - )); - - let para_parent = TestHeader { - parent_hash: best_hash.into(), - number: 1, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest, - }; - - // Setup claim queue data with no last_claimed_core_selector (None) - cache.set_test_data_with_last_selector( - relay_parent.clone(), - vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)], - None, - ); - - let result = determine_core(&mut cache, &relay_parent, 1.into(), ¶_parent, 0).await; - - match result { - Ok(Some(core)) => { - // Should start from selector 0 + 1 = 1 when no last selector - assert_eq!(core.core_selector(), CoreSelector(1)); - assert_eq!(core.core_index(), CoreIndex(1)); - assert_eq!(core.total_cores(), 3); - }, - Ok(None) => panic!("Expected Some core, got None"), - 
Err(()) => panic!("determine_core returned error"), - } -} - #[derive(Clone)] struct TestRelayClient { headers: HashMap, @@ -741,14 +478,13 @@ fn create_header_chain() -> (HashMap, RelayHeader) { // Test extension for RelayChainDataCache impl RelayChainDataCache { fn set_test_data(&mut self, relay_parent_header: RelayHeader, cores: Vec) { - self.set_test_data_with_last_selector(relay_parent_header, cores, None); + self.set_test_data_with_last_selector(relay_parent_header, cores); } fn set_test_data_with_last_selector( &mut self, relay_parent_header: RelayHeader, cores: Vec, - last_claimed_core_selector: Option, ) { let relay_parent_hash = relay_parent_header.hash(); @@ -763,7 +499,6 @@ impl RelayChainDataCache { relay_parent_header, claim_queue: claim_queue_snapshot, max_pov_size: 1024 * 1024, - last_claimed_core_selector, }; self.insert_test_data(relay_parent_hash, data); diff --git a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs index 4c228f0cf6d2d..9398f697583d3 100644 --- a/cumulus/client/consensus/aura/src/equivocation_import_queue.rs +++ b/cumulus/client/consensus/aura/src/equivocation_import_queue.rs @@ -306,7 +306,7 @@ mod test { use super::*; use codec::Encode; use cumulus_test_client::{ - runtime::Block, seal_block, Client, InitBlockBuilder, TestClientBuilder, + runtime::Block, seal_block, BuildBlockBuilder, Client, TestClientBuilder, TestClientBuilderExt, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; @@ -344,7 +344,11 @@ mod test { ..Default::default() }; - let block_builder = client.init_block_builder(Some(validation_data), sproof); + let block_builder = client + .init_block_builder_builder() + .with_validation_data(validation_data) + .with_relay_sproof_builder(sproof) + .build(); let block = block_builder.block_builder.build().unwrap(); let mut blocks = Vec::new(); diff --git a/cumulus/client/consensus/aura/src/lib.rs 
b/cumulus/client/consensus/aura/src/lib.rs index bbfa6fe089eb4..78a07407db0b4 100644 --- a/cumulus/client/consensus/aura/src/lib.rs +++ b/cumulus/client/consensus/aura/src/lib.rs @@ -23,9 +23,8 @@ //! For more information about AuRa, the Substrate crate should be checked. use codec::Encode; -use cumulus_primitives_core::PersistedValidationData; - -use cumulus_primitives_core::relay_chain::HeadData; +use cumulus_primitives_core::{relay_chain::HeadData, PersistedValidationData}; +use polkadot_node_primitives::PoV; use polkadot_primitives::{BlockNumber as RBlockNumber, Hash as RHash}; use sp_runtime::traits::{Block as BlockT, NumberFor}; use std::{fs, fs::File, path::PathBuf}; @@ -33,7 +32,6 @@ use std::{fs, fs::File, path::PathBuf}; mod import_queue; pub use import_queue::{build_verifier, import_queue, BuildVerifierParams, ImportQueueParams}; -use polkadot_node_primitives::PoV; pub use sc_consensus_aura::{ slot_duration, standalone::slot_duration_at, AuraVerifier, BuildAuraWorkerParams, SlotProportion, diff --git a/cumulus/client/consensus/common/src/parent_search.rs b/cumulus/client/consensus/common/src/parent_search.rs index fca93f1f87997..2914498eaed9f 100644 --- a/cumulus/client/consensus/common/src/parent_search.rs +++ b/cumulus/client/consensus/common/src/parent_search.rs @@ -44,7 +44,8 @@ pub struct ParentSearchParams { pub ancestry_lookback: usize, } -/// Result of the parent search, containing the included block and the best parent to build on. +/// A potential parent block returned from [`find_parent_for_building`] +#[derive(PartialEq, Clone)] pub struct ParentSearchResult { /// The header of the included block (confirmed on relay chain). 
pub included_header: B::Header, diff --git a/cumulus/client/consensus/common/src/tests.rs b/cumulus/client/consensus/common/src/tests.rs index d82ccd7654503..03292588b1a9f 100644 --- a/cumulus/client/consensus/common/src/tests.rs +++ b/cumulus/client/consensus/common/src/tests.rs @@ -32,7 +32,7 @@ use cumulus_relay_chain_interface::{ }; use cumulus_test_client::{ runtime::{Block, Hash, Header}, - Backend, Client, InitBlockBuilder, TestClientBuilder, TestClientBuilderExt, + Backend, BuildBlockBuilder, Client, TestClientBuilder, TestClientBuilderExt, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use futures::{channel::mpsc, executor::block_on, select, FutureExt, Stream, StreamExt}; @@ -324,19 +324,22 @@ fn sproof_with_parent(parent: HeadData) -> RelayStateSproofBuilder { x } -fn build_block( +fn build_block( builder: &B, sproof: RelayStateSproofBuilder, at: Option, timestamp: Option, relay_parent: Option, ) -> Block { - let cumulus_test_client::BlockBuilderAndSupportData { block_builder, .. } = match at { - Some(at) => match timestamp { - Some(ts) => builder.init_block_builder_with_timestamp(at, None, sproof, ts), - None => builder.init_block_builder_at(at, None, sproof), - }, - None => builder.init_block_builder(None, sproof), + let cumulus_test_client::BlockBuilderAndSupportData { block_builder, .. 
} = { + let mut bb = builder.init_block_builder_builder().with_relay_sproof_builder(sproof); + if let Some(at) = at { + bb = bb.at(at); + } + if let Some(ts) = timestamp { + bb = bb.with_timestamp(ts); + } + bb.build() }; let mut block = block_builder.build().unwrap().block; @@ -570,7 +573,12 @@ async fn follow_finalized_does_not_stop_on_unknown_block() { let unknown_block = { let sproof = sproof_with_parent_by_hash(&client, block.hash()); - let block_builder = client.init_block_builder_at(block.hash(), None, sproof).block_builder; + let block_builder = client + .init_block_builder_builder() + .at(block.hash()) + .with_relay_sproof_builder(sproof) + .build() + .block_builder; block_builder.build().unwrap().block }; @@ -625,7 +633,12 @@ async fn follow_new_best_sets_best_after_it_is_imported() { let unknown_block = { let sproof = sproof_with_parent_by_hash(&client, block.hash()); - let block_builder = client.init_block_builder_at(block.hash(), None, sproof).block_builder; + let block_builder = client + .init_block_builder_builder() + .at(block.hash()) + .with_relay_sproof_builder(sproof) + .build() + .block_builder; block_builder.build().unwrap().block }; diff --git a/cumulus/client/proof-size-recording/Cargo.toml b/cumulus/client/proof-size-recording/Cargo.toml new file mode 100644 index 0000000000000..1b3f206bbbef2 --- /dev/null +++ b/cumulus/client/proof-size-recording/Cargo.toml @@ -0,0 +1,19 @@ +[package] +authors.workspace = true +name = "cumulus-client-proof-size-recording" +version = "0.1.0" +edition.workspace = true +description = "Storage proof size recording utilities." 
+license = "GPL-3.0-or-later WITH Classpath-exception-2.0" +homepage.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[dependencies] +codec = { workspace = true, default-features = true } +sc-client-api = { workspace = true, default-features = true } +sp-blockchain = { workspace = true, default-features = true } +sp-runtime = { workspace = true, default-features = true } +sp-trie = { workspace = true, default-features = true } diff --git a/cumulus/client/proof-size-recording/src/lib.rs b/cumulus/client/proof-size-recording/src/lib.rs new file mode 100644 index 0000000000000..a8c09f227ab1b --- /dev/null +++ b/cumulus/client/proof-size-recording/src/lib.rs @@ -0,0 +1,117 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// This file is part of Cumulus. +// SPDX-License-Identifier: GPL-3.0-or-later WITH Classpath-exception-2.0 + +// Cumulus is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Cumulus is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Cumulus. If not, see . + +//! Proof size recording utilities. 
+ +use codec::{Decode, Encode}; +use sc_client_api::{ + backend::AuxStore, + client::{AuxDataOperations, FinalityNotification, PreCommitActions}, +}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_runtime::traits::Block as BlockT; +use sp_trie::proof_size_extension::RecordedProofSizeEstimations; +use std::sync::Arc; + +const PROOF_SIZE_RECORDING_VERSION: &[u8] = b"cumulus_proof_size_recording_version"; +const PROOF_SIZE_RECORDING_CURRENT_VERSION: u32 = 1; + +/// The aux storage key used to store the proof size recordings for the given block hash. +fn proof_size_recording_key(block_hash: H) -> Vec { + (b"cumulus_proof_size_recording", block_hash).encode() +} + +fn load_decode(backend: &B, key: &[u8]) -> ClientResult> +where + B: AuxStore, + T: Decode, +{ + let corrupt = |e: codec::Error| { + ClientError::Backend(format!("Proof size recording DB is corrupted. Decode error: {}", e)) + }; + match backend.get_aux(key)? { + None => Ok(None), + Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt), + } +} + +/// Prepare aux storage key-value pairs for persisting proof size recordings. +/// +/// Returns the key-value pairs that need to be written to the aux storage. +pub fn prepare_proof_size_recording_aux_data( + block_hash: H, + recordings: Vec, +) -> impl Iterator, Vec)> { + let current_version = PROOF_SIZE_RECORDING_CURRENT_VERSION.encode(); + let key = proof_size_recording_key(block_hash); + let recordings = recordings.encode(); + + [(key, recordings), (PROOF_SIZE_RECORDING_VERSION.to_vec(), current_version)].into_iter() +} + +/// Load the proof size recordings associated with a block. 
+pub fn load_proof_size_recording( + backend: &B, + block_hash: H, +) -> ClientResult> { + let version = load_decode::<_, u32>(backend, PROOF_SIZE_RECORDING_VERSION)?; + + match version { + None => Ok(None), + Some(PROOF_SIZE_RECORDING_CURRENT_VERSION) => { + load_decode::<_, Vec>(backend, proof_size_recording_key(block_hash).as_slice()) + .map(|recordings| recordings.map(Into::into)) + }, + Some(other) => Err(ClientError::Backend(format!( + "Unsupported proof size recording DB version: {:?}", + other + ))), + } +} + +/// Cleanup auxiliary storage for finalized blocks. +/// +/// This function removes proof size recordings for blocks that are no longer needed +/// after finalization. It processes the finalized blocks and their stale heads to +/// determine which recordings can be safely removed. +fn aux_storage_cleanup(notification: &FinalityNotification) -> AuxDataOperations +where + Block: BlockT, +{ + // Convert the hashes to deletion operations + notification + .stale_blocks + .iter() + .map(|b| (proof_size_recording_key(b.hash), None)) + .collect() +} + +/// Register a finality action for cleaning up proof size recordings. +/// +/// This should be called during consensus initialization to automatically clean up +/// proof size recordings when blocks are finalized. 
+pub fn register_proof_size_recording_cleanup(client: Arc) +where + C: PreCommitActions + 'static, + Block: BlockT, +{ + let on_finality = move |notification: &FinalityNotification| -> AuxDataOperations { + aux_storage_cleanup(notification) + }; + + client.register_finality_action(Box::new(on_finality)); +} diff --git a/cumulus/client/service/Cargo.toml b/cumulus/client/service/Cargo.toml index a300807d81498..11f965cc93cac 100644 --- a/cumulus/client/service/Cargo.toml +++ b/cumulus/client/service/Cargo.toml @@ -50,6 +50,7 @@ cumulus-client-collator = { workspace = true, default-features = true } cumulus-client-consensus-common = { workspace = true, default-features = true } cumulus-client-network = { workspace = true, default-features = true } cumulus-client-pov-recovery = { workspace = true, default-features = true } +cumulus-client-proof-size-recording = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } cumulus-primitives-proof-size-hostfunction = { workspace = true, default-features = true } cumulus-relay-chain-inprocess-interface = { workspace = true, default-features = true } diff --git a/cumulus/client/service/src/lib.rs b/cumulus/client/service/src/lib.rs index 4c8d3e2a827e8..3f7efee15ea1b 100644 --- a/cumulus/client/service/src/lib.rs +++ b/cumulus/client/service/src/lib.rs @@ -22,6 +22,7 @@ use cumulus_client_cli::CollatorOptions; use cumulus_client_network::{AssumeSybilResistance, RequireSecondedInBlockAnnounce}; use cumulus_client_pov_recovery::{PoVRecovery, RecoveryDelayRange, RecoveryHandle}; +use cumulus_client_proof_size_recording::load_proof_size_recording; use cumulus_primitives_core::{CollectCollationInfo, ParaId}; pub use cumulus_primitives_proof_size_hostfunction::storage_proof_size; use cumulus_relay_chain_inprocess_interface::build_inprocess_relay_chain; @@ -31,7 +32,8 @@ use futures::{channel::mpsc, StreamExt}; use polkadot_primitives::{CandidateEvent, CollatorPair, 
OccupiedCoreAssumption}; use prometheus::{Histogram, HistogramOpts, Registry}; use sc_client_api::{ - Backend as BackendT, BlockBackend, BlockchainEvents, Finalizer, ProofProvider, UsageProvider, + AuxStore, Backend as BackendT, BlockBackend, BlockchainEvents, Finalizer, ProofProvider, + UsageProvider, }; use sc_consensus::{ import_queue::{ImportQueue, ImportQueueService}, @@ -56,7 +58,7 @@ use sp_runtime::{ traits::{Block as BlockT, BlockIdTo, Header}, SaturatedConversion, Saturating, }; -use sp_trie::proof_size_extension::ProofSizeExt; +use sp_trie::proof_size_extension::{ProofSizeExt, ReplayProofSizeProvider}; use std::{ sync::Arc, time::{Duration, Instant}, @@ -622,13 +624,19 @@ impl ParachainTracingExecuteBlock { impl TracingExecuteBlock for ParachainTracingExecuteBlock where Block: BlockT, - Client: ProvideRuntimeApi + Send + Sync, + Client: ProvideRuntimeApi + AuxStore + Send + Sync, Client::Api: Core, { - fn execute_block(&self, _: Block::Hash, block: Block) -> sp_blockchain::Result<()> { + fn execute_block(&self, orig_hash: Block::Hash, block: Block) -> sp_blockchain::Result<()> { let mut runtime_api = self.client.runtime_api(); let storage_proof_recorder = ProofRecorder::::default(); - runtime_api.register_extension(ProofSizeExt::new(storage_proof_recorder.clone())); + + let proof_size_ext = load_proof_size_recording(&*self.client, orig_hash)?.map_or_else( + || ProofSizeExt::new(storage_proof_recorder.clone()), + |recordings| ProofSizeExt::new(ReplayProofSizeProvider::from(recordings)), + ); + runtime_api.register_extension(proof_size_ext); + runtime_api.record_proof_with_recorder(storage_proof_recorder); runtime_api diff --git a/cumulus/pallets/parachain-system/src/benchmarking.rs b/cumulus/pallets/parachain-system/src/benchmarking.rs index ebe3c66785f52..c8e7d8a4da46b 100644 --- a/cumulus/pallets/parachain-system/src/benchmarking.rs +++ b/cumulus/pallets/parachain-system/src/benchmarking.rs @@ -27,7 +27,7 @@ use crate::{ 
parachain_inherent::InboundDownwardMessages, }; use cumulus_primitives_core::{ - relay_chain::Hash as RelayHash, BundleInfo, CoreInfo, InboundDownwardMessage, + relay_chain::Hash as RelayHash, BlockBundleInfo, CoreInfo, InboundDownwardMessage, }; use frame_benchmarking::v2::*; use frame_support::{ @@ -98,7 +98,7 @@ mod benchmarks { frame_system::Pallet::::set_extrinsic_index(1); frame_system::Pallet::::deposit_log( - BundleInfo { index: 0, maybe_last: false }.to_digest_item(), + BlockBundleInfo { index: 0, is_last: false }.to_digest_item(), ); frame_system::Pallet::::deposit_log( CoreInfo { @@ -157,7 +157,7 @@ mod benchmarks { frame_system::Pallet::::set_extrinsic_index(1); frame_system::Pallet::::deposit_log( - BundleInfo { index: 0, maybe_last: false }.to_digest_item(), + BlockBundleInfo { index: 0, is_last: false }.to_digest_item(), ); frame_system::Pallet::::deposit_log( CoreInfo { @@ -219,7 +219,7 @@ mod benchmarks { frame_system::Pallet::::set_extrinsic_index(1); frame_system::Pallet::::deposit_log( - BundleInfo { index: 0, maybe_last: false }.to_digest_item(), + BlockBundleInfo { index: 0, is_last: false }.to_digest_item(), ); frame_system::Pallet::::deposit_log( CoreInfo { diff --git a/cumulus/pallets/parachain-system/src/block_weight/mock.rs b/cumulus/pallets/parachain-system/src/block_weight/mock.rs index 8b2a738f26dc2..29d3d22774ad1 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mock.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mock.rs @@ -18,7 +18,7 @@ use super::{transaction_extension::DynamicMaxBlockWeight, *}; use crate::{self as parachain_system, MessagingStateSnapshot, PreviousCoreCount}; use codec::Compact; use cumulus_primitives_core::{ - BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, + BlockBundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, }; use frame_support::{ construct_runtime, derive_impl, @@ -340,7 +340,7 @@ pub type ExecutiveOnlyOperational = 
frame_executive::Executive< pub struct TestExtBuilder { num_cores: Option, bundle_index: Option, - bundle_maybe_last: bool, + bundle_is_last: bool, previous_core_count: Option, } @@ -351,7 +351,7 @@ impl Default for TestExtBuilder { Self { num_cores: None, bundle_index: None, - bundle_maybe_last: false, + bundle_is_last: false, previous_core_count: None, } } @@ -406,8 +406,8 @@ impl TestExtBuilder { // Add bundle info if specified if let Some(bundle_index) = self.bundle_index { let bundle_info = - BundleInfo { index: bundle_index, maybe_last: self.bundle_maybe_last }; - let digest = CumulusDigestItem::BundleInfo(bundle_info).to_digest_item(); + BlockBundleInfo { index: bundle_index, is_last: self.bundle_is_last }; + let digest = CumulusDigestItem::BlockBundleInfo(bundle_info).to_digest_item(); frame_system::Pallet::::deposit_log(digest); } diff --git a/cumulus/pallets/parachain-system/src/block_weight/mod.rs b/cumulus/pallets/parachain-system/src/block_weight/mod.rs index d594b0aa2a7cc..4f15d5eb238db 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/mod.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/mod.rs @@ -87,9 +87,11 @@ pub use pre_inherents_hook::DynamicMaxBlockWeightHooks; pub use transaction_extension::DynamicMaxBlockWeight; const LOG_TARGET: &str = "runtime::parachain-system::block-weight"; + /// Maximum ref time per core const MAX_REF_TIME_PER_CORE_NS: u64 = DEFAULT_BACKING_EXECUTION_TIMEOUT.as_secs() * WEIGHT_REF_TIME_PER_SECOND; + /// The available weight per core on the relay chain. pub(crate) const FULL_CORE_WEIGHT: Weight = Weight::from_parts(MAX_REF_TIME_PER_CORE_NS, MAX_POV_SIZE as u64); @@ -297,14 +299,14 @@ fn is_first_block_in_core() -> Option { /// Is this the first block in a core? (takes digest as parameter) /// -/// Returns `None` if the [`CumulusDigestItem::BundleInfo`] digest is not set. +/// Returns `None` if the [`CumulusDigestItem::BlockBundleInfo`] digest is not set. 
fn is_first_block_in_core_with_digest(digest: &Digest) -> Option { - CumulusDigestItem::find_bundle_info(digest).map(|bi| bi.index == 0) + CumulusDigestItem::find_block_bundle_info(digest).map(|bi| bi.index == 0) } /// Is the `BlockWeight` already above the target block weight? /// -/// Returns `None` if the [`CumulusDigestItem::BundleInfo`] digest is not set. +/// Returns `None` if the [`CumulusDigestItem::BlockBundleInfo`] digest is not set. fn block_weight_over_target_block_weight>() -> bool { let target_block_weight = MaxParachainBlockWeight::::target_block_weight(); diff --git a/cumulus/pallets/parachain-system/src/block_weight/tests.rs b/cumulus/pallets/parachain-system/src/block_weight/tests.rs index be27cb89049c9..430eaa9b08669 100644 --- a/cumulus/pallets/parachain-system/src/block_weight/tests.rs +++ b/cumulus/pallets/parachain-system/src/block_weight/tests.rs @@ -18,7 +18,7 @@ use super::{mock::*, transaction_extension::DynamicMaxBlockWeight, *}; use assert_matches::assert_matches; use codec::Compact; use cumulus_primitives_core::{ - BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, + BlockBundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, }; use frame_support::{ assert_ok, @@ -189,16 +189,17 @@ fn test_is_first_block_in_core_functions() { assert!(super::is_first_block_in_core_with_digest(&empty_digest).is_none()); // Test with bundle info index = 0 - should return true - let bundle_info_first = BundleInfo { index: 0, maybe_last: false }; - let digest_item_first = CumulusDigestItem::BundleInfo(bundle_info_first).to_digest_item(); + let bundle_info_first = BlockBundleInfo { index: 0, is_last: false }; + let digest_item_first = + CumulusDigestItem::BlockBundleInfo(bundle_info_first).to_digest_item(); let mut digest_first = Digest::default(); digest_first.push(digest_item_first); assert!(super::is_first_block_in_core_with_digest(&digest_first).unwrap()); // Test with bundle info index > 0 - should return false - 
let bundle_info_not_first = BundleInfo { index: 5, maybe_last: true }; + let bundle_info_not_first = BlockBundleInfo { index: 5, is_last: true }; let digest_item_not_first = - CumulusDigestItem::BundleInfo(bundle_info_not_first).to_digest_item(); + CumulusDigestItem::BlockBundleInfo(bundle_info_not_first).to_digest_item(); let mut digest_not_first = Digest::default(); digest_not_first.push(digest_item_not_first); assert!(!super::is_first_block_in_core_with_digest(&digest_not_first).unwrap()); diff --git a/cumulus/pallets/parachain-system/src/lib.rs b/cumulus/pallets/parachain-system/src/lib.rs index 41d6e43cd7c18..83ac8a0044b4e 100644 --- a/cumulus/pallets/parachain-system/src/lib.rs +++ b/cumulus/pallets/parachain-system/src/lib.rs @@ -34,9 +34,9 @@ use codec::{Decode, DecodeLimit, Encode}; use core::cmp; use cumulus_primitives_core::{ relay_chain::{self, UMPSignal, UMP_SEPARATOR}, - AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, CumulusDigestItem, - GetChannelInfo, ListChannelInfos, MessageSendError, OutboundHrmpMessage, ParaId, - PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler, + AbridgedHostConfiguration, ChannelInfo, ChannelStatus, CollationInfo, CoreInfo, + CumulusDigestItem, GetChannelInfo, ListChannelInfos, MessageSendError, OutboundHrmpMessage, + ParaId, PersistedValidationData, UpwardMessage, UpwardMessageSender, XcmpMessageHandler, XcmpMessageSource, }; use cumulus_primitives_parachain_inherent::{v0, MessageQueueChain, ParachainInherentData}; @@ -125,6 +125,8 @@ pub struct PoVMessages { pub ump_msg_count: u32, /// Cumulative count of HRMP outbound messages sent in this PoV. pub hrmp_outbound_count: u32, + /// Recipients already used for HRMP outbound messages in this PoV. + pub hrmp_outbound_recipients: Vec, } /// Something that can check the associated relay block number. 
@@ -337,7 +339,7 @@ pub mod pallet { .map_or(0, |ci| ci.selector.0); let current_bundle_index = - CumulusDigestItem::find_bundle_info(&frame_system::Pallet::::digest()) + CumulusDigestItem::find_block_bundle_info(&frame_system::Pallet::::digest()) .map_or(0, |bi| bi.index); let mut pov_tracker = PoVMessagesTracker::::get() @@ -408,22 +410,18 @@ pub mod pallet { pov_tracker.ump_msg_count = pov_tracker.ump_msg_count.saturating_add(num); - if let Some(core_info) = - CumulusDigestItem::find_core_info(&frame_system::Pallet::::digest()) - { - PendingUpwardSignals::::append( - UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset) - .encode(), - ); + let digest = frame_system::Pallet::::digest(); - PreviousCoreCount::::put(core_info.number_of_cores); - } else { - // Without the digest, we assume that it is `1`. - PreviousCoreCount::::put(Compact(1u16)); - } + let core_info = CumulusDigestItem::find_core_info(&digest); + PreviousCoreCount::::put( + core_info.as_ref().map_or(Compact(1u16), |ci| ci.number_of_cores), + ); - // Send the pending UMP signals. - Self::send_ump_signals(); + // Only send UMP signals on the last block of a PoV. + // For single-block PoVs (no BlockBundleInfo), always send signals. + if CumulusDigestItem::is_last_block_in_core(&digest).unwrap_or(true) { + Self::send_ump_signals(core_info); + } // If the total size of the pending messages is less than the threshold, // we decrease the fee factor, since the queue is less congested. @@ -459,12 +457,17 @@ pub mod pallet { // Note: this internally calls the `GetChannelInfo` implementation for this // pallet, which draws on the `RelevantMessagingState`. That in turn has // been adjusted above to reflect the correct limits in all channels. 
- let outbound_messages = - T::OutboundXcmpMessageSource::take_outbound_messages(maximum_channels) - .into_iter() - .map(|(recipient, data)| OutboundHrmpMessage { recipient, data }) - .collect::>(); + let outbound_messages = T::OutboundXcmpMessageSource::take_outbound_messages( + maximum_channels, + &pov_tracker.hrmp_outbound_recipients, + ) + .into_iter() + .map(|(recipient, data)| OutboundHrmpMessage { recipient, data }) + .collect::>(); + pov_tracker + .hrmp_outbound_recipients + .extend(outbound_messages.iter().map(|m| m.recipient)); pov_tracker.hrmp_outbound_count = pov_tracker.hrmp_outbound_count.saturating_add(outbound_messages.len() as u32); PoVMessagesTracker::::put(pov_tracker); @@ -768,10 +771,9 @@ pub mod pallet { ::on_validation_data(&vfp); - if let Some(collator_peer_id) = collator_peer_id { - PendingUpwardSignals::::append( - UMPSignal::ApprovedPeer(collator_peer_id).encode(), - ); + match collator_peer_id { + Some(peer_id) => PendingApprovedPeer::::put(peer_id), + None => PendingApprovedPeer::::kill(), } total_weight.saturating_accrue(Self::enqueue_inbound_downward_messages( @@ -1018,6 +1020,11 @@ pub mod pallet { #[pallet::storage] pub type PendingUpwardSignals = StorageValue<_, Vec, ValueQuery>; + /// The approved peer id to be sent as a UMP signal on the last block of the PoV. + #[pallet::storage] + pub type PendingApprovedPeer = + StorageValue<_, relay_chain::ApprovedPeerId, OptionQuery>; + /// The factor to multiply the base delivery fee by for UMP. 
#[pallet::storage] pub type UpwardDeliveryFeeFactor = @@ -1660,8 +1667,19 @@ impl Pallet { } /// Send the pending ump signals - fn send_ump_signals() { - let ump_signals = PendingUpwardSignals::::take(); + fn send_ump_signals(core_info: Option) { + let mut ump_signals = PendingUpwardSignals::::take(); + + if let Some(core_info) = core_info { + ump_signals.push( + UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset).encode(), + ); + } + + if let Some(approved_peer) = PendingApprovedPeer::::take() { + ump_signals.push(UMPSignal::ApprovedPeer(approved_peer).encode()); + } + if !ump_signals.is_empty() { UpwardMessages::::append(UMP_SEPARATOR); ump_signals.into_iter().for_each(|s| UpwardMessages::::append(s)); diff --git a/cumulus/pallets/parachain-system/src/mock.rs b/cumulus/pallets/parachain-system/src/mock.rs index 15acd522d8ff0..be722a838aa1a 100644 --- a/cumulus/pallets/parachain-system/src/mock.rs +++ b/cumulus/pallets/parachain-system/src/mock.rs @@ -150,8 +150,12 @@ pub fn send_message(dest: ParaId, message: Vec) { } impl XcmpMessageSource for FromThreadLocal { - fn take_outbound_messages(maximum_channels: usize) -> Vec<(ParaId, Vec)> { - let mut ids = std::collections::BTreeSet::::new(); + fn take_outbound_messages( + maximum_channels: usize, + excluded_recipients: &[ParaId], + ) -> Vec<(ParaId, Vec)> { + let mut ids = + std::collections::BTreeSet::::from_iter(excluded_recipients.iter().copied()); let mut taken_messages = 0; let mut taken_bytes = 0; let mut result = Vec::new(); diff --git a/cumulus/pallets/parachain-system/src/tests.rs b/cumulus/pallets/parachain-system/src/tests.rs index eb295f5d78fef..4d739ab46cf6f 100755 --- a/cumulus/pallets/parachain-system/src/tests.rs +++ b/cumulus/pallets/parachain-system/src/tests.rs @@ -1790,9 +1790,9 @@ fn ump_signals_are_sent_correctly() { vec![ b"Test".to_vec(), UMP_SEPARATOR, + UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset).encode(), 
UMPSignal::ApprovedPeer(ApprovedPeerId::try_from(b"12345".to_vec()).unwrap()) .encode(), - UMPSignal::SelectCore(core_info.selector, core_info.claim_queue_offset).encode(), ], ), ]); diff --git a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs index a4de351c50cba..277c8c4f92579 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/implementation.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/implementation.rs @@ -23,7 +23,7 @@ use cumulus_primitives_core::{ relay_chain::{ BlockNumber as RNumber, Hash as RHash, UMPSignal, MAX_HEAD_DATA_SIZE, UMP_SEPARATOR, }, - ClaimQueueOffset, CoreSelector, ParachainBlockData, PersistedValidationData, + ClaimQueueOffset, CoreSelector, CumulusDigestItem, ParachainBlockData, PersistedValidationData, }; use frame_support::{ traits::{ExecuteBlock, Get, IsSubType}, @@ -144,33 +144,7 @@ where let (blocks, proof) = block_data.into_inner(); - assert_eq!( - *blocks - .first() - .expect("BlockData should have at least one block") - .header() - .parent_hash(), - parent_header.hash(), - "Parachain head needs to be the parent of the first block" - ); - - blocks.iter().fold(parent_header.hash(), |p, b| { - assert_eq!( - p, - *b.header().parent_hash(), - "Not a valid chain of blocks :(; {:?} not a parent of {:?}?", - array_bytes::bytes2hex("0x", p.as_ref()), - array_bytes::bytes2hex("0x", b.header().parent_hash().as_ref()), - ); - let encoded_header_size = b.header().encoded_size(); - assert!( - encoded_header_size <= MAX_HEAD_DATA_SIZE as usize, - "Header size {} exceeds MAX_HEAD_DATA_SIZE {}", - encoded_header_size, - MAX_HEAD_DATA_SIZE - ); - b.header().hash() - }); + verify_blocks_form_chain::(&blocks, &parent_header); let mut processed_downward_messages = 0; let mut upward_messages = BoundedVec::default(); @@ -202,9 +176,8 @@ where ) .build(); - // We use the same recorder when executing all blocks. 
So, each node only contributes once - // to the total size of the storage proof. This recorder should only be used for - // `execute_block`. + // Each node only contributes once to the total size of the storage proof. So, we keep track + // of them inside `seen_nodes` to always return the correct proof size. let mut execute_recorder = SizeOnlyRecorderProvider::with_seen_nodes(seen_nodes.clone()); // `backend` with the `execute_recorder`. As the `execute_recorder`, this should only be // used for `execute_block`. @@ -212,8 +185,6 @@ where .with_recorder(execute_recorder.clone()) .build(); - // We let all blocks contribute to the same overlay. Data written by a previous block will - // be directly accessible without going to the db. let mut overlay = OverlayedChanges::default(); parent_header = block.header().clone(); @@ -279,9 +250,7 @@ where found_separator = true; None } else if found_separator { - if upward_message_signals.iter().all(|s| *s != m) { - upward_message_signals.push(m); - } + upward_message_signals.push(m); None } else { // No signal or separator @@ -375,6 +344,8 @@ where .expect("UMPSignals does not fit in UMPMessages"); } + horizontal_messages.sort_by(|a, b| a.recipient.cmp(&b.recipient)); + ValidationResult { head_data: head_data.expect("HeadData not set"), new_validation_code: new_validation_code.map(Into::into), @@ -403,6 +374,82 @@ fn validate_validation_data( ); } +fn verify_blocks_form_chain(blocks: &[B::LazyBlock], parent_header: &B::Header) { + let num_blocks = blocks.len(); + + // Check first block's parent matches the given parent_header + assert_eq!( + *blocks + .first() + .expect("BlockData should have at least one block") + .header() + .parent_hash(), + parent_header.hash(), + "Parachain head needs to be the parent of the first block" + ); + + let mut first_block_has_bundle_info: Option = None; + + blocks.iter().enumerate().fold( + parent_header.hash(), + |expected_parent, (block_index, block)| { + // Check chain validity + assert_eq!( 
+ expected_parent, + *block.header().parent_hash(), + "Not a valid chain of blocks :(; {:?} not a parent of {:?}?", + array_bytes::bytes2hex("0x", expected_parent.as_ref()), + array_bytes::bytes2hex("0x", block.header().parent_hash().as_ref()), + ); + + let encoded_header_size = block.header().encoded_size(); + assert!( + encoded_header_size <= MAX_HEAD_DATA_SIZE as usize, + "Header size {encoded_header_size} exceeds MAX_HEAD_DATA_SIZE {MAX_HEAD_DATA_SIZE}", + ); + + // Validate BlockBundleInfo consistency + let bundle_info = CumulusDigestItem::find_block_bundle_info(block.header().digest()); + match (first_block_has_bundle_info, &bundle_info) { + (None, info) => { + first_block_has_bundle_info = Some(info.is_some()); + }, + (Some(true), None) => { + panic!("All blocks in a bundled PoV must include `BlockBundleInfo`"); + }, + (Some(false), _) => { + panic!("A PoV without `BlockBundleInfo` may only contain a single block"); + }, + _ => {}, + } + + if let Some(ref info) = bundle_info { + assert_eq!( + info.index as usize, block_index, + "BlockBundleInfo index mismatch: expected {block_index}, got {}", + info.index + ); + + if block_index + 1 < num_blocks { + assert!( + !CumulusDigestItem::is_last_block_in_core(block.header().digest()).unwrap_or(false), + "Intermediate block at index {block_index} is marked as last block in core, \ + but more blocks follow in the PoV", + ); + } else if !CumulusDigestItem::is_last_block_in_core(block.header().digest()) + .unwrap_or(true) + { + panic!( + "Last block in PoV must include the digest that marks it as the last block in the core" + ); + } + } + + block.header().hash() + }, + ); +} + /// Build a seed from the head data of the parachain block. 
/// /// Uses both the relay parent storage root and the hash of the blocks diff --git a/cumulus/pallets/parachain-system/src/validate_block/tests.rs b/cumulus/pallets/parachain-system/src/validate_block/tests.rs index 382dd8e798540..491676651e42e 100644 --- a/cumulus/pallets/parachain-system/src/validate_block/tests.rs +++ b/cumulus/pallets/parachain-system/src/validate_block/tests.rs @@ -19,8 +19,8 @@ use codec::{Decode, DecodeAll, Encode}; use cumulus_primitives_core::{ relay_chain, relay_chain::{UMPSignal, UMP_SEPARATOR}, - BundleInfo, ClaimQueueOffset, CoreInfo, CoreSelector, CumulusDigestItem, ParaId, - ParachainBlockData, PersistedValidationData, + BlockBundleInfo, ClaimQueueOffset, CollectCollationInfo, CoreInfo, CoreSelector, + CumulusDigestItem, ParaId, ParachainBlockData, PersistedValidationData, }; use cumulus_test_client::{ generate_extrinsic, generate_extrinsic_with_pair, @@ -28,8 +28,8 @@ use cumulus_test_client::{ self as test_runtime, Block, Hash, Header, SudoCall, SystemCall, TestPalletCall, UncheckedExtrinsic, WASM_BINARY, }, - seal_block, transfer, BlockData, BlockOrigin, BuildParachainBlockData, Client, - DefaultTestClientBuilderExt, HeadData, InitBlockBuilder, + seal_block, transfer, BlockData, BlockOrigin, BuildBlockBuilder, BuildParachainBlockData, + Client, DefaultTestClientBuilderExt, HeadData, Sr25519Keyring::{Alice, Bob, Charlie}, TestClientBuilder, TestClientBuilderExt, ValidationParams, }; @@ -43,6 +43,7 @@ use sp_runtime::{ traits::{BlakeTwo256, Block as BlockT, Header as HeaderT}, DigestItem, }; +use sp_tracing::capture_test_logs; use sp_trie::{proof_size_extension::ProofSizeExt, recorder::IgnoredNodes}; use std::{env, process::Command}; @@ -154,7 +155,12 @@ fn build_block_with_witness( mut block_builder, persisted_validation_data, .. 
- } = client.init_block_builder_with_pre_digests(Some(validation_data), sproof_builder, pre_digests); + } = client + .init_block_builder_builder() + .with_validation_data(validation_data) + .with_relay_sproof_builder(sproof_builder) + .with_pre_digests(pre_digests) + .build(); extra_extrinsics.into_iter().for_each(|e| block_builder.push(e).unwrap()); @@ -171,6 +177,7 @@ fn build_multiple_blocks_with_witness( mut sproof_builder: RelayStateSproofBuilder, num_blocks: u32, extra_extrinsics: impl Fn(u32) -> Vec, + pre_digests: impl Fn(u32) -> Vec, ) -> TestBlockData { let parent_head_root = *parent_head.state_root(); sproof_builder.para_id = test_runtime::PARACHAIN_ID.into(); @@ -208,18 +215,15 @@ fn build_multiple_blocks_with_witness( mut block_builder, persisted_validation_data: p_v_data, proof_recorder, - } = client.init_block_builder_with_ignored_nodes( - parent_head.hash(), - Some(validation_data.clone()), - sproof_builder.clone(), - timestamp, - ignored_nodes.clone(), - Some(vec![CumulusDigestItem::BundleInfo(BundleInfo { - index: i as u8, - maybe_last: i as u32 + 1 == num_blocks, - }) - .to_digest_item()]), - ); + } = client + .init_block_builder_builder() + .at(parent_head.hash()) + .with_validation_data(validation_data.clone()) + .with_relay_sproof_builder(sproof_builder.clone()) + .with_timestamp(timestamp) + .with_ignored_nodes(ignored_nodes.clone()) + .with_pre_digests((pre_digests)(i)) + .build(); persisted_validation_data = Some(p_v_data); @@ -254,11 +258,11 @@ fn build_multiple_blocks_with_witness( }) .unwrap(); - let new_proof = proof_recorder.drain_storage_proof(); + let proof_new = proof_recorder.drain_storage_proof(); - ignored_nodes.extend(IgnoredNodes::from_storage_proof::(&new_proof)); + ignored_nodes.extend(IgnoredNodes::from_storage_proof::(&proof_new)); ignored_nodes.extend(IgnoredNodes::from_memory_db(built_block.storage_changes.transaction)); - proof = StorageProof::merge([proof, new_proof]); + proof = StorageProof::merge([proof, proof_new]); 
parent_head = built_block.block.header.clone(); @@ -297,7 +301,7 @@ fn validate_block_works() { fn validate_multiple_blocks_work() { sp_tracing::try_init_simple(); - let blocks_per_pov = 4; + let blocks_per_pov = 4u32; let (client, parent_head) = create_elastic_scaling_test_client(); let TestBlockData { block, validation_data } = build_multiple_blocks_with_witness( &client, @@ -312,6 +316,10 @@ fn validate_multiple_blocks_work() { Some(i), )] }, + |i| { + vec![BlockBundleInfo { index: i as u8, is_last: i + 1 == blocks_per_pov } + .to_digest_item()] + }, ); assert!(block.proof().encoded_size() < 3 * 1024 * 1024); @@ -564,7 +572,7 @@ fn validate_block_works_with_child_tries() { fn state_changes_in_multiple_blocks_are_applied_in_exact_order() { sp_tracing::try_init_simple(); - let blocks_per_pov = 12; + let blocks_per_pov = 12u32; let (client, genesis_head) = create_elastic_scaling_test_client(); // 1. Build the initial block that stores values in the map. @@ -610,6 +618,10 @@ fn state_changes_in_multiple_blocks_are_applied_in_exact_order() { Some(i), )] }, + |i| { + vec![BlockBundleInfo { index: i as u8, is_last: i + 1 == blocks_per_pov } + .to_digest_item()] + }, ); // 3. Validate the PoV. @@ -666,12 +678,17 @@ fn ensure_we_only_like_blockchains() { if env::var("RUN_TEST").is_ok() { let (client, parent_head) = create_elastic_scaling_test_client(); + let num_blocks = 4u32; let TestBlockData { mut block, validation_data } = build_multiple_blocks_with_witness( &client, parent_head.clone(), Default::default(), - 4, + num_blocks, |_| Default::default(), + |i| { + vec![BlockBundleInfo { index: i as u8, is_last: i + 1 == num_blocks } + .to_digest_item()] + }, ); // Reference some non existing parent. 
@@ -697,7 +714,9 @@ fn ensure_we_only_like_blockchains() { } #[test] -fn rejects_multiple_blocks_per_pov_when_applying_runtime_upgrade() { +fn rejects_blocks_in_bundle_after_block_marked_as_last() { + // Note: This test also covers the case where a runtime upgrade contains following blocks. + // A block with a runtime upgrade is considered last in bundle. sp_tracing::try_init_simple(); if env::var("RUN_TEST").is_ok() { @@ -748,13 +767,18 @@ fn rejects_multiple_blocks_per_pov_when_applying_runtime_upgrade() { proof_builder.host_config.max_code_size = code_len * 2; // 2. Build a PoV that consists of multiple blocks. + let num_blocks = 4u32; let TestBlockData { block: pov_block_data, validation_data: pov_validation_data } = build_multiple_blocks_with_witness( &client, initial_block_header.clone(), // Start building PoV from the initial block's header proof_builder, - 4, + num_blocks, |_| Vec::new(), + |i| { + vec![BlockBundleInfo { index: i as u8, is_last: i + 1 == num_blocks } + .to_digest_item()] + }, ); // 3. Validate the PoV. @@ -766,11 +790,7 @@ fn rejects_multiple_blocks_per_pov_when_applying_runtime_upgrade() { .unwrap_err(); } else { let output = Command::new(env::current_exe().unwrap()) - .args([ - "rejects_multiple_blocks_per_pov_when_applying_runtime_upgrade", - "--", - "--nocapture", - ]) + .args(["rejects_blocks_in_bundle_after_block_marked_as_last", "--", "--nocapture"]) .env("RUN_TEST", "1") .output() .expect("Runs the test"); @@ -778,7 +798,7 @@ fn rejects_multiple_blocks_per_pov_when_applying_runtime_upgrade() { assert!(output.status.success()); assert!(dbg!(String::from_utf8(output.stderr).unwrap()) - .contains("only one block per PoV is allowed")); + .contains("is marked as last block in core, but more blocks follow in the PoV")); } } @@ -818,6 +838,179 @@ fn validate_block_rejects_huge_header_single_block() { } } +#[test] +fn validate_block_rejects_incomplete_bundle() { + // Required to have the global logging enabled, so we can capture it below. 
+ sp_tracing::try_init_simple(); + + let (client, parent_head) = create_elastic_scaling_test_client(); + + // Build 2 blocks with BlockBundleInfo + let TestBlockData { block, validation_data } = build_multiple_blocks_with_witness( + &client, + parent_head.clone(), + Default::default(), + 2, + |_| Vec::new(), + |i| vec![BlockBundleInfo { index: i as u8, is_last: i == 1 }.to_digest_item()], + ); + + // Validation with only first block should fail (incomplete bundle) + let first_block_only = + ParachainBlockData::new(vec![block.blocks()[0].clone()], block.proof().clone()); + let log_capture = capture_test_logs!({ + call_validate_block_elastic_scaling( + parent_head.clone(), + first_block_only, + validation_data.relay_parent_storage_root, + ) + .unwrap_err(); + }); + assert!( + log_capture.contains( + "Last block in PoV must include the digest that marks it as the last block in the core" + ), + "Expected log about missing last block digest, got: {}", + log_capture.get_logs() + ); + + // Validation with both blocks should succeed + let header = block.blocks().last().unwrap().header().clone(); + let res_header = call_validate_block_elastic_scaling( + parent_head, + block, + validation_data.relay_parent_storage_root, + ) + .expect("Calls `validate_block`"); + assert_eq!(header, res_header); +} + +#[test] +fn only_send_ump_signal_on_last_block_in_bundle() { + sp_tracing::try_init_simple(); + + let (client, parent_head) = create_elastic_scaling_test_client(); + + // Build 4 blocks with BlockBundleInfo and CoreInfo on all blocks + let TestBlockData { block, .. 
} = build_multiple_blocks_with_witness( + &client, + parent_head.clone(), + Default::default(), + 4, + |_| Vec::new(), + |i| { + vec![ + BlockBundleInfo { index: i as u8, is_last: i == 3 }.to_digest_item(), + CumulusDigestItem::CoreInfo(CoreInfo { + selector: CoreSelector(0), + claim_queue_offset: ClaimQueueOffset(0), + number_of_cores: 1.into(), + }) + .to_digest_item(), + ] + }, + ); + + let blocks = block.blocks(); + + // Check CollectCollationInfo for each block + for (i, b) in blocks.iter().enumerate() { + let is_last = i == blocks.len() - 1; + let block_hash = b.header().hash(); + + let collation_info = client + .runtime_api() + .collect_collation_info(block_hash, b.header()) + .expect("Failed to collect collation info"); + + let has_separator = collation_info.upward_messages.contains(&UMP_SEPARATOR); + + if is_last { + assert!( + has_separator, + "Block {} (last) should have UMP_SEPARATOR, got: {:?}", + i, collation_info.upward_messages + ); + } else { + assert!( + !has_separator, + "Block {} should NOT have UMP_SEPARATOR, got: {:?}", + i, collation_info.upward_messages + ); + } + } +} + +#[test] +fn validate_block_accepts_single_block_with_use_full_core() { + sp_tracing::try_init_simple(); + + let (client, parent_head) = create_elastic_scaling_test_client(); + + // Build a single block with BlockBundleInfo (is_last=false) and UseFullCore set via + // extrinsic UseFullCore should make validation succeed even without is_last=true + let TestBlockData { block, validation_data } = build_block_with_witness( + &client, + vec![generate_extrinsic(&client, Alice, TestPalletCall::set_use_full_core {})], + parent_head.clone(), + Default::default(), + vec![BlockBundleInfo { index: 0, is_last: false }.to_digest_item()], + ); + + // Validation should succeed because UseFullCore marks it as last block + let header = block.blocks()[0].header().clone(); + let res_header = call_validate_block_elastic_scaling( + parent_head, + block, + 
validation_data.relay_parent_storage_root, + ) + .expect("Calls `validate_block`"); + assert_eq!(header, res_header); +} + +#[test] +fn only_send_ump_signal_on_single_block_with_use_full_core() { + sp_tracing::try_init_simple(); + + let (client, parent_head) = create_elastic_scaling_test_client(); + + // Build a single block with BlockBundleInfo (is_last=false), CoreInfo, and UseFullCore set + // via extrinsic. UseFullCore makes this block the last block in the core. + let TestBlockData { block, .. } = build_multiple_blocks_with_witness( + &client, + parent_head.clone(), + Default::default(), + 1, + |_| vec![generate_extrinsic(&client, Alice, TestPalletCall::set_use_full_core {})], + |_| { + vec![ + BlockBundleInfo { index: 0, is_last: false }.to_digest_item(), + CumulusDigestItem::CoreInfo(CoreInfo { + selector: CoreSelector(0), + claim_queue_offset: ClaimQueueOffset(0), + number_of_cores: 1.into(), + }) + .to_digest_item(), + ] + }, + ); + + let b = &block.blocks()[0]; + let block_hash = b.header().hash(); + + let collation_info = client + .runtime_api() + .collect_collation_info(block_hash, b.header()) + .expect("Failed to collect collation info"); + + // Block with UseFullCore should have UMP_SEPARATOR (it's the last block) + assert!( + collation_info.upward_messages.contains(&UMP_SEPARATOR), + "Single block with UseFullCore should have UMP_SEPARATOR, got: {:?}", + collation_info.upward_messages + ); +} + #[test] fn validate_block_with_max_ump_messages_and_4_blocks_per_pov() { sp_tracing::try_init_simple(); @@ -848,6 +1041,10 @@ fn validate_block_with_max_ump_messages_and_4_blocks_per_pov() { Some(i), )] }, + |i| { + vec![BlockBundleInfo { index: i as u8, is_last: i as u32 + 1 == blocks_per_pov } + .to_digest_item()] + }, ); let header = block.blocks().last().unwrap().header().clone(); @@ -872,8 +1069,9 @@ fn validate_block_with_max_hrmp_messages_and_4_blocks_per_pov() { sp_tracing::try_init_simple(); let blocks_per_pov = 4; - let max_per_candidate = 100; - let 
recipient = ParaId::from(300); + let msgs_per_block: u32 = 25; + let max_per_candidate = msgs_per_block * blocks_per_pov; + let first_recipient = 300u32; let (client, parent_head) = create_elastic_scaling_test_client(); let mut sproof_builder = @@ -881,10 +1079,12 @@ fn validate_block_with_max_hrmp_messages_and_4_blocks_per_pov() { sproof_builder.host_config.hrmp_max_message_num_per_candidate = max_per_candidate; sproof_builder.para_id = ParaId::from(100); - let channel = sproof_builder.upsert_outbound_channel(recipient); - channel.max_capacity = blocks_per_pov; - channel.max_total_size = blocks_per_pov * max_per_candidate * 256; - channel.max_message_size = 256; + for i in 0..max_per_candidate { + let channel = sproof_builder.upsert_outbound_channel(ParaId::from(first_recipient + i)); + channel.max_capacity = blocks_per_pov; + channel.max_total_size = blocks_per_pov * max_per_candidate * 256; + channel.max_message_size = 256; + } let TestBlockData { block, validation_data } = build_multiple_blocks_with_witness( &client, @@ -892,13 +1092,21 @@ fn validate_block_with_max_hrmp_messages_and_4_blocks_per_pov() { sproof_builder, blocks_per_pov, |i| { + let block_first_recipient = ParaId::from(first_recipient + i * msgs_per_block); vec![generate_extrinsic_with_pair( &client, Charlie.into(), - TestPalletCall::queue_hrmp_messages { n: max_per_candidate, recipient }, + TestPalletCall::queue_hrmp_messages_to_n_recipients { + n: msgs_per_block, + first_recipient: block_first_recipient, + }, Some(i), )] }, + |i| { + vec![BlockBundleInfo { index: i as u8, is_last: i as u32 + 1 == blocks_per_pov } + .to_digest_item()] + }, ); let header = block.blocks().last().unwrap().header().clone(); @@ -917,6 +1125,156 @@ fn validate_block_with_max_hrmp_messages_and_4_blocks_per_pov() { assert_eq!(result.horizontal_messages.len(), max_per_candidate as usize); } +#[test] +fn validate_block_hrmp_messages_sorted_across_blocks_in_bundle() { + sp_tracing::try_init_simple(); + + let blocks_per_pov 
= 2; + let recipient_a = ParaId::from(200); + let recipient_b = ParaId::from(300); + let (client, parent_head) = create_elastic_scaling_test_client(); + + let mut sproof_builder = + RelayStateSproofBuilder { current_slot: 1.into(), ..Default::default() }; + sproof_builder.host_config.hrmp_max_message_num_per_candidate = 10; + sproof_builder.para_id = ParaId::from(100); + + for recipient in [recipient_a, recipient_b] { + let channel = sproof_builder.upsert_outbound_channel(recipient); + channel.max_capacity = blocks_per_pov; + channel.max_total_size = blocks_per_pov * 10 * 256; + channel.max_message_size = 256; + } + + let TestBlockData { block, validation_data } = build_multiple_blocks_with_witness( + &client, + parent_head.clone(), + sproof_builder, + blocks_per_pov, + |i| { + // Block 0 sends to recipient_b (300), block 1 sends to recipient_a (200). + // Naive concatenation would produce [300, 200] which violates the + // strictly-ascending-by-recipient requirement enforced by the relay chain. + let recipient = if i == 0 { recipient_b } else { recipient_a }; + vec![generate_extrinsic_with_pair( + &client, + Charlie.into(), + TestPalletCall::queue_hrmp_messages { n: 1, recipient }, + Some(i), + )] + }, + |i| { + vec![BlockBundleInfo { index: i as u8, is_last: i as u32 + 1 == blocks_per_pov } + .to_digest_item()] + }, + ); + + let result = call_validate_block_validation_result( + test_runtime::elastic_scaling_500ms::WASM_BINARY + .expect("You need to build the WASM binaries to run the tests!"), + parent_head, + block, + validation_data.relay_parent_storage_root, + ) + .expect("Calls `validate_block`"); + + assert_eq!(result.horizontal_messages.len(), 2); + + // The relay chain requires strictly ascending recipient order and at most one message + // per recipient (see `hrmp::Pallet::check_outbound_hrmp`). 
+ assert!( + result.horizontal_messages[0].recipient < result.horizontal_messages[1].recipient, + "HRMP messages must be strictly sorted by recipient, got {:?} before {:?}", + result.horizontal_messages[0].recipient, + result.horizontal_messages[1].recipient, + ); +} + +#[test] +fn validate_block_hrmp_duplicate_recipient_across_blocks_in_bundle() { + sp_tracing::try_init_simple(); + + let blocks_per_pov = 2; + let recipient = ParaId::from(300); + let (client, parent_head) = create_elastic_scaling_test_client(); + + let mut sproof_builder = + RelayStateSproofBuilder { current_slot: 1.into(), ..Default::default() }; + sproof_builder.host_config.hrmp_max_message_num_per_candidate = 10; + sproof_builder.para_id = ParaId::from(100); + + let channel = sproof_builder.upsert_outbound_channel(recipient); + channel.max_capacity = 10; + channel.max_total_size = 10 * 256; + channel.max_message_size = 256; + + let TestBlockData { block: pov1_block, validation_data: pov1_vdata } = + build_multiple_blocks_with_witness( + &client, + parent_head.clone(), + sproof_builder.clone(), + blocks_per_pov, + |i| { + vec![generate_extrinsic_with_pair( + &client, + Charlie.into(), + TestPalletCall::queue_hrmp_messages { n: 1, recipient }, + Some(i), + )] + }, + |i| { + vec![BlockBundleInfo { index: i as u8, is_last: i as u32 + 1 == blocks_per_pov } + .to_digest_item()] + }, + ); + + let pov1_result = call_validate_block_validation_result( + test_runtime::elastic_scaling_500ms::WASM_BINARY + .expect("You need to build the WASM binaries to run the tests!"), + parent_head, + pov1_block.clone(), + pov1_vdata.relay_parent_storage_root, + ) + .expect("Calls `validate_block` for PoV 1"); + + assert_eq!( + pov1_result.horizontal_messages.len(), + 1, + "PoV 1: expected 1 HRMP message, got {}", + pov1_result.horizontal_messages.len(), + ); + + let pov2_parent_head = pov1_block.blocks().last().unwrap().header().clone(); + sproof_builder.current_slot = 2.into(); + sproof_builder.included_para_head = 
Some(HeadData(pov2_parent_head.encode())); + + let TestBlockData { block: pov2_block, validation_data: pov2_vdata } = + build_multiple_blocks_with_witness( + &client, + pov2_parent_head.clone(), + sproof_builder, + 1, + |_| vec![], + |_| vec![], + ); + + let pov2_result = call_validate_block_validation_result( + test_runtime::elastic_scaling_500ms::WASM_BINARY + .expect("You need to build the WASM binaries to run the tests!"), + pov2_parent_head, + pov2_block, + pov2_vdata.relay_parent_storage_root, + ) + .expect("Calls `validate_block` for PoV 2"); + + assert_eq!( + pov2_result.horizontal_messages.len(), + 1, + "PoV 2: expected 1 HRMP message (the pending one from PoV 1), got {}", + pov2_result.horizontal_messages.len(), + ); +} + #[test] fn validate_block_with_ump_size_constraint_and_4_blocks_per_pov() { sp_tracing::try_init_simple(); @@ -947,6 +1305,10 @@ fn validate_block_with_ump_size_constraint_and_4_blocks_per_pov() { Some(i), )] }, + |i| { + vec![BlockBundleInfo { index: i as u8, is_last: i as u32 + 1 == blocks_per_pov } + .to_digest_item()] + }, ); let header = block.blocks().last().unwrap().header().clone(); @@ -997,6 +1359,10 @@ fn validate_block_with_ump_capacity_constraint_and_4_blocks_per_pov() { Some(i), )] }, + |i| { + vec![BlockBundleInfo { index: i as u8, is_last: i as u32 + 1 == blocks_per_pov } + .to_digest_item()] + }, ); let header = block.blocks().last().unwrap().header().clone(); diff --git a/cumulus/pallets/xcmp-queue/src/lib.rs b/cumulus/pallets/xcmp-queue/src/lib.rs index 2ad3af5c37950..4f891cee075bf 100644 --- a/cumulus/pallets/xcmp-queue/src/lib.rs +++ b/cumulus/pallets/xcmp-queue/src/lib.rs @@ -1082,7 +1082,10 @@ impl XcmpMessageHandler for Pallet { } impl XcmpMessageSource for Pallet { - fn take_outbound_messages(maximum_channels: usize) -> Vec<(ParaId, Vec)> { + fn take_outbound_messages( + maximum_channels: usize, + excluded_recipients: &[ParaId], + ) -> Vec<(ParaId, Vec)> { let mut statuses = >::get().into_inner(); let 
old_statuses_len = statuses.len(); let max_message_count = statuses.len().min(maximum_channels); @@ -1114,6 +1117,11 @@ impl XcmpMessageSource for Pallet { ChannelStatus::Ready(max_size_now, max_size_ever) => (max_size_now, max_size_ever), }; + // Check if we should omit the recipient. + if excluded_recipients.contains(para_id) { + return true; + } + // This is a hard limit from the host config; not even signals can bypass it. if result.len() == max_message_count { // We check this condition in the beginning of the loop so that we don't include diff --git a/cumulus/pallets/xcmp-queue/src/tests.rs b/cumulus/pallets/xcmp-queue/src/tests.rs index b9da912434329..a3923f8cd244f 100644 --- a/cumulus/pallets/xcmp-queue/src/tests.rs +++ b/cumulus/pallets/xcmp-queue/src/tests.rs @@ -720,7 +720,7 @@ fn send_xcm_nested_works() { new_test_ext().execute_with(|| { assert_ok!(send_xcm::(dest.into(), good.clone())); assert_eq!( - XcmpQueue::take_outbound_messages(usize::MAX), + XcmpQueue::take_outbound_messages(usize::MAX, &[]), vec![( HRMP_PARA_ID.into(), (XcmpMessageFormat::ConcatenatedVersionedXcm, VersionedXcm::from(good.clone())) @@ -733,7 +733,7 @@ fn send_xcm_nested_works() { let bad = Xcm(vec![SetAppendix(good)]); new_test_ext().execute_with(|| { assert_err!(send_xcm::(dest.into(), bad), SendError::ExceedsMaxMessageSize); - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); }); } @@ -766,7 +766,7 @@ fn hrmp_signals_are_prioritized() { }, ); - let taken = XcmpQueue::take_outbound_messages(130); + let taken = XcmpQueue::take_outbound_messages(130, &[]); assert_eq!(taken, vec![]); // Enqueue some messages @@ -783,14 +783,14 @@ fn hrmp_signals_are_prioritized() { } hypothetically!({ - let taken = XcmpQueue::take_outbound_messages(usize::MAX); + let taken = XcmpQueue::take_outbound_messages(usize::MAX, &[]); assert_eq!(taken, vec![(sibling_para_id.into(), expected_msg,)]); }); // But a 
signal gets prioritized instead of the messages: assert_ok!(XcmpQueue::send_signal(sibling_para_id.into(), ChannelSignal::Suspend)); - let taken = XcmpQueue::take_outbound_messages(130); + let taken = XcmpQueue::take_outbound_messages(130, &[]); assert_eq!( taken, vec![( @@ -1030,13 +1030,13 @@ fn xcmp_queue_send_xcm_works() { ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(sibling_para_id); // check empty outbound queue - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); // now send works assert_ok!(send_xcm::(dest, msg)); // check outbound queue contains message/page for sibling_para_id - assert!(XcmpQueue::take_outbound_messages(usize::MAX) + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]) .iter() .any(|(para_id, _)| para_id == &sibling_para_id)); }) @@ -1074,7 +1074,7 @@ fn xcmp_queue_send_too_big_xcm_fails() { assert_eq!(encoded_message_size, max_message_size as usize - versioned_size); // check empty outbound queue - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); // Message is too big because after adding the VersionedXcm enum, it would reach // `max_message_size` Then, adding the format, which is the worst case scenario in which a @@ -1082,7 +1082,7 @@ fn xcmp_queue_send_too_big_xcm_fails() { assert_eq!(send_xcm::(dest, message), Err(SendError::Transport("TooBig")),); // outbound queue is still empty - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); }); } @@ -1099,7 +1099,7 @@ fn concatenated_opaque_version_xcm_negotiation_works() { // If there is a message in the queue, the notification is not sent assert_ok!(send_xcm::(dest.clone(), msg.clone())); assert_eq!( - XcmpQueue::take_outbound_messages(usize::MAX), + 
XcmpQueue::take_outbound_messages(usize::MAX, &[]), vec![( sibling_para_id, [ConcatenatedVersionedXcm.encode(), VersionedXcm::V5(msg.clone()).encode()] @@ -1109,12 +1109,12 @@ fn concatenated_opaque_version_xcm_negotiation_works() { // The queue is empty. The notification should be sent. assert_eq!( - XcmpQueue::take_outbound_messages(usize::MAX), + XcmpQueue::take_outbound_messages(usize::MAX, &[]), vec![(sibling_para_id, ConcatenatedOpaqueVersionedXcm.encode())] ); // The notification should not be sent again - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); // The recipient parachain still uses the `ConcatenatedVersionedXcm`. let page = generate_mock_xcm_page(0, 1, XcmEncoding::Simple); @@ -1122,7 +1122,7 @@ fn concatenated_opaque_version_xcm_negotiation_works() { // The next message is still sent using the `ConcatenatedVersionedXcm` format. assert_ok!(send_xcm::(dest.clone(), msg.clone())); assert_eq!( - XcmpQueue::take_outbound_messages(usize::MAX), + XcmpQueue::take_outbound_messages(usize::MAX, &[]), vec![( sibling_para_id, [ConcatenatedVersionedXcm.encode(), VersionedXcm::V5(msg.clone()).encode()] @@ -1136,7 +1136,7 @@ fn concatenated_opaque_version_xcm_negotiation_works() { // The next message is sent using the `ConcatenatedOpaqueVersionedXcm` format. 
assert_ok!(send_xcm::(dest, msg.clone())); assert_eq!( - XcmpQueue::take_outbound_messages(usize::MAX), + XcmpQueue::take_outbound_messages(usize::MAX, &[]), vec![( sibling_para_id, [ConcatenatedOpaqueVersionedXcm.encode(), VersionedXcm::V5(msg).encode().encode()] @@ -1215,10 +1215,10 @@ fn verify_fee_factor_increase_and_decrease() { // Fee factor only decreases in `take_outbound_messages` for _ in 0..5 { // We take 5 100 byte pages - XcmpQueue::take_outbound_messages(1); + XcmpQueue::take_outbound_messages(1, &[]); } assert!(DeliveryFeeFactor::::get(sibling_para_id) < FixedU128::from_float(1.72)); - XcmpQueue::take_outbound_messages(1); + XcmpQueue::take_outbound_messages(1, &[]); assert!(DeliveryFeeFactor::::get(sibling_para_id) < FixedU128::from_float(1.63)); }); } diff --git a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs index 72c93c133cc48..7d9e5e28680b9 100644 --- a/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/assets/asset-hub-rococo/src/lib.rs @@ -2102,7 +2102,7 @@ impl_runtime_apis! { impl cumulus_primitives_core::TargetBlockRate for Runtime { fn target_block_rate() -> u32 { - 1 + BLOCK_PROCESSING_VELOCITY } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs index 7fa9622ed2d4d..f8f821847fd4f 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-rococo/src/lib.rs @@ -1379,7 +1379,7 @@ impl_runtime_apis! 
{ params: MessageProofParams>, ) -> (bridge_to_westend_config::FromWestendBridgeHubMessagesProof, Weight) { use cumulus_primitives_core::XcmpMessageSource; - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); let universal_source = bridge_to_westend_config::open_bridge_for_benchmarks::< Runtime, @@ -1410,7 +1410,7 @@ impl_runtime_apis! { fn is_message_successfully_dispatched(_nonce: bp_messages::MessageNonce) -> bool { use cumulus_primitives_core::XcmpMessageSource; - !XcmpQueue::take_outbound_messages(usize::MAX).is_empty() + !XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty() } } @@ -1424,7 +1424,7 @@ impl_runtime_apis! { params: MessageProofParams>, ) -> (bridge_to_bulletin_config::FromRococoBulletinMessagesProof, Weight) { use cumulus_primitives_core::XcmpMessageSource; - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); let universal_source = bridge_to_bulletin_config::open_bridge_for_benchmarks::< Runtime, @@ -1455,7 +1455,7 @@ impl_runtime_apis! { fn is_message_successfully_dispatched(_nonce: bp_messages::MessageNonce) -> bool { use cumulus_primitives_core::XcmpMessageSource; - !XcmpQueue::take_outbound_messages(usize::MAX).is_empty() + !XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty() } } diff --git a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs index d917fb61b7704..1342bb4c19fa6 100644 --- a/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs +++ b/cumulus/parachains/runtimes/bridge-hubs/bridge-hub-westend/src/lib.rs @@ -1352,7 +1352,7 @@ impl_runtime_apis! 
{ params: MessageProofParams>, ) -> (bridge_to_rococo_config::FromRococoBridgeHubMessagesProof, Weight) { use cumulus_primitives_core::XcmpMessageSource; - assert!(XcmpQueue::take_outbound_messages(usize::MAX).is_empty()); + assert!(XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty()); ParachainSystem::open_outbound_hrmp_channel_for_benchmarks_or_tests(42.into()); let universal_source = bridge_to_rococo_config::open_bridge_for_benchmarks::< Runtime, @@ -1383,7 +1383,7 @@ impl_runtime_apis! { fn is_message_successfully_dispatched(_nonce: bp_messages::MessageNonce) -> bool { use cumulus_primitives_core::XcmpMessageSource; - !XcmpQueue::take_outbound_messages(usize::MAX).is_empty() + !XcmpQueue::take_outbound_messages(usize::MAX, &[]).is_empty() } } diff --git a/cumulus/parachains/runtimes/constants/src/rococo.rs b/cumulus/parachains/runtimes/constants/src/rococo.rs index 1538b49b064f2..cdb1ef0172456 100644 --- a/cumulus/parachains/runtimes/constants/src/rococo.rs +++ b/cumulus/parachains/runtimes/constants/src/rococo.rs @@ -112,10 +112,10 @@ pub mod consensus { /// Maximum number of blocks simultaneously accepted by the Runtime, not yet included /// into the relay chain. - pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; + pub const UNINCLUDED_SEGMENT_CAPACITY: u32 = 36; /// How many parachain blocks are processed by the relay chain per parent. Limits the /// number of blocks authored per slot. - pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; + pub const BLOCK_PROCESSING_VELOCITY: u32 = 12; /// Relay chain slot duration, in milliseconds. 
pub const RELAY_CHAIN_SLOT_DURATION_MILLIS: u32 = 6000; diff --git a/cumulus/parachains/runtimes/test-utils/src/lib.rs b/cumulus/parachains/runtimes/test-utils/src/lib.rs index 189025f2ee9f4..abbaead9ea862 100644 --- a/cumulus/parachains/runtimes/test-utils/src/lib.rs +++ b/cumulus/parachains/runtimes/test-utils/src/lib.rs @@ -763,7 +763,7 @@ impl { pub fn take_xcm(sent_to_para_id: ParaId) -> Option> { - match HrmpChannelSource::take_outbound_messages(10)[..] { + match HrmpChannelSource::take_outbound_messages(10, &[])[..] { [(para_id, ref mut xcm_message_data)] if para_id.eq(&sent_to_para_id.into()) => { let mut xcm_message_data = &xcm_message_data[..]; // decode diff --git a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs index 2df483883ddfb..c040ac21bcaa4 100644 --- a/cumulus/parachains/runtimes/testing/penpal/src/lib.rs +++ b/cumulus/parachains/runtimes/testing/penpal/src/lib.rs @@ -612,7 +612,6 @@ impl cumulus_pallet_parachain_system::Config for Runtime { BLOCK_PROCESSING_VELOCITY, UNINCLUDED_SEGMENT_CAPACITY, >; - type RelayParentOffset = ConstU32<0>; } diff --git a/cumulus/polkadot-omni-node/lib/src/common/aura.rs b/cumulus/polkadot-omni-node/lib/src/common/aura.rs index b6f156f96dfdb..da277fb29e964 100644 --- a/cumulus/polkadot-omni-node/lib/src/common/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/common/aura.rs @@ -56,6 +56,8 @@ pub trait AuraRuntimeApi: + AuraUnincludedSegmentApi + KeyToIncludeInRelayProof + Sized +where + ::Public: std::fmt::Debug, { /// Check if the runtime has the Aura API. 
fn has_aura_api(&self, at: Block::Hash) -> bool { @@ -64,10 +66,12 @@ pub trait AuraRuntimeApi: } } -impl AuraRuntimeApi for T where +impl AuraRuntimeApi for T +where T: sp_api::ApiExt + AuraApi::Public> + AuraUnincludedSegmentApi - + KeyToIncludeInRelayProof + + KeyToIncludeInRelayProof, + ::Public: std::fmt::Debug, { } diff --git a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs index a41bd4a447ed1..033a7ccbd4148 100644 --- a/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs +++ b/cumulus/polkadot-omni-node/lib/src/nodes/aura.rs @@ -50,7 +50,7 @@ use cumulus_client_parachain_inherent::MockValidationDataInherentDataProvider; use cumulus_client_service::CollatorSybilResistance; use cumulus_primitives_core::{ relay_chain::ValidationCode, CollectCollationInfo, GetParachainInfo, ParaId, - RelayParentOffsetApi, + RelayParentOffsetApi, TargetBlockRate, }; use cumulus_relay_chain_interface::{OverseerHandle, RelayChainInterface}; use futures::{prelude::*, FutureExt}; @@ -567,6 +567,7 @@ where RuntimeApi::RuntimeApi: AuraRuntimeApi + pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi + substrate_frame_rpc_system::AccountNonceApi + + TargetBlockRate + GetParachainInfo, AuraId: AuraIdT + Sync + Send, ::Pair: Send + Sync, @@ -599,7 +600,7 @@ impl, RuntimeApi, AuraId> StartSlotBasedAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, - RuntimeApi::RuntimeApi: AuraRuntimeApi, + RuntimeApi::RuntimeApi: AuraRuntimeApi + TargetBlockRate, AuraId: AuraIdT + Sync + Send, ::Pair: Send + Sync, { @@ -627,7 +628,10 @@ where ) where CIDP: CreateInherentDataProviders + 'static, CIDP::InherentDataProviders: Send, - CHP: cumulus_client_consensus_common::ValidationCodeHashProvider + Send + 'static, + CHP: cumulus_client_consensus_common::ValidationCodeHashProvider + + Send + + Sync + + 'static, Proposer: Environment + Send + Sync + 'static, CS: CollatorServiceInterface + Send + Sync + Clone + 'static, Spawner: 
SpawnEssentialNamed + Clone + 'static, @@ -651,7 +655,7 @@ impl, RuntimeApi, AuraId> > for StartSlotBasedAuraConsensus where RuntimeApi: ConstructNodeRuntimeApi>, - RuntimeApi::RuntimeApi: AuraRuntimeApi, + RuntimeApi::RuntimeApi: AuraRuntimeApi + TargetBlockRate, AuraId: AuraIdT + Sync + Send, ::Pair: Send + Sync, { @@ -733,7 +737,6 @@ where para_id, proposer, collator_service, - authoring_duration: Duration::from_millis(2000), reinitialize: false, slot_offset: Duration::from_secs(1), block_import_handle, diff --git a/cumulus/primitives/core/src/lib.rs b/cumulus/primitives/core/src/lib.rs index 28c61a53cdaa7..b47764b61367c 100644 --- a/cumulus/primitives/core/src/lib.rs +++ b/cumulus/primitives/core/src/lib.rs @@ -194,12 +194,20 @@ pub enum ChannelStatus { /// A means of figuring out what outbound XCMP messages should be being sent. pub trait XcmpMessageSource { - /// Take a single XCMP message from the queue for the given `dest`, if one exists. - fn take_outbound_messages(maximum_channels: usize) -> Vec<(ParaId, Vec)>; + /// Take outbound XCMP messages from the queue. + /// + /// `excluded_recipients` contains para IDs that must be skipped. + fn take_outbound_messages( + maximum_channels: usize, + excluded_recipients: &[ParaId], + ) -> Vec<(ParaId, Vec)>; } impl XcmpMessageSource for () { - fn take_outbound_messages(_maximum_channels: usize) -> Vec<(ParaId, Vec)> { + fn take_outbound_messages( + _maximum_channels: usize, + _excluded_recipients: &[ParaId], + ) -> Vec<(ParaId, Vec)> { Vec::new() } } @@ -248,7 +256,7 @@ impl CoreInfo { /// Information about a block that is part of a PoV bundle. #[derive(Clone, Debug, Decode, Encode, PartialEq)] -pub struct BundleInfo { +pub struct BlockBundleInfo { /// The index of the block in the bundle. pub index: u8, /// Is this the last block in the bundle from the point of view of the node? 
@@ -256,14 +264,14 @@ pub struct BundleInfo { /// It is possible that the runtime outputs the /// [`CumulusDigestItem::UseFullCore`] to inform the node to use an entire for one block /// only. - pub maybe_last: bool, + pub is_last: bool, } -impl BundleInfo { - /// Puts this into a [`CumulusDigestItem::BundleInfo`] and then encodes it as a Substrate +impl BlockBundleInfo { + /// Puts this into a [`CumulusDigestItem::BlockBundleInfo`] and then encodes it as a Substrate /// [`DigestItem`]. pub fn to_digest_item(&self) -> DigestItem { - CumulusDigestItem::BundleInfo(self.clone()).to_digest_item() + CumulusDigestItem::BlockBundleInfo(self.clone()).to_digest_item() } } @@ -299,7 +307,7 @@ pub enum CumulusDigestItem { CoreInfo(CoreInfo), /// A digest item providing information about the position of the block in the bundle. #[codec(index = 2)] - BundleInfo(BundleInfo), + BlockBundleInfo(BlockBundleInfo), /// A digest item informing the node that this block should be put alone onto a core. /// /// In other words, the core should not be shared with other blocks. @@ -400,11 +408,11 @@ impl CumulusDigestItem { }) } - /// Returns the [`BundleInfo`] from the given `digest`. - pub fn find_bundle_info(digest: &Digest) -> Option { + /// Returns the [`BlockBundleInfo`] from the given `digest`. + pub fn find_block_bundle_info(digest: &Digest) -> Option { digest.convert_first(|d| match d { DigestItem::PreRuntime(id, val) if id == &CUMULUS_CONSENSUS_ID => { - let Ok(CumulusDigestItem::BundleInfo(bundle_info)) = + let Ok(CumulusDigestItem::BlockBundleInfo(bundle_info)) = CumulusDigestItem::decode_all(&mut &val[..]) else { return None; @@ -433,6 +441,28 @@ impl CumulusDigestItem { }) .unwrap_or_default() } + + /// Returns `true` if the given `digest` is from a block that is the last block in a core. + /// + /// Checks the following conditions: + /// + /// - Is [`BlockBundleInfo::is_last`] set to true? + /// - Or is [`Self::UseFullCore`] digest present? 
+ /// - Or is [`DigestItem::RuntimeEnvironmentUpdated`] digest present? + /// + /// If any of these conditions is `true`, this function will return `true`. + /// + /// Returns `None` if the `BlockBundleInfo` digest is not present, which is interpreted as the + /// associated block is not using block bundling. + pub fn is_last_block_in_core(digest: &Digest) -> Option { + let bundle_info = Self::find_block_bundle_info(digest)?; + + Some( + bundle_info.is_last || + Self::contains_use_full_core(digest) || + digest.logs.iter().any(|l| matches!(l, DigestItem::RuntimeEnvironmentUpdated)), + ) + } } /// If there are multiple valid digests, this returns the value of the first one, although diff --git a/cumulus/test/client/Cargo.toml b/cumulus/test/client/Cargo.toml index 143f665551c26..d1041f264e0b1 100644 --- a/cumulus/test/client/Cargo.toml +++ b/cumulus/test/client/Cargo.toml @@ -26,6 +26,7 @@ sp-application-crypto = { workspace = true, default-features = true } sp-blockchain = { workspace = true, default-features = true } sp-consensus-aura = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-externalities = { workspace = true, default-features = true } sp-inherents = { workspace = true, default-features = true } sp-io = { workspace = true, default-features = true } sp-keyring = { workspace = true, default-features = true } @@ -40,6 +41,7 @@ polkadot-parachain-primitives = { workspace = true, default-features = true } polkadot-primitives = { workspace = true, default-features = true } # Cumulus +cumulus-pallet-parachain-system = { workspace = true, default-features = true } cumulus-pallet-weight-reclaim = { workspace = true, default-features = true } cumulus-primitives-core = { workspace = true, default-features = true } cumulus-primitives-parachain-inherent = { workspace = true, default-features = true } @@ -50,6 +52,7 @@ cumulus-test-service = { workspace = true } [features] runtime-benchmarks = [ + 
"cumulus-pallet-parachain-system/runtime-benchmarks", "cumulus-pallet-weight-reclaim/runtime-benchmarks", "cumulus-primitives-core/runtime-benchmarks", "cumulus-test-service/runtime-benchmarks", diff --git a/cumulus/test/client/src/block_builder.rs b/cumulus/test/client/src/block_builder.rs index a3f86ec2a358a..6916e016ddebf 100644 --- a/cumulus/test/client/src/block_builder.rs +++ b/cumulus/test/client/src/block_builder.rs @@ -21,9 +21,9 @@ use cumulus_primitives_parachain_inherent::{ParachainInherentData, INHERENT_IDEN use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use cumulus_test_runtime::{Block, GetLastTimestamp, Hash, Header}; use polkadot_primitives::{BlockNumber as PBlockNumber, Hash as PHash}; -use sc_block_builder::BlockBuilderBuilder; use sp_api::{ApiExt, ProofRecorder, ProofRecorderIgnoredNodes, ProvideRuntimeApi}; use sp_consensus_aura::{AuraApi, Slot}; +use sp_externalities::Extensions; use sp_runtime::{traits::Header as HeaderT, Digest, DigestItem}; use sp_trie::proof_size_extension::ProofSizeExt; @@ -34,71 +34,99 @@ pub struct BlockBuilderAndSupportData<'a> { pub proof_recorder: ProofRecorder, } -/// An extension for the Cumulus test client to init a block builder. -pub trait InitBlockBuilder { - /// Init a specific block builder that works for the test runtime. - /// - /// This will automatically create and push the inherents for you to make the block - /// valid for the test runtime. - /// - /// You can use the relay chain state sproof builder to arrange required relay chain state or - /// just use a default one. The relay chain slot in the storage proof - /// will be adjusted to align with the parachain slot to pass validation. - /// - /// Returns the block builder and validation data for further usage. - fn init_block_builder( - &self, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - ) -> BlockBuilderAndSupportData<'_>; +/// Builder for creating a block builder with customizable parameters. 
+pub struct BlockBuilderBuilder<'a> { + client: &'a Client, + at: Option, + validation_data: Option>, + relay_sproof_builder: RelayStateSproofBuilder, + timestamp: Option, + ignored_nodes: Option>, + pre_digests: Vec, +} - /// Init a specific block builder at a specific block that works for the test runtime. - /// - /// Same as [`InitBlockBuilder::init_block_builder`] besides that it takes a - /// [`type@Hash`] to say which should be the parent block of the block that is being build. - fn init_block_builder_at( - &self, - at: Hash, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - ) -> BlockBuilderAndSupportData<'_>; +impl<'a> BlockBuilderBuilder<'a> { + fn new(client: &'a Client) -> Self { + Self { + client, + at: None, + validation_data: None, + relay_sproof_builder: Default::default(), + timestamp: None, + ignored_nodes: None, + pre_digests: Vec::new(), + } + } - /// Init a specific block builder using the given pre-digests. - /// - /// Same as [`InitBlockBuilder::init_block_builder`] besides that it takes vector of - /// [`DigestItem`]'s that are passed as pre-digest to the block builder. - fn init_block_builder_with_pre_digests( - &self, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - pre_digests: Vec, - ) -> BlockBuilderAndSupportData<'_>; - /// Init a specific block builder at a specific block that works for the test runtime. - /// - /// Same as [`InitBlockBuilder::init_block_builder_with_timestamp`] besides that it takes - /// `ignored_nodes` that instruct the proof recorder to not record these nodes. - fn init_block_builder_with_ignored_nodes( - &self, - at: Hash, - validation_data: Option>, + /// Set the parent block hash for the block builder. + pub fn at(mut self, at: Hash) -> Self { + self.at = Some(at); + self + } + + /// Set the validation data for the block builder. 
+ pub fn with_validation_data( + mut self, + validation_data: PersistedValidationData, + ) -> Self { + self.validation_data = Some(validation_data); + self + } + + /// Set the relay state proof builder for the block builder. + pub fn with_relay_sproof_builder( + mut self, relay_sproof_builder: RelayStateSproofBuilder, - timestamp: u64, - ignored_nodes: ProofRecorderIgnoredNodes, - extra_pre_digests: Option>, - ) -> BlockBuilderAndSupportData<'_>; + ) -> Self { + self.relay_sproof_builder = relay_sproof_builder; + self + } + + /// Set the timestamp for the block builder. + pub fn with_timestamp(mut self, timestamp: u64) -> Self { + self.timestamp = Some(timestamp); + self + } + + /// Set the ignored nodes for the proof recorder. + pub fn with_ignored_nodes(mut self, ignored_nodes: ProofRecorderIgnoredNodes) -> Self { + self.ignored_nodes = Some(ignored_nodes); + self + } + + /// Set the pre-digest items for the block builder. + pub fn with_pre_digests(mut self, pre_digests: Vec) -> Self { + self.pre_digests = pre_digests; + self + } + + /// Build the block builder with the configured parameters. + pub fn build(self) -> BlockBuilderAndSupportData<'a> { + let at = self.at.unwrap_or_else(|| self.client.chain_info().best_hash); + init_block_builder( + self.client, + at, + self.validation_data, + self.relay_sproof_builder, + self.timestamp, + self.ignored_nodes, + Some(self.pre_digests), + ) + } +} - /// Init a specific block builder that works for the test runtime. +/// An extension for the Cumulus test client to build a block builder. +pub trait BuildBlockBuilder { + /// Initialize a block builder builder that can be configured and built. /// - /// Same as [`InitBlockBuilder::init_block_builder`] besides that it takes a - /// [`type@Hash`] to say which should be the parent block of the block that is being build and - /// it will use the given `timestamp` as input for the timestamp inherent. 
- fn init_block_builder_with_timestamp( - &self, - at: Hash, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - timestamp: u64, - ) -> BlockBuilderAndSupportData<'_>; + /// This returns a builder that can be configured with various options like + /// parent block hash, validation data, relay state proof builder, timestamp, + /// ignored nodes, and pre-digests. Call `.build()` on the builder to create + /// the actual block builder. + /// + /// The builder will automatically create and push the inherents for you to make + /// the block valid for the test runtime. + fn init_block_builder_builder(&self) -> BlockBuilderBuilder<'_>; } fn init_block_builder( @@ -107,8 +135,8 @@ fn init_block_builder( validation_data: Option>, mut relay_sproof_builder: RelayStateSproofBuilder, timestamp: Option, - extra_pre_digests: Option>, ignored_nodes: Option>, + extra_pre_digests: Option>, ) -> BlockBuilderAndSupportData<'_> { let mut runtime_api = client.runtime_api(); runtime_api.set_call_context(sp_core::traits::CallContext::Onchain { import: false }); @@ -152,13 +180,16 @@ fn init_block_builder( let proof_recorder = ProofRecorder::::with_ignored_nodes(ignored_nodes.unwrap_or_default()); - let mut block_builder = BlockBuilderBuilder::new(client) + let mut extra_extensions = Extensions::default(); + extra_extensions.register(ProofSizeExt::new(proof_recorder.clone())); + + let mut block_builder = sc_block_builder::BlockBuilderBuilder::new(client) .on_parent_block(at) .fetch_parent_block_number(client) .unwrap() .with_proof_recorder(Some(proof_recorder.clone())) .with_inherent_digests(pre_digests) - .with_extra_extensions(ProofSizeExt::new(proof_recorder.clone())) + .with_extra_extensions(extra_extensions) .build() .expect("Creates new block builder for test runtime"); @@ -201,79 +232,9 @@ fn init_block_builder( } } -impl InitBlockBuilder for Client { - fn init_block_builder( - &self, - validation_data: Option>, - relay_sproof_builder: 
RelayStateSproofBuilder, - ) -> BlockBuilderAndSupportData<'_> { - let chain_info = self.chain_info(); - self.init_block_builder_at(chain_info.best_hash, validation_data, relay_sproof_builder) - } - - fn init_block_builder_with_pre_digests( - &self, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - pre_digests: Vec, - ) -> BlockBuilderAndSupportData<'_> { - let chain_info = self.chain_info(); - init_block_builder( - self, - chain_info.best_hash, - validation_data, - relay_sproof_builder, - None, - Some(pre_digests), - None, - ) - } - - fn init_block_builder_at( - &self, - at: Hash, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - ) -> BlockBuilderAndSupportData<'_> { - init_block_builder(self, at, validation_data, relay_sproof_builder, None, None, None) - } - - fn init_block_builder_with_ignored_nodes( - &self, - at: Hash, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - timestamp: u64, - ignored_nodes: ProofRecorderIgnoredNodes, - extra_pre_digests: Option>, - ) -> BlockBuilderAndSupportData<'_> { - init_block_builder( - self, - at, - validation_data, - relay_sproof_builder, - Some(timestamp), - extra_pre_digests, - Some(ignored_nodes), - ) - } - - fn init_block_builder_with_timestamp( - &self, - at: Hash, - validation_data: Option>, - relay_sproof_builder: RelayStateSproofBuilder, - timestamp: u64, - ) -> BlockBuilderAndSupportData<'_> { - init_block_builder( - self, - at, - validation_data, - relay_sproof_builder, - Some(timestamp), - None, - None, - ) +impl BuildBlockBuilder for Client { + fn init_block_builder_builder(&self) -> BlockBuilderBuilder<'_> { + BlockBuilderBuilder::new(self) } } @@ -287,7 +248,9 @@ pub trait BuildParachainBlockData { impl<'a> BuildParachainBlockData for sc_block_builder::BlockBuilder<'a, Block, Client> { fn build_parachain_block(self, parent_state_root: Hash) -> ParachainBlockData { - let proof_recorder = self.proof_recorder().expect("Proof 
recorder is always set"); + let proof_recorder = self + .proof_recorder() + .expect("Proof recorder is always set for the test block builder; qed"); let built_block = self.build().expect("Builds the block"); let storage_proof = proof_recorder diff --git a/cumulus/test/client/src/lib.rs b/cumulus/test/client/src/lib.rs index c39e1180a24dc..f9dbc9339551d 100644 --- a/cumulus/test/client/src/lib.rs +++ b/cumulus/test/client/src/lib.rs @@ -19,14 +19,15 @@ mod block_builder; pub use block_builder::*; use codec::{Decode, Encode}; +use cumulus_pallet_parachain_system::block_weight::DynamicMaxBlockWeight; pub use cumulus_test_runtime as runtime; use cumulus_test_runtime::AuraId; pub use polkadot_parachain_primitives::primitives::{ BlockData, HeadData, ValidationParams, ValidationResult, }; use runtime::{ - Balance, Block, BlockHashCount, Runtime, RuntimeCall, Signature, SignedPayload, TxExtension, - UncheckedExtrinsic, VERSION, + test_pallet, Balance, Block, BlockHashCount, Runtime, RuntimeCall, Signature, SignedPayload, + TxExtension, UncheckedExtrinsic, VERSION, }; use sc_consensus_aura::{ find_pre_digest, @@ -141,24 +142,27 @@ pub fn generate_extrinsic_with_pair( let period = BlockHashCount::get().checked_next_power_of_two().map(|c| c / 2).unwrap_or(2) as u64; let tip = 0; - let tx_ext: TxExtension = ( - frame_system::AuthorizeCall::::new(), - frame_system::CheckNonZeroSender::::new(), - frame_system::CheckSpecVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(Era::mortal(period, current_block)), - frame_system::CheckNonce::::from(nonce), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ) - .into(); + let tx_ext: TxExtension = DynamicMaxBlockWeight::new( + ( + frame_system::AuthorizeCall::::new(), + frame_system::CheckNonZeroSender::::new(), + frame_system::CheckSpecVersion::::new(), + frame_system::CheckGenesis::::new(), + 
frame_system::CheckEra::::from(Era::mortal(period, current_block)), + frame_system::CheckNonce::::from(nonce), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + test_pallet::TestTransactionExtension::::default(), + ) + .into(), + ); let function = function.into(); let raw_payload = SignedPayload::from_raw( function.clone(), tx_ext.clone(), - ((), (), VERSION.spec_version, genesis_block, current_block_hash, (), (), ()), + ((), (), VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| origin.sign(e)); diff --git a/cumulus/test/runtime/Cargo.toml b/cumulus/test/runtime/Cargo.toml index e57a67cd48fd3..7b224dc0fef4b 100644 --- a/cumulus/test/runtime/Cargo.toml +++ b/cumulus/test/runtime/Cargo.toml @@ -12,6 +12,7 @@ workspace = true codec = { features = ["derive"], workspace = true } scale-info = { features = ["derive"], workspace = true } serde_json = { workspace = true } +tracing = { workspace = true } # Substrate frame-executive = { workspace = true } @@ -27,6 +28,7 @@ pallet-session = { workspace = true } pallet-sudo = { workspace = true } pallet-timestamp = { workspace = true } pallet-transaction-payment = { workspace = true } +pallet-utility = { workspace = true } sp-api = { workspace = true } sp-block-builder = { workspace = true } sp-consensus-aura = { workspace = true } @@ -50,6 +52,7 @@ cumulus-pallet-parachain-system = { workspace = true } cumulus-pallet-weight-reclaim = { workspace = true } cumulus-primitives-aura = { workspace = true } cumulus-primitives-core = { workspace = true } +cumulus-primitives-storage-weight-reclaim = { workspace = true } parachain-info = { workspace = true } [build-dependencies] @@ -64,6 +67,7 @@ std = [ "cumulus-pallet-weight-reclaim/std", "cumulus-primitives-aura/std", "cumulus-primitives-core/std", + "cumulus-primitives-storage-weight-reclaim/std", "frame-executive/std", "frame-support/std", 
"frame-system-rpc-runtime-api/std", @@ -77,6 +81,7 @@ std = [ "pallet-sudo/std", "pallet-timestamp/std", "pallet-transaction-payment/std", + "pallet-utility/std", "parachain-info/std", "polkadot-primitives/std", "scale-info/std", @@ -95,6 +100,7 @@ std = [ "sp-transaction-pool/std", "sp-version/std", "substrate-wasm-builder", + "tracing/std", ] increment-spec-version = [] # A runtime which expects to build behind the relay chain tip. @@ -105,6 +111,8 @@ elastic-scaling = [] elastic-scaling-500ms = [] # A runtime with a slot duration of 6s but parameters that allow multiple blocks per slot. elastic-scaling-multi-block-slot = [] +# A runtime that uses block-bundling. +block-bundling = [] # A runtime with 12s slot duration which only authors one block per slot. sync-backing = [] # A runtime with 6s slot duration which only authors one block per slot. diff --git a/cumulus/test/runtime/build.rs b/cumulus/test/runtime/build.rs index 4c1298575f816..712106903db8e 100644 --- a/cumulus/test/runtime/build.rs +++ b/cumulus/test/runtime/build.rs @@ -77,6 +77,13 @@ fn main() { .set_file_name("wasm_binary_elastic_scaling_12s_slot.rs") .build(); + WasmBuilder::new() + .with_current_project() + .enable_feature("block-bundling") + .import_memory() + .set_file_name("wasm_binary_block_bundling.rs") + .build(); + WasmBuilder::init_with_defaults() .enable_feature("slot-duration-18s") .enable_feature("increment-spec-version") diff --git a/cumulus/test/runtime/src/lib.rs b/cumulus/test/runtime/src/lib.rs index 89b64359a542b..72c5329e960ae 100644 --- a/cumulus/test/runtime/src/lib.rs +++ b/cumulus/test/runtime/src/lib.rs @@ -46,14 +46,14 @@ pub mod elastic_scaling { include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling.rs")); } -pub mod elastic_scaling_multi_block_slot { +pub mod elastic_scaling_12s_slot { #[cfg(feature = "std")] - include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling_multi_block_slot.rs")); + include!(concat!(env!("OUT_DIR"), 
"/wasm_binary_elastic_scaling_12s_slot.rs")); } -pub mod elastic_scaling_12s_slot { +pub mod block_bundling { #[cfg(feature = "std")] - include!(concat!(env!("OUT_DIR"), "/wasm_binary_elastic_scaling_12s_slot.rs")); + include!(concat!(env!("OUT_DIR"), "/wasm_binary_block_bundling.rs")); } pub mod sync_backing { @@ -72,7 +72,7 @@ pub mod slot_duration_18s { } mod genesis_config_presets; -mod test_pallet; +pub mod test_pallet; extern crate alloc; @@ -80,7 +80,7 @@ use alloc::{vec, vec::Vec}; use frame_support::{derive_impl, traits::OnRuntimeUpgrade, PalletId}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; pub use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use sp_core::{ConstBool, ConstU32, ConstU64, OpaqueMetadata}; +use sp_core::{ConstBool, ConstU32, ConstU64, Get, OpaqueMetadata}; use sp_runtime::{ generic, impl_opaque_keys, @@ -121,7 +121,7 @@ pub use pallet_timestamp::{Call as TimestampCall, Now}; #[cfg(any(feature = "std", test))] pub use sp_runtime::BuildStorage; pub use sp_runtime::{Perbill, Permill}; -pub use test_pallet::Call as TestPalletCall; +pub use test_pallet::{Call as TestPalletCall, TestTransactionExtension}; pub type SessionHandlers = (); @@ -134,7 +134,7 @@ impl_opaque_keys! { /// The para-id used in this runtime. 
pub const PARACHAIN_ID: u32 = 100; -#[cfg(feature = "elastic-scaling-500ms")] +#[cfg(any(feature = "elastic-scaling-500ms", feature = "block-bundling"))] pub const BLOCK_PROCESSING_VELOCITY: u32 = 12; #[cfg(all(feature = "elastic-scaling-multi-block-slot", not(feature = "elastic-scaling-500ms")))] @@ -152,6 +152,7 @@ pub const BLOCK_PROCESSING_VELOCITY: u32 = 3; feature = "elastic-scaling-500ms", feature = "elastic-scaling-multi-block-slot", feature = "relay-parent-offset", + feature = "block-bundling", )))] pub const BLOCK_PROCESSING_VELOCITY: u32 = 1; @@ -161,9 +162,18 @@ const UNINCLUDED_SEGMENT_CAPACITY: u32 = 3; #[cfg(all(feature = "sync-backing", not(feature = "async-backing")))] const UNINCLUDED_SEGMENT_CAPACITY: u32 = 1; -// The `+2` shouldn't be needed, https://github.com/paritytech/polkadot-sdk/issues/5260 +/// We need `VELOCITY * 3`, because the block flow is the following: +/// +/// - Collator produces the block(s) on relay chain block `X` +/// - In the mean time the relay chain is building block `X + 1` +/// - The collator sends the collation to the relay chain and it gets backed on chain in relay block +/// `X + 2` +/// - The collation then gets included on chain in relay block `X + 3` +/// - As we are building on `RELAY_PARENT_OFFSET` old relay parents, the included block from the +/// parachain is also `RELAY_PARENT_OFFSET` relay blocks older (one relay block may contains +/// multiple parachain blocks). 
#[cfg(all(not(feature = "sync-backing"), not(feature = "async-backing")))] -const UNINCLUDED_SEGMENT_CAPACITY: u32 = BLOCK_PROCESSING_VELOCITY * (2 + RELAY_PARENT_OFFSET) + 2; +const UNINCLUDED_SEGMENT_CAPACITY: u32 = BLOCK_PROCESSING_VELOCITY * (3 + RELAY_PARENT_OFFSET); #[cfg(feature = "slot-duration-18s")] pub const SLOT_DURATION: u64 = 18000; @@ -242,17 +252,18 @@ const AVERAGE_ON_INITIALIZE_RATIO: Perbill = Perbill::from_percent(10); /// We allow `Normal` extrinsics to fill up the block up to 75%, the rest can be used /// by Operational extrinsics. const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75); -/// We allow for 1 second of compute with a 6 second average block time. -const MAXIMUM_BLOCK_WEIGHT: Weight = Weight::from_parts( - WEIGHT_REF_TIME_PER_SECOND, - cumulus_primitives_core::relay_chain::MAX_POV_SIZE as u64, -); + +type MaximumBlockWeight = cumulus_pallet_parachain_system::block_weight::MaxParachainBlockWeight< + Runtime, + ConstU32, +>; parameter_types! { /// Target number of blocks per relay chain slot. pub const NumberOfBlocksPerRelaySlot: u32 = 12; pub const BlockHashCount: BlockNumber = 250; pub const Version: RuntimeVersion = VERSION; + /// We allow for 1 second of compute with a 6 second average block time. pub RuntimeBlockLength: BlockLength = BlockLength::builder().max_length(10 * 1024 * 1024).max_header_size(5 * 1024 * 1024).build(); pub RuntimeBlockWeights: BlockWeights = BlockWeights::builder() @@ -261,14 +272,14 @@ parameter_types! 
{ weights.base_extrinsic = ExtrinsicBaseWeight::get(); }) .for_class(DispatchClass::Normal, |weights| { - weights.max_total = Some(NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT); + weights.max_total = Some(NORMAL_DISPATCH_RATIO * MaximumBlockWeight::get()); }) .for_class(DispatchClass::Operational, |weights| { - weights.max_total = Some(MAXIMUM_BLOCK_WEIGHT); + weights.max_total = Some(MaximumBlockWeight::get()); // Operational transactions have some extra reserved space, so that they - // are included even if block reached `MAXIMUM_BLOCK_WEIGHT`. + // are included even if block reached `MaximumBlockWeight`. weights.reserved = Some( - MAXIMUM_BLOCK_WEIGHT - NORMAL_DISPATCH_RATIO * MAXIMUM_BLOCK_WEIGHT + MaximumBlockWeight::get() - NORMAL_DISPATCH_RATIO * MaximumBlockWeight::get() ); }) .avg_block_initialization(AVERAGE_ON_INITIALIZE_RATIO) @@ -296,6 +307,10 @@ impl frame_system::Config for Runtime { type SS58Prefix = SS58Prefix; type OnSetCode = cumulus_pallet_parachain_system::ParachainSetCode; type MaxConsumers = frame_support::traits::ConstU32<16>; + type PreInherents = cumulus_pallet_parachain_system::block_weight::DynamicMaxBlockWeightHooks< + Runtime, + ConstU32, + >; type SingleBlockMigrations = SingleBlockMigrations; } @@ -366,6 +381,13 @@ impl pallet_sudo::Config for Runtime { type WeightInfo = pallet_sudo::weights::SubstrateWeight; } +impl pallet_utility::Config for Runtime { + type RuntimeCall = RuntimeCall; + type RuntimeEvent = RuntimeEvent; + type PalletsOrigin = OriginCaller; + type WeightInfo = pallet_utility::weights::SubstrateWeight; +} + impl pallet_glutton::Config for Runtime { type RuntimeEvent = RuntimeEvent; type AdminOrigin = EnsureRoot; @@ -425,6 +447,7 @@ construct_runtime! 
{ ParachainInfo: parachain_info, Balances: pallet_balances, Sudo: pallet_sudo, + Utility: pallet_utility, TransactionPayment: pallet_transaction_payment, TestPallet: test_pallet, Glutton: pallet_glutton, @@ -461,19 +484,25 @@ pub type SignedBlock = generic::SignedBlock; /// BlockId type as expected by this runtime. pub type BlockId = generic::BlockId; /// The extension to the basic transaction logic. -pub type TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim< +pub type TxExtension = cumulus_pallet_parachain_system::block_weight::DynamicMaxBlockWeight< Runtime, - ( - frame_system::AuthorizeCall, - frame_system::CheckNonZeroSender, - frame_system::CheckSpecVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, - ), + cumulus_pallet_weight_reclaim::StorageWeightReclaim< + Runtime, + ( + frame_system::AuthorizeCall, + frame_system::CheckNonZeroSender, + frame_system::CheckSpecVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, + test_pallet::TestTransactionExtension, + ), + >, + ConstU32, >; + /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; @@ -599,6 +628,7 @@ impl_runtime_apis! { fn check_inherents(block: ::LazyBlock, data: sp_inherents::InherentData) -> sp_inherents::CheckInherentsResult { data.check_extrinsics(&block) } + } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { @@ -659,19 +689,19 @@ impl_runtime_apis! { fn parachain_id() -> ParaId { ParachainInfo::parachain_id() } - } + // "Elastic scaling" should run with the fallback method. 
+ #[cfg(any(not(feature = "elastic-scaling"), feature = "std"))] impl cumulus_primitives_core::TargetBlockRate for Runtime { fn target_block_rate() -> u32 { - 1 + BLOCK_PROCESSING_VELOCITY } } impl cumulus_primitives_core::KeyToIncludeInRelayProof for Runtime { fn keys_to_prove() -> cumulus_primitives_core::RelayProofRequest { use cumulus_primitives_core::RelayStorageKey; - RelayProofRequest { keys: vec![ // Request a key to verify its inclusion in the proof. diff --git a/cumulus/test/runtime/src/test_pallet.rs b/cumulus/test/runtime/src/test_pallet.rs index 15a5a29ab2ad3..0d324dc2904ea 100644 --- a/cumulus/test/runtime/src/test_pallet.rs +++ b/cumulus/test/runtime/src/test_pallet.rs @@ -19,8 +19,9 @@ pub use pallet::*; use codec::Encode; -/// Some key that we set in genesis and only read in [`TestOnRuntimeUpgrade`] to ensure that -/// [`OnRuntimeUpgrade`] works as expected. +/// Some key that we set in genesis and only read in +/// [`SingleBlockMigrations`](crate::SingleBlockMigrations) to ensure that +/// [`OnRuntimeUpgrade`](frame_support::traits::OnRuntimeUpgrade) works as expected. pub const TEST_RUNTIME_UPGRADE_KEY: &[u8] = b"+test_runtime_upgrade_key+"; /// Generates the storage key for Alice's account on the relay chain. 
@@ -39,10 +40,22 @@ pub fn relay_alice_account_key() -> alloc::vec::Vec { #[frame_support::pallet(dev_mode)] pub mod pallet { use crate::test_pallet::TEST_RUNTIME_UPGRADE_KEY; - use alloc::vec; - use cumulus_primitives_core::{ParaId, XcmpMessageSource}; - use frame_support::pallet_prelude::*; + use alloc::{vec, vec::Vec}; + use cumulus_primitives_core::{CumulusDigestItem, ParaId, XcmpMessageSource}; + use cumulus_primitives_storage_weight_reclaim::get_proof_size; + use frame_support::{ + dispatch::DispatchInfo, + inherent::{InherentData, InherentIdentifier, ProvideInherent}, + pallet_prelude::*, + traits::IsSubType, + weights::constants::WEIGHT_REF_TIME_PER_SECOND, + DebugNoBound, + }; use frame_system::pallet_prelude::*; + use sp_runtime::traits::{Dispatchable, Implication, TransactionExtension}; + + /// The inherent identifier for weight consumption. + pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"consume0"; #[pallet::pallet] pub struct Pallet(_); @@ -62,16 +75,100 @@ pub mod pallet { impl XcmpMessageSource for Pallet { fn take_outbound_messages( maximum_channels: usize, + excluded_recipients: &[ParaId], ) -> alloc::vec::Vec<(ParaId, alloc::vec::Vec)> { PendingOutboundHrmpMessages::::mutate(|messages| { - let to_take = messages.len().min(maximum_channels); - messages.drain(..to_take).collect() + let mut taken_recipients = alloc::vec::Vec::new(); + let mut result = alloc::vec::Vec::new(); + messages.retain(|(recipient, data)| { + if result.len() >= maximum_channels || + excluded_recipients.contains(recipient) || + taken_recipients.contains(recipient) + { + return true; + } + taken_recipients.push(*recipient); + result.push((*recipient, data.clone())); + false + }); + result }) } } + /// When active, `on_initialize` queues one HRMP message per block, alternating + /// between `HRMP_RECIPIENT_HIGH` (odd blocks) and `HRMP_RECIPIENT_LOW` (even blocks). 
+ /// This produces descending recipient order across consecutive blocks in a bundle, + /// exercising the HRMP message sorting in the collation path. + #[pallet::storage] + pub type HrmpSendingActive = StorageValue<_, bool, ValueQuery>; + + /// Flag to indicate if a 1s weight should be registered in the next `on_initialize`. + #[pallet::storage] + pub type ScheduleWeightRegistration = StorageValue<_, bool, ValueQuery>; + + /// Weight to be consumed by the inherent call. + #[pallet::storage] + pub type InherentWeightConsume = StorageValue<_, Weight, OptionQuery>; + + /// A map that contains on single big value at the current block. + /// + /// In every block we are moving the big value from the previous block to current block. This is + /// done to test that the storage proof size between multiple blocks in the same bundle is + /// shared. + #[pallet::storage] + pub type BigValueMove = + StorageMap<_, Twox64Concat, BlockNumberFor, Vec, OptionQuery>; + + pub const HRMP_RECIPIENT_LOW: u32 = 2500; + pub const HRMP_RECIPIENT_HIGH: u32 = 2600; + #[pallet::hooks] - impl Hooks> for Pallet {} + impl Hooks> for Pallet { + fn on_initialize(n: BlockNumberFor) -> Weight { + if HrmpSendingActive::::get() { + let block_num: u32 = n.try_into().unwrap_or(0); + let recipient = if block_num % 2 == 1 { + ParaId::from(HRMP_RECIPIENT_HIGH) + } else { + ParaId::from(HRMP_RECIPIENT_LOW) + }; + PendingOutboundHrmpMessages::::mutate(|messages| { + messages.push((recipient, vec![block_num as u8])); + }); + } + + if ScheduleWeightRegistration::::get() { + let weight_to_register = Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 0); + + let left_weight = frame_system::Pallet::::remaining_block_weight(); + + if left_weight.can_consume(weight_to_register) { + tracing::info!("Consuming 1s of weight :)"); + // We have enough capacity, consume the flag and register the weight + ScheduleWeightRegistration::::kill(); + return weight_to_register; + } + } + + if let Some(mut value) = 
BigValueMove::::take(n - 1u32.into()) { + // Modify the value a little bit. + let parent_hash = frame_system::Pallet::::parent_hash(); + value[..parent_hash.as_ref().len()].copy_from_slice(parent_hash.as_ref()); + + BigValueMove::::insert(n, value); + + // Depositing the event is important, because then we write the actual proof size + // into the state. If some node returns a different proof size on import of this + // block, we will detect it this way as the storage root will be different. + Self::deposit_event(Event::MovedBigValue { + proof_size: get_proof_size().unwrap_or_default(), + }) + } + + Weight::zero() + } + } #[pallet::call] impl Pallet { @@ -165,6 +262,103 @@ pub mod pallet { }); Ok(()) } + + /// Queues one HRMP message each to `n` consecutive recipients starting from + /// `first_recipient`. + #[pallet::weight(0)] + pub fn queue_hrmp_messages_to_n_recipients( + _: OriginFor, + n: u32, + first_recipient: ParaId, + ) -> DispatchResult { + PendingOutboundHrmpMessages::::mutate(|messages| { + for i in 0..n { + messages.push((ParaId::from(u32::from(first_recipient) + i), vec![i as u8])); + } + }); + Ok(()) + } + + /// Schedule a 1 second weight registration in the next `on_initialize`. + #[pallet::weight(0)] + pub fn schedule_weight_registration(_: OriginFor) -> DispatchResult { + ScheduleWeightRegistration::::set(true); + Ok(()) + } + + /// Set the weight to be consumed by the next inherent call. + #[pallet::weight(0)] + pub fn set_inherent_weight_consume(_: OriginFor, weight: Weight) -> DispatchResult { + InherentWeightConsume::::put(weight); + Ok(()) + } + + /// Consume weight via inherent call (clears the storage after consuming). 
+ #[pallet::weight(( + InherentWeightConsume::::get().unwrap_or_default(), + DispatchClass::Mandatory + ))] + pub fn consume_weight_inherent(origin: OriginFor) -> DispatchResult { + ensure_none(origin)?; + + // Clear the storage item to ensure this can only be called once per inherent + InherentWeightConsume::::kill(); + + Ok(()) + } + + /// This function registers a high weight usage manually, while it actually only announces + /// to use a weight of `0` :) + /// + /// Uses the [`TestTransactionExtension`] logic to ensure the transaction is only accepted + /// when we can fit the `1s` weight into the block. + #[pallet::weight(0)] + pub fn use_more_weight_than_announced( + _: OriginFor, + _must_be_first_block_in_core: bool, + ) -> DispatchResult { + // Register weight manually. + frame_system::Pallet::::register_extra_weight_unchecked( + Weight::from_parts(WEIGHT_REF_TIME_PER_SECOND, 0), + DispatchClass::Normal, + ); + + Ok(()) + } + + /// Deposits the `UseFullCore` digest item to signal that this block should use the full + /// core. 
+ #[pallet::weight(0)] + pub fn set_use_full_core(_: OriginFor) -> DispatchResult { + frame_system::Pallet::::deposit_log(CumulusDigestItem::UseFullCore.to_digest_item()); + Ok(()) + } + } + + #[pallet::inherent] + impl ProvideInherent for Pallet { + type Call = Call; + type Error = sp_inherents::MakeFatalError<()>; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(_data: &InherentData) -> Option { + // Check if there's weight to consume from storage + let weight_to_consume = InherentWeightConsume::::get()?; + + // Check if the weight fits in the remaining block capacity + let remaining_weight = frame_system::Pallet::::remaining_block_weight(); + + if remaining_weight.can_consume(weight_to_consume) { + Some(Call::consume_weight_inherent {}) + } else { + // Weight doesn't fit, don't create the inherent + None + } + } + + fn is_inherent(call: &Self::Call) -> bool { + matches!(call, Call::consume_weight_inherent {}) + } } #[derive(frame_support::DefaultNoBound)] @@ -172,12 +366,122 @@ pub mod pallet { pub struct GenesisConfig { #[serde(skip)] pub _config: core::marker::PhantomData, + /// Controls if the `BigValueMove` logic is enabled. + pub enable_big_value_move: bool, + /// Activate HRMP sending with descending recipients from genesis. 
+ pub enable_hrmp_sending: bool, } #[pallet::genesis_build] impl BuildGenesisConfig for GenesisConfig { fn build(&self) { sp_io::storage::set(TEST_RUNTIME_UPGRADE_KEY, &[1, 2, 3, 4]); + + if self.enable_big_value_move { + BigValueMove::::insert(BlockNumberFor::::from(0u32), vec![0u8; 4 * 1024]); + } + + if self.enable_hrmp_sending { + HrmpSendingActive::::set(true); + } + } + } + + #[pallet::event] + #[pallet::generate_deposit(pub(super) fn deposit_event)] + pub enum Event { + MovedBigValue { proof_size: u64 }, + } + + #[derive( + DebugNoBound, + Encode, + Decode, + CloneNoBound, + EqNoBound, + PartialEqNoBound, + TypeInfo, + DecodeWithMemTracking, + )] + #[scale_info(skip_type_params(T))] + pub struct TestTransactionExtension(core::marker::PhantomData); + + impl Default for TestTransactionExtension { + fn default() -> Self { + Self(core::marker::PhantomData) + } + } + + impl TransactionExtension for TestTransactionExtension + where + T: Config + Send + Sync, + T::RuntimeCall: IsSubType> + Dispatchable, + { + const IDENTIFIER: &'static str = "TestTransactionExtension"; + type Implicit = (); + type Val = (); + type Pre = (); + + fn validate( + &self, + origin: T::RuntimeOrigin, + call: &T::RuntimeCall, + _info: &DispatchInfo, + _len: usize, + _self_implicit: Self::Implicit, + _inherited_implication: &impl Implication, + _: TransactionSource, + ) -> ValidateResult { + if let Some(call) = call.is_sub_type() { + match call { + Call::use_more_weight_than_announced { must_be_first_block_in_core } => { + if { + let digest = frame_system::Pallet::::digest(); + + CumulusDigestItem::find_block_bundle_info(&digest) + // Default being `true` to support `validate_transaction` + .map_or(true, |bi| { + // Either we want that the transaction goes into the first block + // of a core + bi.index == 0 && *must_be_first_block_in_core || + // Or it goes to any block that isn't the first block + bi.index > 0 && !*must_be_first_block_in_core + }) + } { + Ok(( + ValidTransaction { + 
provides: vec![vec![1, 2, 3, 4, 5]], + ..Default::default() + }, + (), + origin, + )) + } else { + Err(TransactionValidityError::Invalid( + InvalidTransaction::ExhaustsResources, + )) + } + }, + _ => Ok((Default::default(), (), origin)), + } + } else { + Ok((Default::default(), (), origin)) + } + } + + fn prepare( + self, + val: Self::Val, + _origin: &T::RuntimeOrigin, + _call: &T::RuntimeCall, + _info: &DispatchInfo, + _len: usize, + ) -> Result { + Ok(val) + } + + fn weight(&self, _: &T::RuntimeCall) -> Weight { + Weight::zero() } } } diff --git a/cumulus/test/service/benches/block_production.rs b/cumulus/test/service/benches/block_production.rs index 3b0db578041f0..246b14deead67 100644 --- a/cumulus/test/service/benches/block_production.rs +++ b/cumulus/test/service/benches/block_production.rs @@ -81,7 +81,7 @@ fn benchmark_block_production(c: &mut Criterion) { let mut block_builder = BlockBuilderBuilder::new(&*client) .on_parent_block(chain.best_hash) .with_parent_block_number(chain.best_number) - .enable_proof_recording() + .with_proof_recorder(Some(Default::default())) .build() .unwrap(); diff --git a/cumulus/test/service/benches/block_production_glutton.rs b/cumulus/test/service/benches/block_production_glutton.rs index 6ab2c0e56bd18..31e0d3ce1d494 100644 --- a/cumulus/test/service/benches/block_production_glutton.rs +++ b/cumulus/test/service/benches/block_production_glutton.rs @@ -78,7 +78,7 @@ fn benchmark_block_production_compute(c: &mut Criterion) { let mut block_builder = BlockBuilderBuilder::new(&*client) .on_parent_block(best_hash) .with_parent_block_number(best_number) - .enable_proof_recording() + .with_proof_recorder(Some(Default::default())) .build() .unwrap(); block_builder.push(validation_data).unwrap(); diff --git a/cumulus/test/service/benches/validate_block.rs b/cumulus/test/service/benches/validate_block.rs index 60af340141581..47b8dcd038abc 100644 --- a/cumulus/test/service/benches/validate_block.rs +++ 
b/cumulus/test/service/benches/validate_block.rs @@ -22,7 +22,7 @@ use cumulus_primitives_core::{ relay_chain::AccountId, ParaId, PersistedValidationData, ValidationParams, }; use cumulus_test_client::{ - generate_extrinsic_with_pair, BuildParachainBlockData, InitBlockBuilder, TestClientBuilder, + generate_extrinsic_with_pair, BuildBlockBuilder, BuildParachainBlockData, TestClientBuilder, ValidationResult, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; @@ -111,8 +111,11 @@ fn benchmark_block_validation(c: &mut Criterion) { ..Default::default() }; - let cumulus_test_client::BlockBuilderAndSupportData { mut block_builder, .. } = - client.init_block_builder(Some(validation_data), sproof_builder.clone()); + let cumulus_test_client::BlockBuilderAndSupportData { mut block_builder, .. } = client + .init_block_builder_builder() + .with_validation_data(validation_data) + .with_relay_sproof_builder(sproof_builder.clone()) + .build(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); diff --git a/cumulus/test/service/benches/validate_block_glutton.rs b/cumulus/test/service/benches/validate_block_glutton.rs index d680401cf1b5c..74f9d9074387c 100644 --- a/cumulus/test/service/benches/validate_block_glutton.rs +++ b/cumulus/test/service/benches/validate_block_glutton.rs @@ -20,8 +20,8 @@ use core::time::Duration; use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; use cumulus_primitives_core::{relay_chain::AccountId, PersistedValidationData, ValidationParams}; use cumulus_test_client::{ - generate_extrinsic_with_pair, BlockBuilderAndSupportData, BuildParachainBlockData, Client, - InitBlockBuilder, ParachainBlockData, TestClientBuilder, ValidationResult, + generate_extrinsic_with_pair, BlockBuilderAndSupportData, BuildBlockBuilder, + BuildParachainBlockData, Client, ParachainBlockData, TestClientBuilder, ValidationResult, }; use cumulus_test_relay_sproof_builder::RelayStateSproofBuilder; use cumulus_test_runtime::{Block, 
GluttonCall, Header, SudoCall}; @@ -88,8 +88,10 @@ fn benchmark_block_validation(c: &mut Criterion) { parent_head: parent_header.encode().into(), ..Default::default() }; - let BlockBuilderAndSupportData { block_builder, .. } = - client.init_block_builder(Some(validation_data), Default::default()); + let BlockBuilderAndSupportData { block_builder, .. } = client + .init_block_builder_builder() + .with_validation_data(validation_data) + .build(); let parachain_block = block_builder.build_parachain_block(*parent_header.state_root()); let proof_size_in_kb = parachain_block.proof().encoded_size() as f64 / 1024f64; @@ -198,8 +200,10 @@ fn set_glutton_parameters( ); extrinsics.push(set_storage); - let BlockBuilderAndSupportData { mut block_builder, .. } = - client.init_block_builder(Some(validation_data), Default::default()); + let BlockBuilderAndSupportData { mut block_builder, .. } = client + .init_block_builder_builder() + .with_validation_data(validation_data) + .build(); for extrinsic in extrinsics { block_builder.push(extrinsic).unwrap(); diff --git a/cumulus/test/service/src/chain_spec.rs b/cumulus/test/service/src/chain_spec.rs index 3a8f5359d53a0..a49acbf6fca57 100644 --- a/cumulus/test/service/src/chain_spec.rs +++ b/cumulus/test/service/src/chain_spec.rs @@ -125,11 +125,11 @@ pub fn get_elastic_scaling_mvp_chain_spec(id: Option) -> GenericChainSpe ) } -pub fn get_elastic_scaling_multi_block_slot_chain_spec(id: Option) -> GenericChainSpec { +pub fn get_block_bundling_chain_spec(id: Option) -> GenericChainSpec { get_chain_spec_with_extra_endowed( id, Default::default(), - cumulus_test_runtime::elastic_scaling_multi_block_slot::WASM_BINARY + cumulus_test_runtime::block_bundling::WASM_BINARY .expect("WASM binary was not built, please build it!"), ) } diff --git a/cumulus/test/service/src/cli.rs b/cumulus/test/service/src/cli.rs index a7ab1a08101af..9232097b87154 100644 --- a/cumulus/test/service/src/cli.rs +++ b/cumulus/test/service/src/cli.rs @@ -306,11 +306,11 @@ 
impl SubstrateCli for TestCollatorCli { ParaId::from(2300), ))) as Box<_> }, - "elastic-scaling-multi-block-slot" => { - tracing::info!("Using elastic-scaling multi-block-slot chain spec."); - Box::new(cumulus_test_service::get_elastic_scaling_multi_block_slot_chain_spec( - Some(ParaId::from(2400)), - )) as Box<_> + "block-bundling" => { + tracing::info!("Using block-bundling chain spec."); + Box::new(cumulus_test_service::get_block_bundling_chain_spec(Some(ParaId::from( + 2400, + )))) as Box<_> }, "sync-backing" => { tracing::info!("Using sync backing chain spec."); diff --git a/cumulus/test/service/src/lib.rs b/cumulus/test/service/src/lib.rs index 72f832a86d43d..cbff93aa9d5b2 100644 --- a/cumulus/test/service/src/lib.rs +++ b/cumulus/test/service/src/lib.rs @@ -200,7 +200,7 @@ pub fn new_partial( )?; let client = Arc::new(client); - let (block_import, slot_based_handle) = + let (block_import, block_import_handle) = SlotBasedBlockImport::new(client.clone(), client.clone()); let block_import = ParachainBlockImport::new(block_import, backend.clone()); @@ -245,7 +245,7 @@ pub fn new_partial( task_manager, transaction_pool, select_chain: (), - other: (block_import, slot_based_handle), + other: (block_import, block_import_handle), }; Ok(params) @@ -330,8 +330,7 @@ where let client = params.client.clone(); let backend = params.backend.clone(); - let block_import = params.other.0; - let slot_based_handle = params.other.1; + let (block_import, block_import_handle) = params.other; let relay_chain_interface = build_relay_chain_interface( relay_chain_config, parachain_config.prometheus_registry(), @@ -470,10 +469,9 @@ where para_id, proposer, collator_service, - authoring_duration: Duration::from_millis(2000), reinitialize: false, slot_offset: Duration::from_secs(1), - block_import_handle: slot_based_handle, + block_import_handle, spawner: task_manager.spawn_essential_handle(), export_pov: None, max_pov_percentage: None, @@ -925,7 +923,7 @@ pub fn construct_extrinsic( 
.map(|c| c / 2) .unwrap_or(2) as u64; let tip = 0; - let tx_ext: runtime::TxExtension = ( + let tx_ext: runtime::TxExtension = cumulus_pallet_weight_reclaim::StorageWeightReclaim::from(( frame_system::AuthorizeCall::::new(), frame_system::CheckNonZeroSender::::new(), frame_system::CheckSpecVersion::::new(), @@ -937,12 +935,13 @@ pub fn construct_extrinsic( frame_system::CheckNonce::::from(nonce), frame_system::CheckWeight::::new(), pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - ) - .into(); + runtime::TestTransactionExtension::::default(), + )) + .into(); let raw_payload = runtime::SignedPayload::from_raw( function.clone(), tx_ext.clone(), - ((), (), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), ()), + ((), (), runtime::VERSION.spec_version, genesis_block, current_block_hash, (), (), (), ()), ); let signature = raw_payload.using_encoded(|e| caller.sign(e)); runtime::UncheckedExtrinsic::new_signed( diff --git a/cumulus/zombienet/examples/README.md b/cumulus/zombienet/examples/README.md new file mode 100644 index 0000000000000..84e7a05dfb19d --- /dev/null +++ b/cumulus/zombienet/examples/README.md @@ -0,0 +1,20 @@ +# Zombienet Examples + +## Prerequisites + +Install the zombienet CLI: + +```bash +cargo install zombie-cli +``` + +## Usage + +```bash +./run.sh +``` + +The script will: +1. Build `polkadot`, `polkadot-prepare-worker`, `polkadot-execute-worker`, and `polkadot-parachain` in release mode +2. Add the release directory to `PATH` +3. 
Spawn the network using `zombie-cli` diff --git a/cumulus/zombienet/examples/run.sh b/cumulus/zombienet/examples/run.sh new file mode 100755 index 0000000000000..8e25815dc6b2d --- /dev/null +++ b/cumulus/zombienet/examples/run.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +set -e + +if [ -z "$1" ]; then + echo "Usage: $0 " + echo "Available networks:" + ls -1 "$(dirname "$0")"/*.toml + exit 1 +fi + +NETWORK_FILE="$1" +SCRIPT_DIR="$(dirname "$0")" + +# Resolve to absolute path if relative +if [[ ! "$NETWORK_FILE" = /* ]]; then + if [ -f "$SCRIPT_DIR/$NETWORK_FILE" ]; then + NETWORK_FILE="$SCRIPT_DIR/$NETWORK_FILE" + fi +fi + +if [ ! -f "$NETWORK_FILE" ]; then + echo "Error: Network file '$NETWORK_FILE' not found" + exit 1 +fi + +cargo build --release -p polkadot --bin polkadot-prepare-worker --bin polkadot-execute-worker --bin polkadot -p polkadot-parachain-bin --bin polkadot-parachain + +RELEASE_DIR=$(dirname "$(cargo locate-project --workspace --message-format plain)")/target/release + +export PATH=$RELEASE_DIR:$PATH + +zombie-cli spawn --provider native "$NETWORK_FILE" diff --git a/cumulus/zombienet/examples/small_network.toml b/cumulus/zombienet/examples/small_network.toml index 64765566471a0..3dfcbd935580b 100644 --- a/cumulus/zombienet/examples/small_network.toml +++ b/cumulus/zombienet/examples/small_network.toml @@ -22,4 +22,4 @@ name = "charlie" validator = true image = "parity/polkadot-parachain:latest" command = "polkadot-parachain" -args = ["--force-authoring"] +args = ["--force-authoring", "--authoring=slot-based"] diff --git a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs index 0d9555dd4aa76..d2d60a8a1839b 100644 --- a/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs +++ b/cumulus/zombienet/zombienet-sdk-helpers/src/lib.rs @@ -3,11 +3,10 @@ use anyhow::anyhow; use codec::{Decode, Encode}; -use cumulus_primitives_core::{CumulusDigestItem, RelayBlockIdentifier}; +use 
cumulus_primitives_core::{BlockBundleInfo, CoreInfo, CumulusDigestItem, RelayBlockIdentifier}; use futures::stream::StreamExt; -use polkadot_primitives::{BlakeTwo256, CandidateReceiptV2, Id as ParaId}; -use sp_runtime::traits::Hash; -use std::{cmp::max, collections::HashMap, ops::Range}; +use polkadot_primitives::{BlakeTwo256, CandidateReceiptV2, HashT, Id as ParaId}; +use std::{cmp::max, collections::HashMap, ops::Range, sync::Arc}; use tokio::{ join, time::{sleep, Duration}, @@ -15,16 +14,25 @@ use tokio::{ use zombienet_sdk::subxt::{ self, blocks::Block, - config::{polkadot::PolkadotExtrinsicParamsBuilder, substrate::DigestItem, Config}, + config::{polkadot::PolkadotExtrinsicParamsBuilder, substrate::DigestItem}, dynamic::Value, events::Events, ext::scale_value::value, metadata::Metadata, - tx::{signer::Signer, DynamicPayload, TxStatus}, + tx::{signer::Signer, DynamicPayload, SubmittableTransaction, TxStatus}, utils::H256, - OnlineClient, PolkadotConfig, + Config, OnlineClient, PolkadotConfig, }; +/// Specifies which block should occupy a full core. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum BlockToCheck { + /// The exact block hash provided should occupy a full core. + Exact(H256), + /// Wait for the next first bundle block. + NextFirstBundleBlock(H256), +} + // Maximum number of blocks to wait for a session change. // If it does not arrive for whatever reason, we should not wait forever. const WAIT_MAX_BLOCKS_FOR_SESSION: u32 = 50; @@ -77,25 +85,23 @@ async fn is_session_change( // Helper function for asserting the throughput of parachains, after the first session change. // -// The throughput is measured as total number of backed candidates in a window of relay chain -// blocks. Relay chain blocks with session changes are generally ignored. +// The throughput is measured as total number of backed candidates in a window of `stop_after` relay +// chain blocks. 
The counting window starts from the relay chain block after the first one that +// contains a backed candidate for a tracked para. Relay chain blocks with session changes are +// generally ignored, but it is ensured that no blocks are build on top of these relay blocks. pub async fn assert_para_throughput( relay_client: &OnlineClient, stop_after: u32, expected_candidate_ranges: impl Into>>, + expected_number_of_blocks: impl Into, Range)>>, ) -> Result<(), anyhow::Error> { let ranges = expected_candidate_ranges.into(); - let valid_para_ids: Vec = ranges.keys().cloned().collect(); + let expected_number_of_blocks = expected_number_of_blocks.into(); - assert_para_throughput_with(relay_client, stop_after, ranges, |receipt| { - let para_id = receipt.descriptor.para_id(); - if !valid_para_ids.contains(¶_id) { - return Err(anyhow!("Invalid ParaId detected: {}", para_id)); - } + let candidate_count = + collect_para_throughput(relay_client, stop_after, ranges, |_| Ok(true)).await?; - Ok(true) - }) - .await + assert_expected_number_of_blocks(candidate_count, expected_number_of_blocks).await } /// Like [`assert_para_throughput`], but accepts a closure to validate each backed candidate @@ -113,11 +119,25 @@ pub async fn assert_para_throughput_with( expected_candidate_ranges: impl Into>>, validate: F, ) -> Result<(), anyhow::Error> +where + F: Fn(&CandidateReceiptV2) -> Result, +{ + collect_para_throughput(relay_client, stop_after, expected_candidate_ranges, validate) + .await + .map(|_| ()) +} + +async fn collect_para_throughput( + relay_client: &OnlineClient, + stop_after: u32, + expected_candidate_ranges: impl Into>>, + validate: F, +) -> Result>>, anyhow::Error> where F: Fn(&CandidateReceiptV2) -> Result, { let mut blocks_sub = relay_client.blocks().subscribe_finalized().await?; - let mut candidate_count: HashMap = HashMap::new(); + let mut candidate_count: HashMap>> = HashMap::new(); let mut current_block_count = 0; let expected_candidate_ranges = 
expected_candidate_ranges.into(); @@ -130,9 +150,46 @@ where // Wait for the first session, block production on the parachain will start after that. wait_for_first_session_change(&mut blocks_sub).await?; log::info!( - "First session change detected. Counting {stop_after} finalized relay chain blocks." + "First session change detected. Waiting for backed candidates from all tracked paras before counting." ); + // Skip relay chain blocks until every tracked para has had at least one backed candidate. + // This avoids counting the initial warm-up period where the backing pipeline (PVF + // compilation, first collation) hasn't reached steady state yet. + let mut paras_seen = std::collections::HashSet::new(); + loop { + let block = blocks_sub + .next() + .await + .ok_or_else(|| anyhow!("Block stream ended while waiting for first candidate"))??; + + if is_session_change(&block).await? { + continue; + } + + let events = block.events().await?; + let receipts = find_event_and_decode_fields::>( + &events, + "ParaInclusion", + "CandidateBacked", + )?; + + for receipt in &receipts { + let para_id = receipt.descriptor.para_id(); + if valid_para_ids.contains(¶_id) { + paras_seen.insert(para_id); + } + } + + if paras_seen.len() == valid_para_ids.len() { + log::info!( + "All tracked paras have produced candidates by relay block {}. Counting {stop_after} blocks from the next one.", + block.number() + ); + break; + } + } + while let Some(block) = blocks_sub.next().await { let block = block?; log::debug!("Finalized relay chain block {}", block.number()); @@ -163,7 +220,7 @@ where continue; } - *(candidate_count.entry(para_id).or_default()) += 1; + candidate_count.entry(para_id).or_default().push(receipt); } if current_block_count == stop_after { @@ -173,17 +230,72 @@ where log::info!( "Reached {stop_after} finalized relay chain blocks that contain backed candidates. 
The per-parachain distribution is: {:#?}", - candidate_count.iter().map(|(para_id, count)| format!("{para_id} has {count} backed candidates")).collect::>() + candidate_count.iter().map(|(para_id, receipts)| format!("{para_id} has {} backed candidates", receipts.len())).collect::>() ); for (para_id, expected_candidate_range) in expected_candidate_ranges { let actual = candidate_count .get(¶_id) - .ok_or_else(|| anyhow!("ParaId {} did not have any backed candidates", para_id))?; + .ok_or_else(|| anyhow!("ParaId {para_id} did not have any backed candidates"))? + .len() as u32; + + if !expected_candidate_range.contains(&actual) { + return Err(anyhow!( + "ParaId {para_id}: candidate count {actual} not within expected range {expected_candidate_range:?}" + )); + } + } + + Ok(candidate_count) +} + +async fn assert_expected_number_of_blocks( + candidate_count: HashMap>>, + expected_number_of_blocks: HashMap, Range)>, +) -> Result<(), anyhow::Error> { + for (para_id, (para_client, expected_number_of_blocks)) in expected_number_of_blocks { + let receipts = candidate_count + .get(¶_id) + .ok_or_else(|| anyhow!("ParaId did not have any backed candidates"))?; + + let mut num_blocks = 0; + + for receipt in receipts { + // We "abuse" the fact that the parachain is using `BlakeTwo256` as hash and thus, the + // `para_head` hash and the hash of the `header` should be equal. + let mut next_para_block_hash = receipt.descriptor().para_head(); + + let mut relay_identifier = None; + let mut core_info = None; + + loop { + let block: Block> = + para_client.blocks().at(next_para_block_hash).await?; + + // Genesis block is not part of a candidate :) + if block.number() == 0 { + break; + } + + let ri = find_relay_block_identifier(&block)?; + let ci = find_core_info(&block)?; + + // If the core changes or the relay identifier, we found all blocks for the + // candidate. 
+ if *relay_identifier.get_or_insert(ri.clone()) != ri || + *core_info.get_or_insert(ci.clone()) != ci + { + break; + } + + num_blocks += 1; + next_para_block_hash = block.header().parent_hash; + } + } - if !expected_candidate_range.contains(actual) { + if !expected_number_of_blocks.contains(&num_blocks) { return Err(anyhow!( - "Candidate count {actual} not within range {expected_candidate_range:?}" + "Block number count {num_blocks} not within range {expected_number_of_blocks:?}", )); } } @@ -191,6 +303,30 @@ where Ok(()) } +/// Returns [`CoreInfo`] for the given parachain block. +pub fn find_core_info( + block: &Block>, +) -> Result { + let substrate_digest = + sp_runtime::generic::Digest::decode(&mut &block.header().digest.encode()[..]) + .expect("`subxt::Digest` and `substrate::Digest` should encode and decode; qed"); + + CumulusDigestItem::find_core_info(&substrate_digest) + .ok_or_else(|| anyhow!("Failed to find `CoreInfo` digest")) +} + +/// Returns [`RelayBlockIdentifier`] for the given parachain block. +fn find_relay_block_identifier( + block: &Block>, +) -> Result { + let substrate_digest = + sp_runtime::generic::Digest::decode(&mut &block.header().digest.encode()[..]) + .expect("`subxt::Digest` and `substrate::Digest` should encode and decode; qed"); + + CumulusDigestItem::find_relay_block_identifier(&substrate_digest) + .ok_or_else(|| anyhow!("Failed to find `RelayBlockIdentifier` digest")) +} + /// Wait for the first block with a session change. /// /// The session change is detected by inspecting the events in the block. @@ -281,18 +417,6 @@ pub async fn assert_blocks_are_being_finalized( Ok(()) } -/// Returns [`RelayBlockIdentifier`] for the given parachain block. 
-fn find_relay_block_identifier( - block: &Block>, -) -> Result { - let substrate_digest = - sp_runtime::generic::Digest::decode(&mut &block.header().digest.encode()[..]) - .expect("`subxt::Digest` and `substrate::Digest` should encode and decode; qed"); - - CumulusDigestItem::find_relay_block_identifier(&substrate_digest) - .ok_or_else(|| anyhow!("Failed to find `RelayBlockIdentifier` digest")) -} - /// Checks if the given `RelayBlockIdentifier` matches a relay chain header. fn identifier_matches_header( identifier: &RelayBlockIdentifier, @@ -364,10 +488,15 @@ pub async fn assert_relay_parent_offset( RelayBlockIdentifier::ByHash(block_hash) => relay_client.blocks().at(*block_hash).await?.number(), RelayBlockIdentifier::ByStorageRoot { block_number, .. } => *block_number, }; + let para_block_number = para_block.number(); seen_relay_parents.insert(relay_block_identifier.clone(), para_block); log::debug!("Parachain block #{para_block_number} was built on relay parent #{relay_parent_number}, highest seen was {highest_relay_block_seen}"); - assert!(highest_relay_block_seen < offset || relay_parent_number <= highest_relay_block_seen.saturating_sub(offset), "Relay parent is not at the correct offset! relay_parent: #{relay_parent_number} highest_seen_relay_block: #{highest_relay_block_seen}"); + assert!( + highest_relay_block_seen < offset || + relay_parent_number <= highest_relay_block_seen.saturating_sub(offset), + "Relay parent is not at the correct offset! relay_parent: #{relay_parent_number} highest_seen_relay_block: #{highest_relay_block_seen}", + ); // As per explanation above, we need to check that no parachain blocks are built // on the forbidden parents. 
for forbidden in &forbidden_parents { @@ -389,6 +518,7 @@ pub async fn assert_relay_parent_offset( } } } + Ok(()) } @@ -404,15 +534,29 @@ pub async fn submit_extrinsic_and_wait_for_finalization_success, + call: &DynamicPayload, +) -> Result { + let tx = client.tx().create_unsigned(call)?; + + submit_tx_and_wait_for_finalization(tx).await +} + +/// Submit the given transaction and wait for its finalization. +async fn submit_tx_and_wait_for_finalization( + tx: SubmittableTransaction>, +) -> Result { + log::info!("Submitting transaction: {:?}", tx.hash()); + + let mut tx = tx.submit_and_watch().await?; - // Below we use the low level API to replicate the `wait_for_in_block` behavior - // which was removed in subxt 0.33.0. See https://github.com/paritytech/subxt/pull/1237. while let Some(status) = tx.next().await.transpose()? { match status { TxStatus::InBestBlock(tx_in_block) => { @@ -503,6 +647,146 @@ pub async fn assert_para_is_registered( Err(anyhow!("No more blocks to check")) } +/// Returns [`BlockBundleInfo`] for the given parachain block. +fn find_block_bundle_info( + block: &Block>, +) -> Result { + let substrate_digest = + sp_runtime::generic::Digest::decode(&mut &block.header().digest.encode()[..]) + .expect("`subxt::Digest` and `substrate::Digest` should encode and decode; qed"); + + CumulusDigestItem::find_block_bundle_info(&substrate_digest) + .ok_or_else(|| anyhow!("Failed to find `BlockBundleInfo` digest")) +} + +/// Validates that the given block is a "special" block in the core. +/// +/// If `is_only_block_in_core` is true, it checks if the given block is the first block in the core +/// and the only one. If this is `false`, it only checks if the block is the last block in the core. 
+async fn ensure_is_block_in_core_impl( + para_client: &OnlineClient, + block_hash: H256, + is_only_block_in_core: bool, +) -> Result<(), anyhow::Error> { + let blocks = para_client.blocks(); + let block = blocks.at(block_hash).await?; + let block_core_info = find_core_info(&block)?; + + if is_only_block_in_core { + let parent = blocks.at(block.header().parent_hash).await?; + + // Genesis is for sure on a different core :) + if parent.number() != 0 { + let parent_core_info = find_core_info(&parent)?; + + if parent_core_info == block_core_info { + return Err(anyhow::anyhow!( + "Not first block ({}) in core, at least the parent block is on the same core.", + block.header().number + )); + } + } + } + + let next_block = loop { + // Start with the latest best block. + let mut current_block = Arc::new(blocks.subscribe_best().await?.next().await.unwrap()?); + + let mut next_block = None; + + while current_block.hash() != block_hash { + next_block = Some(current_block.clone()); + current_block = Arc::new(blocks.at(current_block.header().parent_hash).await?); + + if current_block.number() == 0 { + return Err(anyhow::anyhow!( + "Did not found block while going backwards from the best block" + )); + } + } + + // It is possible that the first block we got is the same one in which the transaction got finalized. + // So, we just retry until we find some more blocks. + if let Some(next_block) = next_block { + break next_block; + } + }; + + let next_block_core_info = find_core_info(&next_block)?; + + if next_block_core_info == block_core_info { + return Err(anyhow::anyhow!( + "Not {} block ({}) in core, at least the following block is on the same core.", + if is_only_block_in_core { "first" } else { "last" }, + block.header().number + )); + } + + Ok(()) +} + +/// Checks if the specified block occupies a full core. 
+pub async fn ensure_is_only_block_in_core( + para_client: &OnlineClient, + block_to_check: BlockToCheck, +) -> Result<(), anyhow::Error> { + let blocks = para_client.blocks(); + + match block_to_check { + BlockToCheck::Exact(block_hash) => { + ensure_is_block_in_core_impl(para_client, block_hash, true).await + }, + BlockToCheck::NextFirstBundleBlock(start_block_hash) => { + let start_block = blocks.at(start_block_hash).await?; + + let mut best_block_stream = blocks.subscribe_best().await?; + + let mut next_first_bundle_block = None; + while let Some(mut block) = best_block_stream.next().await.transpose()? { + while block.number() > start_block.number() { + if find_block_bundle_info(&block)?.index == 0 { + next_first_bundle_block = Some(block.hash()); + } + + block = blocks.at(block.header().parent_hash).await?; + } + + if next_first_bundle_block.is_some() { + break; + } + } + + if let Some(block) = next_first_bundle_block { + ensure_is_block_in_core_impl(para_client, block, true).await + } else { + Err(anyhow!("Could not find the next bundle after {}", start_block.number())) + } + }, + } +} + +/// Checks if the specified block is the last block in a core. +/// +/// Also ensures that the last block is NOT the first block. +pub async fn ensure_is_last_block_in_core( + para_client: &OnlineClient, + block_to_check: H256, +) -> Result<(), anyhow::Error> { + ensure_is_block_in_core_impl(para_client, block_to_check, false).await?; + + let blocks = para_client.blocks(); + let block = blocks.at(block_to_check).await?; + let bundle_info = find_block_bundle_info(&block)?; + + // Above we ensure it is the last block in the core and now we want to ensure it isn't the first + // block. + if bundle_info.index == 0 { + Err(anyhow!("`{block_to_check:?}` is the first block of a core and not the last")) + } else { + Ok(()) + } +} + /// Assigns the given `cores` to the given `para_id`. 
/// /// Zombienet by default adds extra core for each registered parachain additionally to the one @@ -526,7 +810,7 @@ pub async fn assert_para_is_registered( /// To assign these extra `2` cores, the call would look like this: /// /// ```ignore -/// assign_cores(&relay_client, PARA_ID, vec![0, 1]) +/// assign_cores(&relay_node, PARA_ID, vec![0, 1]) /// ``` /// /// The cores `2` and `3` are assigned to the parachains by Zombienet. @@ -570,7 +854,9 @@ fn create_assign_core_call(core_and_para: &[(u32, u32)]) -> DynamicPayload { ) } -/// Creates a runtime upgrade call using `sudo` and `set_code`. +/// Creates a runtime upgrade call using `Sudo::sudo(System::set_code_without_checks)`. +/// +/// The `wasm_binary` should be the WASM runtime binary to upgrade to. pub fn create_runtime_upgrade_call(wasm: &[u8]) -> DynamicPayload { zombienet_sdk::subxt::tx::dynamic( "Sudo", diff --git a/cumulus/zombienet/zombienet-sdk/Cargo.toml b/cumulus/zombienet/zombienet-sdk/Cargo.toml index 61aa2a10f44d7..69808b7aadddc 100644 --- a/cumulus/zombienet/zombienet-sdk/Cargo.toml +++ b/cumulus/zombienet/zombienet-sdk/Cargo.toml @@ -20,11 +20,19 @@ zombienet-sdk = { workspace = true } zombienet-orchestrator = { workspace = true } zombienet-configuration = { workspace = true } cumulus-zombienet-sdk-helpers = { workspace = true } -cumulus-test-runtime = { workspace = true } +sp-rpc = { workspace = true, default-features = true } sp-statement-store = { workspace = true, default-features = true, features = ["serde"] } sc-statement-store = { workspace = true, default-features = true, features = ["test-helpers"] } sp-keyring = { workspace = true, default-features = true } sp-core = { workspace = true, default-features = true } +sp-version = { workspace = true, default-features = true } +sp-maybe-compressed-blob = { workspace = true, default-features = true } +parity-wasm = { workspace = true } +sc-executor = { workspace = true, default-features = true } +sc-executor-common = { workspace = true, 
default-features = true } +frame-support = { workspace = true } +cumulus-primitives-core = { workspace = true } +cumulus-test-runtime = { workspace = true } sp-runtime = { workspace = true, default-features = true } sp-consensus-aura = { workspace = true, default-features = true } sp-consensus-slots = { workspace = true, default-features = true } diff --git a/cumulus/zombienet/zombienet-sdk/tests/tests.rs b/cumulus/zombienet/zombienet-sdk/tests/tests.rs index 3d9f091a226b8..efc472126ce7a 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/tests.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/tests.rs @@ -1,6 +1,20 @@ +// This file is part of Cumulus. + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ #[cfg(feature = "zombie-ci")] mod zombie_ci; diff --git a/cumulus/zombienet/zombienet-sdk/tests/utils.rs b/cumulus/zombienet/zombienet-sdk/tests/utils.rs index 70ecb56b099ae..ca9bc679f9f3f 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/utils.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/utils.rs @@ -4,6 +4,7 @@ use zombienet_sdk::{LocalFileSystem, Network, NetworkConfig}; pub const BEST_BLOCK_METRIC: &str = "block_height{status=\"best\"}"; +pub const FINALIZED_BLOCK_METRIC: &str = "block_height{status=\"finalized\"}"; pub async fn initialize_network( config: NetworkConfig, diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs new file mode 100644 index 0000000000000..c8be4c5642a8c --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/basic.rs @@ -0,0 +1,238 @@ +// This file is part of Cumulus. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::utils::initialize_network; +use anyhow::anyhow; +use cumulus_test_runtime::test_pallet::{HRMP_RECIPIENT_HIGH, HRMP_RECIPIENT_LOW}; +use cumulus_zombienet_sdk_helpers::{ + assert_finality_lag, assert_para_throughput, assign_cores, + submit_extrinsic_and_wait_for_finalization_success, +}; +use polkadot_primitives::Id as ParaId; +use serde_json::json; +use tokio::{join, spawn, task::JoinHandle}; +use zombienet_sdk::{ + subxt::{ext::scale_value::value, OnlineClient, PolkadotConfig}, + subxt_signer::sr25519::dev, + NetworkConfig, NetworkConfigBuilder, NetworkNode, +}; + +const PARA_ID: u32 = 2400; + +/// A test that ensures that `PoV` bundling works. +/// +/// Initially, one core is assigned. We expect the parachain to produce 12 block per relay core. +/// As we increase the number of cores via `assign_core`, we expect the blocks to spread over the +/// relay cores. +#[tokio::test(flavor = "multi_thread")] +async fn block_bundling_basic() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + log::info!("Spawning network"); + let config = build_network_config().await?; + let network = initialize_network(config).await?; + let relay_node = network.get_node("validator-0")?; + let para_node = network.get_node("collator-1")?; + let para_full_node = network.get_node("para-full-node")?; + + let handle = wait_for_block_and_restart_node(para_full_node.clone()); + + let para_client = para_node.wait_client().await?; + let relay_client: OnlineClient = relay_node.wait_client().await?; + + for recipient in [HRMP_RECIPIENT_LOW, HRMP_RECIPIENT_HIGH] { + let call = zombienet_sdk::subxt::tx::dynamic( + "Sudo", + "sudo", + vec![value! 
{ + Hrmp(force_open_hrmp_channel { + sender: PARA_ID, + recipient: recipient, + max_capacity: 1000u32, + max_message_size: 1024u32 + }) + }], + ); + submit_extrinsic_and_wait_for_finalization_success(&relay_client, &call, &dev::alice()) + .await?; + } + log::info!("HRMP channels opened to {HRMP_RECIPIENT_LOW} and {HRMP_RECIPIENT_HIGH}"); + + assert_para_throughput( + &relay_client, + 6, + [(ParaId::from(PARA_ID), 4..7)], + [(ParaId::from(PARA_ID), (para_client.clone(), 44..73))], + ) + .await?; + // 6 relay chain blocks + assert_finality_lag(¶_client, 72).await?; + + assign_cores(&relay_client, PARA_ID, vec![0, 1]).await?; + + assert_para_throughput( + &relay_client, + 6, + [(ParaId::from(PARA_ID), 12..19)], + [(ParaId::from(PARA_ID), (para_client.clone(), 44..73))], + ) + .await?; + assert_finality_lag(¶_client, 72).await?; + + assign_cores(&relay_client, PARA_ID, vec![2, 3, 4]).await?; + + assert_para_throughput( + &relay_client, + 6, + [(ParaId::from(PARA_ID), 24..37)], + [(ParaId::from(PARA_ID), (para_client.clone(), 44..73))], + ) + .await?; + + assert_finality_lag(¶_client, 72).await?; + + // Ensure we restarted the node successfully + handle.await??; + + let para_full_client: OnlineClient = para_full_node.wait_client().await?; + let mut full_best_blocks = para_full_client.blocks().subscribe_best().await?; + let mut collator_best_blocks = para_client.blocks().subscribe_best().await?; + + let (Some(full_best), Some(best)) = join!(full_best_blocks.next(), collator_best_blocks.next()) + else { + return Err(anyhow!("Failed to get a best block from the full node and the collator")); + }; + + let diff = full_best?.number().abs_diff(best?.number()); + if diff > 12 { + return Err(anyhow!( + "Best block difference between full node and collator of {diff} is too big!" + )); + } + + log::info!("Test finished successfully"); + + Ok(()) +} + +/// Wait for block `13` and then restart the node. 
+/// +/// We take block `13`, because it should be near the beginning of a block bundle and we want to +/// test stopping the node while importing blocks in the middle of a bundle. +fn wait_for_block_and_restart_node(node: NetworkNode) -> JoinHandle> { + spawn(async move { + let para_client: OnlineClient = node.wait_client().await?; + let mut best_blocks = para_client.blocks().subscribe_best().await?; + + loop { + let Some(block) = best_blocks.next().await.transpose()? else { + return Err(anyhow!("Node stopped before reaching the block to restart")); + }; + + if block.number() >= 13 { + log::info!("Full node has imported block `13`, going to restart it"); + return node.restart(None).await; + } + } + }) +} + +async fn build_network_config() -> Result { + // images are not relevant for `native`, but we leave it here in case we use `k8s` some day + let images = zombienet_sdk::environment::get_images_from_env(); + log::info!("Using images: {images:?}"); + + NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=trace").into()]) + .with_default_resources(|resources| { + // These settings are applicable only for `k8s` provider. + // Leaving them in case we switch to `k8s` some day. + resources.with_request_cpu(4).with_request_memory("4G") + }) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 7, + "max_validators_per_core": 1 + }, + "hrmp_channel_max_capacity": 1000, + "hrmp_channel_max_message_size": 1024, + "hrmp_max_message_num_per_candidate": 100, + "hrmp_max_parachain_outbound_channels": 10 + } + } + })) + // Have to set a `with_validator` outside of the loop below, so that `r` has the + // right type. 
+ .with_validator(|node| node.with_name("validator-0")); + (1..9).fold(r, |acc, i| { + acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) + }) + }) + .with_parachain(|p| { + p.with_id(PARA_ID) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("block-bundling") + .with_default_args(vec![ + ("--authoring").into(), + ("slot-based").into(), + ("-lparachain=trace,aura=trace").into(), + ]) + .with_genesis_overrides(json!({ + "testPallet": { + "enableBigValueMove": true, + "enableHrmpSending": true + } + })) + .with_collator(|n| n.with_name("collator-0")) + .with_collator(|n| n.with_name("collator-1")) + .with_collator(|n| n.with_name("collator-2")) + .with_collator(|n| n.with_name("para-full-node").validator(false)) + }) + .with_parachain(|p| { + p.with_id(HRMP_RECIPIENT_LOW) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("sync-backing") + .with_collator(|n| n.with_name("hrmp-recipient-low")) + }) + .with_parachain(|p| { + p.with_id(HRMP_RECIPIENT_HIGH) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("async-backing") + .with_collator(|n| n.with_name("hrmp-recipient-high")) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + }) +} diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs new file mode 100644 index 0000000000000..6eccfbda631da --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/full_core_usage_scenarios.rs @@ -0,0 +1,254 @@ +// This file is part of 
Cumulus. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::anyhow; +use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; +use cumulus_zombienet_sdk_helpers::{ + assign_cores, ensure_is_last_block_in_core, ensure_is_only_block_in_core, + submit_extrinsic_and_wait_for_finalization_success, BlockToCheck, +}; +use frame_support::weights::constants::WEIGHT_REF_TIME_PER_SECOND; +use serde_json::json; +use zombienet_sdk::{ + subxt::{ext::scale_value::value, tx::DynamicPayload, OnlineClient, PolkadotConfig}, + subxt_signer::sr25519::dev, + NetworkConfig, NetworkConfigBuilder, +}; + +const PARA_ID: u32 = 2400; + +/// A test that sends transactions using `pallet-utility` `with_weight` through `pallet-sudo`. +/// +/// This test starts with 3 cores assigned and sends a series of transactions, starting with: +/// 1. One with 1s ref_time +/// 2. One with a PoV size bigger than what one block alone is allowed to process. +/// Each transaction is sent after the other and waits for finalization.
+#[tokio::test(flavor = "multi_thread")] +async fn block_bundling_full_core_usage_scenarios() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let config = build_network_config().await?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + let para_node = network.get_node("collator-1")?; + + let para_client: OnlineClient = para_node.wait_client().await?; + let relay_client: OnlineClient = relay_node.wait_client().await?; + let alice = dev::alice(); + + // Assign cores 0 and 1 to start with 3 cores total (core 2 is assigned by Zombienet) + assign_cores(&relay_client, PARA_ID, vec![0, 1]).await?; + + // Create and send first transaction: 1s ref_time using utility.with_weight + // + // While we only should have 500ms available. + let ref_time_1s = WEIGHT_REF_TIME_PER_SECOND; + let first_call = create_utility_with_weight_call(ref_time_1s, 0); + let sudo_first_call = create_sudo_call(first_call); + + log::info!("Testing scenario 1: Sending a transaction with 1s ref time weight usage"); + let block_hash = + submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_first_call, &alice) + .await?; + + ensure_is_only_block_in_core(¶_client, BlockToCheck::Exact(block_hash)).await?; + + // Create a transaction that uses more than the allowed POV size per block. 
+ let pov_size = MAX_POV_SIZE / 4 + 512 * 1024; + let second_call = create_utility_with_weight_call(0, pov_size as u64); + let sudo_second_call = create_sudo_call(second_call); + + log::info!("Testing scenario 2: Sending a transaction with ~2.5MiB storage weight usage"); + let block_hash = + submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_second_call, &alice) + .await?; + + ensure_is_only_block_in_core(¶_client, BlockToCheck::Exact(block_hash)).await?; + + let third_call = create_schedule_weight_registration_call(); + let sudo_third_call = create_sudo_call(third_call); + + log::info!("Testing scenario 5: Enabling `on_initialize` to use 1s ref time"); + let block_hash = + submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_third_call, &alice) + .await?; + + ensure_is_only_block_in_core(¶_client, BlockToCheck::NextFirstBundleBlock(block_hash)) + .await?; + + let inherent_weight_call = create_set_inherent_weight_consume_call(ref_time_1s, 0); + let sudo_inherent_weight_call = create_sudo_call(inherent_weight_call); + + log::info!("Testing scenario 4: Enabling an inherent that will use 1s ref time"); + let block_hash = submit_extrinsic_and_wait_for_finalization_success( + ¶_client, + &sudo_inherent_weight_call, + &alice, + ) + .await?; + + // The next block should contain the consume_weight_inherent and consume the 1s ref_time + ensure_is_only_block_in_core(¶_client, BlockToCheck::NextFirstBundleBlock(block_hash)) + .await?; + + let use_more_weight_than_announced = create_use_more_weight_than_announced_call(true); + + log::info!( + "Testing scenario 5: Sending a transaction which uses more weight than what \ + it registered and transactions appears in the first block of a core" + ); + let block_hash = submit_extrinsic_and_wait_for_finalization_success( + ¶_client, + &use_more_weight_than_announced, + &alice, + ) + .await?; + + ensure_is_only_block_in_core(¶_client, BlockToCheck::Exact(block_hash)).await?; + + let 
use_more_weight_than_announced = create_use_more_weight_than_announced_call(false); + + // Here we are testing that a transaction that uses more weight than registered makes the block + // production stop for this core. Even as the block is not the first block in the core. + log::info!( + "Testing scenario 6: Sending a transaction which uses more weight than what \ + it registered and transactions appears in the last block of a core" + ); + let block_hash = submit_extrinsic_and_wait_for_finalization_success( + ¶_client, + &use_more_weight_than_announced, + &alice, + ) + .await?; + + ensure_is_last_block_in_core(¶_client, block_hash).await?; + + Ok(()) +} + +/// Creates a `pallet-utility` `with_weight` call +fn create_utility_with_weight_call(ref_time: u64, proof_size: u64) -> DynamicPayload { + // Create a simple remark call as the inner call + let remark_data = vec![0u8; proof_size as usize]; // Fill with dummy data for PoV size + let inner_call = + zombienet_sdk::subxt::tx::dynamic("System", "remark", vec![value!(remark_data)]); + + // Create the weight struct + let weight = value!({ + ref_time: ref_time, + proof_size: proof_size + }); + + // Create the utility.with_weight call + zombienet_sdk::subxt::tx::dynamic( + "Utility", + "with_weight", + vec![inner_call.into_value(), weight], + ) +} + +/// Creates a `pallet-sudo` `sudo` call wrapping the inner call +fn create_sudo_call(inner_call: DynamicPayload) -> DynamicPayload { + zombienet_sdk::subxt::tx::dynamic("Sudo", "sudo", vec![inner_call.into_value()]) +} + +/// Creates a `test-pallet` `schedule_weight_registration` call +fn create_schedule_weight_registration_call() -> DynamicPayload { + zombienet_sdk::subxt::tx::dynamic( + "TestPallet", + "schedule_weight_registration", + vec![] as Vec, + ) +} + +/// Creates a `test-pallet` `use_more_weight_than_announced` call +fn create_use_more_weight_than_announced_call(must_be_first_block_in_core: bool) -> DynamicPayload { + zombienet_sdk::subxt::tx::dynamic( + 
"TestPallet", + "use_more_weight_than_announced", + vec![value![must_be_first_block_in_core]] + as Vec, + ) +} + +/// Creates a `test-pallet` `set_inherent_weight_consume` call +fn create_set_inherent_weight_consume_call(ref_time: u64, proof_size: u64) -> DynamicPayload { + let weight = value!({ + ref_time: ref_time, + proof_size: proof_size + }); + + zombienet_sdk::subxt::tx::dynamic("TestPallet", "set_inherent_weight_consume", vec![weight]) +} + +async fn build_network_config() -> Result { + let images = zombienet_sdk::environment::get_images_from_env(); + log::info!("Using images: {images:?}"); + NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=trace").into()]) + .with_default_resources(|resources| { + resources.with_request_cpu(4).with_request_memory("4G") + }) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 3, + "max_validators_per_core": 1 + } + } + } + })) + .with_validator(|node| node.with_name("validator-0")); + (1..9).fold(r, |acc, i| { + acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) + }) + }) + .with_parachain(|p| { + p.with_id(PARA_ID) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("block-bundling") + .with_default_args(vec![ + ("--authoring").into(), + ("slot-based").into(), + ("-lparachain=debug,aura=trace,runtime=trace").into(), + ]) + .with_collator(|n| n.with_name("collator-0")) + .with_collator(|n| n.with_name("collator-1")) + .with_collator(|n| n.with_name("collator-2")) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + 
anyhow!("config errs: {errs}") + }) +} diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs new file mode 100644 index 0000000000000..e48bc0f8a9c5b --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/mod.rs @@ -0,0 +1,25 @@ +// This file is part of Cumulus. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod basic; +mod full_core_usage_scenarios; +mod pov_recovery; +mod relay_parent_offset; +mod runtime_upgrade; +mod three_cores_glutton; +mod tracing_block; +mod warp_sync; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs new file mode 100644 index 0000000000000..690304f97f01a --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/pov_recovery.rs @@ -0,0 +1,196 @@ +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +use crate::utils::initialize_network; +use anyhow::anyhow; +use cumulus_zombienet_sdk_helpers::{assert_para_throughput, assign_cores}; +use polkadot_primitives::Id as ParaId; +use serde_json::json; +use std::{sync::Arc, time::Duration}; +use zombienet_orchestrator::network::node::LogLineCountOptions; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + NetworkConfig, NetworkConfigBuilder, +}; + +const PARA_ID: u32 = 2100; + +/// This test checks if parachain node is importing blocks using PoV recovery even +/// after more cores have been assigned for the parachain. +#[tokio::test(flavor = "multi_thread")] +async fn block_bundling_pov_recovery() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + log::info!("Spawning network with relay chain only"); + let config = build_network_config().await?; + let network = initialize_network(config).await?; + + let collator = network.get_node("collator")?; + collator.pause().await?; + + let recovery_target = network.get_node("recovery-target")?; + + // Wait for the node to be ready. We have the collator in between paused, this ensures that it + // doesn't produce any blocks in between. This is important as the recovery node needs to be up + // to observe the candidates on the relay chain, to recover them. + recovery_target.wait_until_is_up(120u64).await?; + + collator.resume().await?; + + let alice = network.get_node("alice")?; + + let relay_client: OnlineClient = alice.wait_client().await?; + + assign_cores(&relay_client, PARA_ID, vec![0, 1]).await?; + + log::info!("Ensuring parachain making progress"); + assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 40..65)], []).await?; + + // We want to make sure that none of the consensus hook checks fail, even if the chain makes + // progress. If below log line occurred 1 or more times then test failed. 
+ log::info!("Ensuring none of the consensus hook checks fail at {}", collator.name()); + let result = collator + .wait_log_line_count_with_timeout( + "set_validation_data inherent needs to be present in every block", + false, + LogLineCountOptions::no_occurences_within_timeout(Duration::from_secs(10)), + ) + .await?; + + if !result.success() { + return Err(anyhow!("Consensus hook failed at {}: {:?}", collator.name(), result)); + } + + // Wait (up to 10 seconds) until pattern occurs more than 35 times + let options = LogLineCountOptions { + predicate: Arc::new(|n| n > 35), + timeout: Duration::from_secs(10), + wait_until_timeout_elapses: false, + }; + + log::info!("Ensuring blocks are imported using PoV recovery by {}", recovery_target.name()); + let result = recovery_target + .wait_log_line_count_with_timeout( + "Importing blocks retrieved using pov_recovery", + false, + options, + ) + .await?; + + if !result.success() { + return Err(anyhow!( + "Failed importing blocks using PoV recovery by {}: {result:?}", + recovery_target.name() + )); + } + + log::info!("Test finished successfully"); + Ok(()) +} + +async fn build_network_config() -> Result { + // images are not relevant for `native`, but we leave it here in case we use `k8s` some day + let images = zombienet_sdk::environment::get_images_from_env(); + log::info!("Using images: {images:?}"); + + // Network setup: + // - relaychain nodes: + // - alice + // - validator + // - validator[0-3] + // - validator + // - synchronize only with alice + // - parachain nodes + // - recovery-target + // - full node + // - collator-elastic + // - collator which is the only one producing blocks + NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_resources(|resources| { + // These settings are applicable only for `k8s` provider. + // Leaving them in case we switch to `k8s` some day. 
+ resources + .with_request_cpu(1) + .with_request_memory("2G") + .with_limit_cpu(2) + .with_limit_memory("4G") + }) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 3, + "max_validators_per_core": 1 + }, + "approval_voting_params": { + "max_approval_coalesce_count": 5 + } + } + } + })) + // Have to set a `with_validator` outside of the loop below, so that `r` has the right + // type. + .with_validator(|node| node.with_name("alice").with_args(vec![])); + + (0..4).fold(r, |acc, i| { + acc.with_validator(|node| { + node.with_name(&format!("validator-{i}")).with_args(vec![ + ("-lruntime=debug,parachain=trace").into(), + ("--reserved-only").into(), + ("--reserved-nodes", "{{ZOMBIE:alice:multiaddr}}").into(), + ]) + }) + }) + }) + .with_parachain(|p| { + p.with_id(PARA_ID) + .with_chain("block-bundling") + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_default_resources(|resources| { + // These settings are applicable only for `k8s` provider. + // Leaving them in case we switch to `k8s` some day. 
+ resources + .with_request_cpu(1) + .with_request_memory("2G") + .with_limit_cpu(2) + .with_limit_memory("4G") + }) + .with_fullnode(|n| + n.with_name("recovery-target") + .with_args(vec![ + ("-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug").into(), + ("--disable-block-announcements").into(), + ("--in-peers", "0").into(), + ("--out-peers", "0").into(), + ("--").into(), + ("--reserved-only").into(), + ("--reserved-nodes", "{{ZOMBIE:alice:multiaddr}}").into() + ])) + .with_collator(|n| n.with_name("collator") + .with_args(vec![ + ("--reserved-nodes", "{{ZOMBIE:alice:multiaddr}}").into(), + ("-laura=trace,runtime=info,cumulus-consensus=trace,consensus::common=trace,parachain::collation-generation=trace,parachain::collator-protocol=trace,parachain=debug").into(), + ("--disable-block-announcements").into(), + ("--force-authoring").into(), + ("--authoring", "slot-based").into() + ]) + ) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + }) +} diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/relay_parent_offset.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/relay_parent_offset.rs new file mode 100644 index 0000000000000..073fda9389e4f --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/relay_parent_offset.rs @@ -0,0 +1,92 @@ +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Test that parachains that use a single slot-based collator with elastic scaling MVP and with +// elastic scaling with RFC103 can achieve full throughput of 3 candidates per block. 
+ +use anyhow::anyhow; +use cumulus_zombienet_sdk_helpers::assert_relay_parent_offset; +use serde_json::json; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + NetworkConfigBuilder, +}; + +use cumulus_zombienet_sdk_helpers::assign_cores; + +#[tokio::test(flavor = "multi_thread")] +async fn block_bundling_relay_parent_offset() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + // Images are not relevant for `native`, but we leave it here in case we use `k8s` some day + let images = zombienet_sdk::environment::get_images_from_env(); + + let config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=debug").into()]) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + // Num cores is 4, because 2 extra will be added automatically when registering the paras. + "num_cores": 4, + "max_validators_per_core": 1 + } + } + } + })) + // Have to set a `with_validator` outside of the loop below, so that `r` has the + // right type. 
+ .with_validator(|node| node.with_name("validator-0")); + + (1..6).fold(r, |acc, i| { + acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) + }) + }) + .with_parachain(|p| { + p.with_id(2400) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("relay-parent-offset") + .with_default_args(vec![ + "--authoring=slot-based".into(), + ("-lparachain=debug,aura=debug").into(), + ]) + .with_collator(|n| n.with_name("collator-rp-offset")) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + })?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + let relay_client: OnlineClient = relay_node.wait_client().await?; + + let para_node_rp_offset = network.get_node("collator-rp-offset")?; + + let para_client = para_node_rp_offset.wait_client().await?; + + assign_cores(&relay_client, 2400, vec![0]).await?; + + log::info!("Checking that the parachain runs with the expected relay parent offset"); + + assert_relay_parent_offset(&relay_client, ¶_client, 2, 30).await?; + + log::info!("Test finished successfully"); + + Ok(()) +} diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs new file mode 100644 index 0000000000000..b33d42f5fb9c7 --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/runtime_upgrade.rs @@ -0,0 +1,236 @@ +// This file is part of Cumulus. + +// Copyright (C) Parity Technologies (UK) Ltd. 
+// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::anyhow; +use cumulus_primitives_core::relay_chain::MAX_POV_SIZE; +use cumulus_test_runtime::block_bundling::WASM_BINARY; +use cumulus_zombienet_sdk_helpers::{ + assign_cores, ensure_is_only_block_in_core, submit_extrinsic_and_wait_for_finalization_success, + submit_unsigned_extrinsic_and_wait_for_finalization_success, wait_for_runtime_upgrade, + BlockToCheck, +}; +use serde_json::json; +use sp_core::blake2_256; +use zombienet_sdk::{ + subxt::{ + ext::scale_value::{value, Value}, + tx::DynamicPayload, + utils::H256, + OnlineClient, PolkadotConfig, + }, + subxt_signer::sr25519::dev, + NetworkConfig, NetworkConfigBuilder, +}; + +const PARA_ID: u32 = 2400; +/// 4 blocks per core and each gets 1/4 of the [`MAX_POV_SIZE`], so the runtime needs to be bigger +/// than this to trigger the logic of getting one full core. +const MIN_RUNTIME_SIZE_BYTES: usize = MAX_POV_SIZE as usize / 4 + 50 * 1024; + +/// A test that performs runtime upgrade using the `authorize_upgrade` and +/// `apply_authorized_upgrade` logic. +/// +/// This test starts with 3 cores assigned and performs two transactions: +/// 1. First calls `authorize_upgrade` to authorize the new runtime code hash +/// 2. 
Then calls `apply_authorized_upgrade` with the actual runtime code +/// The runtime code is validated to be at least 2.5MiB in size, and both transactions +/// are validated to be the only block in their respective cores. +#[tokio::test(flavor = "multi_thread")] +async fn block_bundling_runtime_upgrade() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + let compressed_wasm = + WASM_BINARY.ok_or_else(|| anyhow!("WASM runtime binary not available"))?; + + // Decompress and inflate with a custom wasm section containing pseudo-random data until + // the compressed size exceeds `MIN_RUNTIME_SIZE_BYTES`. + let runtime_wasm = inflate_runtime_wasm(compressed_wasm, MIN_RUNTIME_SIZE_BYTES)?; + + log::info!("Runtime size validation passed: {} bytes", runtime_wasm.len()); + + let config = build_network_config().await?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; + + let relay_node = network.get_node("validator-0")?; + let para_node = network.get_node("collator-1")?; + + let relay_client: OnlineClient = relay_node.wait_client().await?; + let para_client: OnlineClient = para_node.wait_client().await?; + let alice = dev::alice(); + + // Assign cores 0 and 1 to start with 3 cores total (core 2 is assigned by Zombienet) + assign_cores(&relay_client, PARA_ID, vec![0, 1]).await?; + + log::info!("3 cores total assigned to the parachain"); + + // Step 1: Authorize the runtime upgrade + let code_hash = blake2_256(&runtime_wasm); + let authorize_call = create_authorize_upgrade_call(code_hash.into()); + let sudo_authorize_call = create_sudo_call(authorize_call); + + log::info!("Sending authorize_upgrade transaction"); + submit_extrinsic_and_wait_for_finalization_success(¶_client, &sudo_authorize_call, &alice) + .await?; + log::info!("Authorize upgrade transaction finalized"); + + // Step 2: Apply the authorized upgrade 
with the actual runtime code + let apply_call = create_apply_authorized_upgrade_call(runtime_wasm.clone()); + + log::info!( + "Sending apply_authorized_upgrade transaction with runtime size: {} bytes", + runtime_wasm.len() + ); + + let block_hash = + submit_unsigned_extrinsic_and_wait_for_finalization_success(¶_client, &apply_call) + .await?; + log::info!("Apply authorized upgrade transaction finalized in block: {:?}", block_hash); + + ensure_is_only_block_in_core(¶_client, BlockToCheck::Exact(block_hash)).await?; + + let upgrade_block = wait_for_runtime_upgrade(¶_client).await?; + + ensure_is_only_block_in_core(¶_client, BlockToCheck::Exact(upgrade_block)).await?; + + Ok(()) +} + +/// Creates a `System::authorize_upgrade` call +fn create_authorize_upgrade_call(code_hash: H256) -> DynamicPayload { + zombienet_sdk::subxt::tx::dynamic( + "System", + "authorize_upgrade", + vec![Value::from_bytes(code_hash)], + ) +} + +/// Creates a `System::apply_authorized_upgrade` call +fn create_apply_authorized_upgrade_call(code: Vec) -> DynamicPayload { + zombienet_sdk::subxt::tx::dynamic("System", "apply_authorized_upgrade", vec![value!(code)]) +} + +/// Creates a `pallet-sudo` `sudo` call wrapping the inner call +fn create_sudo_call(inner_call: DynamicPayload) -> DynamicPayload { + zombienet_sdk::subxt::tx::dynamic("Sudo", "sudo", vec![inner_call.into_value()]) +} + +/// Decompress the WASM binary and pad with a custom section containing pseudo-random data +/// until the compressed size exceeds `min_compressed_size`. +fn inflate_runtime_wasm( + compressed_wasm: &[u8], + min_compressed_size: usize, +) -> Result, anyhow::Error> { + let mut wasm = sp_maybe_compressed_blob::decompress(compressed_wasm, 50 * 1024 * 1024) + .map_err(|e| anyhow!("Decompression failed: {:?}", e))? + .into_owned(); + + // Bump the `spec_version` so that `apply_authorized_upgrade`'s version check passes. 
+ // On chain nothing will change, as we only change the runtime version stored inside the wasm + // file. + let blob = sc_executor_common::runtime_blob::RuntimeBlob::new(&wasm)?; + let mut version = sc_executor::read_embedded_version(&blob)? + .ok_or_else(|| anyhow!("No runtime version found?"))?; + version.spec_version += 1; + wasm = sp_version::embed::embed_runtime_version(&wasm, version)?; + + let mut rng_state: u64 = 0xdeadbeef; + let mut padding = Vec::new(); + let chunk_size = 256 * 1024; + loop { + padding.extend((0..chunk_size).map(|_| { + // xorshift64 + rng_state ^= rng_state << 13; + rng_state ^= rng_state >> 7; + rng_state ^= rng_state << 17; + rng_state as u8 + })); + + let mut module: parity_wasm::elements::Module = + parity_wasm::deserialize_buffer(&wasm).map_err(|e| anyhow!("wasm parse: {e:?}"))?; + module.set_custom_section("padding", padding.clone()); + wasm = parity_wasm::serialize(module).map_err(|e| anyhow!("wasm serialize: {e:?}"))?; + + let compressed = sp_maybe_compressed_blob::compress_weakly(&wasm, 50 * 1024 * 1024) + .ok_or_else(|| anyhow!("Compression failed"))?; + log::info!( + "Inflated WASM: uncompressed={} bytes, compressed={} bytes (target={})", + wasm.len(), + compressed.len(), + min_compressed_size, + ); + if compressed.len() >= min_compressed_size { + return Ok(compressed); + } + } +} + +async fn build_network_config() -> Result { + let images = zombienet_sdk::environment::get_images_from_env(); + log::info!("Using images: {images:?}"); + NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=trace").into()]) + .with_default_resources(|resources| { + resources.with_request_cpu(4).with_request_memory("4G") + }) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 3, + "max_validators_per_core": 1 + } + } + } + })) 
+ .with_validator(|node| node.with_name("validator-0")); + (1..9).fold(r, |acc, i| { + acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) + }) + }) + .with_parachain(|p| { + p.with_id(PARA_ID) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("block-bundling") + .with_default_args(vec![ + ("--authoring").into(), + ("slot-based").into(), + ("-lparachain=debug,aura=trace,basic-authorship=trace,runtime=trace,txpool=trace").into(), + ]) + .with_collator(|n| n.with_name("collator-0")) + .with_collator(|n| n.with_name("collator-1")) + .with_collator(|n| n.with_name("collator-2")) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + }) +} diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/multiple_blocks_per_slot.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs similarity index 53% rename from cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/multiple_blocks_per_slot.rs rename to cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs index c39b9ebde5c7c..4df102cb9e3de 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/multiple_blocks_per_slot.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/three_cores_glutton.rs @@ -1,9 +1,21 @@ +// This file is part of Cumulus. + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 -use anyhow::anyhow; +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. -use crate::utils::initialize_network; +use anyhow::anyhow; use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_para_throughput, assign_cores}; use polkadot_primitives::Id as ParaId; @@ -15,46 +27,49 @@ use zombienet_sdk::{ const PARA_ID: u32 = 2400; -/// This test spawns a parachain network. -/// Initially, one core is assigned. We expect the parachain to produce 1 block per relay. -/// As we increase the number of cores via `assign_core`, we expect the block pace to increase too. -/// **Note:** The runtime in use here has 6s slot duration, so multiple blocks will be produced per -/// slot. +/// A test that ensures that PoV bundling works with 3 cores and glutton consuming 10% ref time. +/// +/// This test starts with 3 cores assigned and configures glutton to use 10% of ref time, +/// then validates that the parachain produces 72 blocks. 
#[tokio::test(flavor = "multi_thread")] -async fn elastic_scaling_multiple_blocks_per_slot() -> Result<(), anyhow::Error> { +async fn block_bundling_three_cores_glutton() -> Result<(), anyhow::Error> { let _ = env_logger::try_init_from_env( env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), ); - log::info!("Spawning network"); let config = build_network_config().await?; - let network = initialize_network(config).await?; + + let spawn_fn = zombienet_sdk::environment::get_spawn_fn(); + let network = spawn_fn(config).await?; let relay_node = network.get_node("validator-0")?; - let para_node_elastic = network.get_node("collator-1")?; + let para_node = network.get_node("collator-1")?; + let para_client = para_node.wait_client().await?; let relay_client: OnlineClient = relay_node.wait_client().await?; - assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 3..18)]).await?; - assert_finality_lag(¶_node_elastic.wait_client().await?, 5).await?; - assign_cores(&relay_client, PARA_ID, vec![2, 3]).await?; + // Assign cores 0 and 1 to start with 3 cores total (core 2 is assigned by Zombienet) + assign_cores(&relay_client, PARA_ID, vec![0, 1]).await?; - assert_para_throughput(&relay_client, 15, [(ParaId::from(PARA_ID), 39..46)]).await?; - assert_finality_lag(¶_node_elastic.wait_client().await?, 20).await?; + // Wait for the parachain to produce 72 blocks with 3 cores and glutton active + // With 3 cores, we expect roughly 3x throughput compared to single core + // Adjusting expectations based on glutton consuming 80% of ref time + assert_para_throughput( + &relay_client, + 6, + [(ParaId::from(PARA_ID), 12..19)], + [(ParaId::from(PARA_ID), (para_client.clone(), 48..73))], + ) + .await?; - assign_cores(&relay_client, PARA_ID, vec![4, 5, 6]).await?; - - assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 52..61)]).await?; - assert_finality_lag(¶_node_elastic.wait_client().await?, 30).await?; - log::info!("Test finished 
successfully"); + assert_finality_lag(¶_client, 72).await?; + log::info!("Test finished successfully - 72 blocks produced with 3 cores and glutton"); Ok(()) } async fn build_network_config() -> Result { - // images are not relevant for `native`, but we leave it here in case we use `k8s` some day let images = zombienet_sdk::environment::get_images_from_env(); log::info!("Using images: {images:?}"); - NetworkConfigBuilder::new() .with_relaychain(|r| { let r = r @@ -63,22 +78,18 @@ async fn build_network_config() -> Result { .with_default_image(images.polkadot.as_str()) .with_default_args(vec![("-lparachain=trace").into()]) .with_default_resources(|resources| { - // These settings are applicable only for `k8s` provider. - // Leaving them in case we switch to `k8s` some day. resources.with_request_cpu(4).with_request_memory("4G") }) .with_genesis_overrides(json!({ "configuration": { "config": { "scheduler_params": { - "num_cores": 7, + "num_cores": 2, "max_validators_per_core": 1 } } } })) - // Have to set a `with_validator` outside of the loop below, so that `r` has the - // right type. 
.with_validator(|node| node.with_name("validator-0")); (1..9).fold(r, |acc, i| { acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) @@ -88,12 +99,20 @@ async fn build_network_config() -> Result { p.with_id(PARA_ID) .with_default_command("test-parachain") .with_default_image(images.cumulus.as_str()) - .with_chain("elastic-scaling-multi-block-slot") + .with_chain("block-bundling") .with_default_args(vec![ ("--authoring").into(), ("slot-based").into(), - ("-lparachain=trace,aura=debug").into(), + ("-lparachain=debug,aura=trace,runtime=trace").into(), ]) + .with_genesis_overrides(json!({ + "glutton": { + "compute": "0.1", + "storage": "0", + "trashDataCount": 5000, + "blockLength": "0" + } + })) .with_collator(|n| n.with_name("collator-0")) .with_collator(|n| n.with_name("collator-1")) .with_collator(|n| n.with_name("collator-2")) diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs new file mode 100644 index 0000000000000..baf1e719d1a57 --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/tracing_block.rs @@ -0,0 +1,154 @@ +// This file is part of Cumulus. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::utils::initialize_network; +use anyhow::anyhow; +use cumulus_zombienet_sdk_helpers::submit_extrinsic_and_wait_for_finalization_success; +use serde_json::json; +use sp_rpc::tracing::TraceBlockResponse; +use zombienet_sdk::{ + subxt::{dynamic::Value, ext::subxt_rpcs::rpc_params, OnlineClient, PolkadotConfig}, + subxt_signer::sr25519::dev, + NetworkConfig, NetworkConfigBuilder, +}; + +const PARA_ID: u32 = 2400; + +/// A test that sends a transfer transaction, waits for it to be finalized, and then runs the +/// tracing_block rpc for the block containing the transfer. +#[tokio::test(flavor = "multi_thread")] +async fn block_bundling_tracing_block() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + log::info!("Spawning network"); + let config = build_network_config().await?; + let network = initialize_network(config).await?; + + let para_node = network.get_node("collator-0")?; + let para_client: OnlineClient = para_node.wait_client().await?; + + // Create a balance transfer transaction + let alice = dev::alice(); + let bob = dev::bob().public_key(); + let transfer_amount = 1_000_000_000_000u128; // 1 unit with 12 decimals + + log::info!("Creating balance transfer transaction"); + let transfer_call = zombienet_sdk::subxt::dynamic::tx( + "Balances", + "transfer_allow_death", + vec![Value::unnamed_variant("Id", [Value::from_bytes(bob)]), Value::u128(transfer_amount)], + ); + + // Submit the transfer transaction and wait for finalization + log::info!("Submitting transfer transaction and waiting for finalization"); + let transfer_block_hash = + submit_extrinsic_and_wait_for_finalization_success(¶_client, &transfer_call, &alice) + .await?; + + log::info!("Transfer transaction finalized in block: {:?}", transfer_block_hash); + + // Get RPC client to make tracing_block call + let rpc_client = para_node.rpc().await?; + + log::info!("Calling tracing_block 
RPC for the block containing the transfer"); + + // Make the tracing_block RPC call for the block containing our transfer + let trace_result: TraceBlockResponse = rpc_client + .request( + "state_traceBlock", + rpc_params![ + format!("{:?}", transfer_block_hash), + None::, + None::, + None:: + ], + ) + .await?; + + log::info!("Successfully received tracing result for transfer block"); + + // Decode and verify the BlockTrace is successful + match trace_result { + TraceBlockResponse::TraceError(error) => { + Err(anyhow!("Block tracing failed: {}", error.error)) + }, + TraceBlockResponse::BlockTrace(_) => { + log::info!("✅ Block trace successful!"); + Ok(()) + }, + } +} + +async fn build_network_config() -> Result { + // images are not relevant for `native`, but we leave it here in case we use `k8s` some day + let images = zombienet_sdk::environment::get_images_from_env(); + log::info!("Using images: {images:?}"); + + NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=trace").into()]) + .with_default_resources(|resources| { + // These settings are applicable only for `k8s` provider. + // Leaving them in case we switch to `k8s` some day. + resources.with_request_cpu(4).with_request_memory("4G") + }) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 7, + "max_validators_per_core": 1 + } + } + } + })) + // Have to set a `with_validator` outside of the loop below, so that `r` has the + // right type. 
+ .with_validator(|node| node.with_name("validator-0")); + (1..9).fold(r, |acc, i| { + acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) + }) + }) + .with_parachain(|p| { + p.with_id(PARA_ID) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("block-bundling") + .with_default_args(vec![ + ("--authoring").into(), + ("slot-based").into(), + ("-lparachain=debug,aura=trace").into(), + ("--enable-offchain-indexing=true").into(), + ]) + .with_collator(|n| n.with_name("collator-0")) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + }) +} diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs new file mode 100644 index 0000000000000..908bc959659a7 --- /dev/null +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/block_bundling/warp_sync.rs @@ -0,0 +1,179 @@ +// This file is part of Cumulus. + +// Copyright (C) Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::utils::{initialize_network, BEST_BLOCK_METRIC, FINALIZED_BLOCK_METRIC}; +use anyhow::anyhow; +use cumulus_zombienet_sdk_helpers::assign_cores; +use serde_json::json; +use std::time::Duration; +use zombienet_orchestrator::network::node::LogLineCountOptions; +use zombienet_sdk::{ + subxt::{OnlineClient, PolkadotConfig}, + AddCollatorOptions, NetworkConfig, NetworkConfigBuilder, +}; + +const PARA_ID: u32 = 2400; + +/// Warp-sync regression test for block bundling. +/// +/// Verifies that a fresh full node can warp-sync a chain that already has bundled blocks +/// (with BundleInfo/CoreInfo digests). +/// +/// When a fresh node joins, it warp-syncs the relay chain (jumping to a finalized target +/// with `StateAction::ApplyChanges`), then backfills the gap (blocks #1..#target) via +/// gap sync with `StateAction::Skip`. +/// +/// `SlotBasedBlockImport::import_block` must respect both `StateAction::Skip` and +/// `ApplyChanges`, and not attempt to call `execute_block_and_collect_storage_proof` +/// for these blocks, since the parent state is unavailable. +/// +/// If the guard is wrong, the full node fails to import blocks and never catches up. +#[tokio::test(flavor = "multi_thread")] +async fn warp_sync_with_bundled_blocks() -> Result<(), anyhow::Error> { + let _ = env_logger::try_init_from_env( + env_logger::Env::default().filter_or(env_logger::DEFAULT_FILTER_ENV, "info"), + ); + + log::info!("Spawning network without full node"); + let config = build_network_config().await?; + let mut network = initialize_network(config).await?; + + let relay_node = network.get_node("validator-0")?; + let relay_client: OnlineClient = relay_node.wait_client().await?; + + // Assign 2 extra cores (zombienet auto-assigns 1), for 3 total. + assign_cores(&relay_client, PARA_ID, vec![0, 1]).await?; + + // Wait for steady-state bundled block production: collator finalizes parachain block #72. 
+ log::info!("Waiting for collator to finalize parachain block #72"); + network + .get_node("collator-0")? + .wait_metric_with_timeout(FINALIZED_BLOCK_METRIC, |b| b >= 72.0, 200u64) + .await?; + + // Query collator's current best block to set a sync target. + let target_block = network.get_node("collator-0")?.reports(BEST_BLOCK_METRIC).await? as u64; + log::info!("Full node sync target: #{target_block}"); + + // Add a fresh full node that will warp-sync to the already-running chain. + log::info!("Adding fresh full node with warp sync"); + let col_opts = AddCollatorOptions { + is_validator: false, + args: vec![ + ("--sync=warp").into(), + ("-lsync=debug,parachain=debug,sync::cumulus=debug,aura=trace").into(), + ("--relay-chain-rpc-urls", "{{ZOMBIE:validator-0:ws_uri}}").into(), + ], + ..Default::default() + }; + network.add_collator("para-full-node", col_opts, PARA_ID).await?; + + let full_node = network.get_node("para-full-node")?; + + // Wait for the full node to sync and catch up. + // If the bug is present, the node fails to import bundled blocks and never advances. + log::info!("Waiting for full node best block to reach #{target_block}"); + full_node + .wait_metric_with_timeout(BEST_BLOCK_METRIC, |b| b >= target_block as f64, 120u64) + .await?; + log::info!("Full node synced past #{target_block}"); + + // Verify the full node actually used warp sync (not full sync). + log::info!("Verifying warp sync was used"); + let option_1_line = LogLineCountOptions::new(|n| n == 1, Duration::from_secs(5), false); + let result = full_node + .wait_log_line_count_with_timeout( + r"\[Parachain\] Warp sync is complete", + false, + option_1_line, + ) + .await?; + if !result.success() { + return Err(anyhow!("Full node did not complete parachain warp sync")); + } + + // Make sure the full node keeps progressing on live blocks after the initial sync. + // Wait for it to advance 24 blocks beyond the collator's current best. 
+ let collator_best = network.get_node("collator-0")?.reports(BEST_BLOCK_METRIC).await? as u64; + let live_target = (collator_best + 24) as f64; + log::info!("Collator best: #{collator_best}, waiting for full node to reach #{live_target}"); + + full_node + .wait_metric_with_timeout(BEST_BLOCK_METRIC, |b| b >= live_target, 120u64) + .await?; + + log::info!("Test finished successfully"); + Ok(()) +} + +async fn build_network_config() -> Result { + let images = zombienet_sdk::environment::get_images_from_env(); + log::info!("Using images: {images:?}"); + + NetworkConfigBuilder::new() + .with_relaychain(|r| { + let r = r + .with_chain("rococo-local") + .with_default_command("polkadot") + .with_default_image(images.polkadot.as_str()) + .with_default_args(vec![("-lparachain=trace").into()]) + .with_default_resources(|resources| { + resources.with_request_cpu(4).with_request_memory("4G") + }) + .with_genesis_overrides(json!({ + "configuration": { + "config": { + "scheduler_params": { + "num_cores": 2, + "max_validators_per_core": 1 + } + } + } + })) + .with_validator(|node| node.with_name("validator-0")); + (1..9).fold(r, |acc, i| { + acc.with_validator(|node| node.with_name(&format!("validator-{i}"))) + }) + }) + .with_parachain(|p| { + p.with_id(PARA_ID) + .with_default_command("test-parachain") + .with_default_image(images.cumulus.as_str()) + .with_chain("block-bundling") + .with_default_args(vec![ + ("--authoring").into(), + ("slot-based").into(), + ("-lparachain=trace,aura=trace,sync::cumulus=trace,consensus::common::parent_search=debug,runtime::parachain-system=debug").into(), + ]) + .with_genesis_overrides(json!({ + "testPallet": { + "enableBigValueMove": true + } + })) + .with_collator(|n| n.with_name("collator-0")) + .with_collator(|n| n.with_name("collator-1")) + }) + .with_global_settings(|global_settings| match std::env::var("ZOMBIENET_SDK_BASE_DIR") { + Ok(val) => global_settings.with_base_dir(val), + _ => global_settings, + }) + .build() + .map_err(|e| { + 
let errs = e.into_iter().map(|e| e.to_string()).collect::>().join(" "); + anyhow!("config errs: {errs}") + }) +} diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/asset_hub_westend.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/asset_hub_westend.rs index ea18fb310b322..f05f604941f3d 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/asset_hub_westend.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/asset_hub_westend.rs @@ -86,14 +86,14 @@ async fn elastic_scaling_asset_hub_westend() -> Result<(), anyhow::Error> { assign_cores(&relay_client, PARA_ID, vec![0]).await?; - assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 3..18)]).await?; + assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 3..18)], []).await?; // 1 core is assigned by default, we are assigning 2 more cores: 0 and 1. assign_cores(&relay_client, PARA_ID, vec![1]).await?; log::info!("Ensure elastic scaling works, 3 blocks should be produced in each 6s slot"); - assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 50..61)]).await?; + assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 50..61)], []).await?; log::info!("Test finished successfully."); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs index 3ad06efbfae90..6bc71464fdcbc 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/mod.rs @@ -1,8 +1,21 @@ +// This file is part of Cumulus. + // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + mod asset_hub_westend; -mod multiple_blocks_per_slot; mod pov_recovery; mod slot_based_authoring; mod slot_based_rp_offset; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs index e0cef3218741a..220ba829912ae 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/pov_recovery.rs @@ -47,7 +47,7 @@ async fn elastic_scaling_pov_recovery() -> Result<(), anyhow::Error> { assign_cores(&relay_client, PARA_ID, vec![0]).await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 40..65)]).await?; + assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 40..65)], []).await?; // We want to make sure that none of the consensus hook checks fail, even if the chain makes // progress. If below log line occurred 1 or more times then test failed. 
@@ -164,8 +164,8 @@ async fn build_network_config() -> Result { .with_limit_cpu(2) .with_limit_memory("4G") }) - .with_collator(|n| { - n.with_name("recovery-target").validator(false).with_args(vec![ + .with_fullnode(|n| { + n.with_name("recovery-target").with_args(vec![ ("-lparachain::availability=trace,sync=debug,parachain=debug,cumulus-pov-recovery=debug,cumulus-consensus=debug").into(), ("--disable-block-announcements").into(), ("--in-peers", "0").into(), diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs index 8f7ab38dab99e..4d099978a1233 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/elastic_scaling/upgrade_to_3_cores.rs @@ -1,11 +1,8 @@ // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 -use anyhow::anyhow; -use serde_json::json; - use crate::utils::initialize_network; - +use anyhow::anyhow; use cumulus_test_runtime::{ elastic_scaling::WASM_BINARY as WASM_ELASTIC_SCALING, elastic_scaling_12s_slot::WASM_BINARY as WASM_ELASTIC_SCALING_12S_SLOT, @@ -15,6 +12,7 @@ use cumulus_zombienet_sdk_helpers::{ }; use polkadot_primitives::Id as ParaId; use rstest::rstest; +use serde_json::json; use zombienet_sdk::{ subxt::{OnlineClient, PolkadotConfig}, subxt_signer::sr25519::dev, @@ -49,10 +47,10 @@ async fn elastic_scaling_upgrade_to_3_cores( if async_backing { log::info!("Ensuring parachain makes progress making 6s blocks"); - assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 15..21)]).await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 15..21)], []).await?; } else { log::info!("Ensuring parachain makes progress making 12s blocks"); - assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 7..12)]).await?; + assert_para_throughput(&alice_client, 
20, [(ParaId::from(PARA_ID), 7..12)], []).await?; } assign_cores(&alice_client, PARA_ID, vec![1, 2]).await?; @@ -87,7 +85,7 @@ async fn elastic_scaling_upgrade_to_3_cores( ); log::info!("Ensure elastic scaling works, 3 blocks should be produced in each 6s slot"); - assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 50..61)]).await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 50..61)], []).await?; Ok(()) } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/full_node_catching_up.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/full_node_catching_up.rs index 1c80f90c26d8f..420787fe00942 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/full_node_catching_up.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/full_node_catching_up.rs @@ -32,7 +32,7 @@ async fn full_node_catching_up() -> Result<(), anyhow::Error> { let relay_client: OnlineClient = relay_alice.wait_client().await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 2..40)]).await?; + assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 2..40)], []).await?; for (name, timeout_secs) in [("dave", 250u64), ("eve", 250u64)] { log::info!("Ensuring {name} reports expected block height"); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/migrate_solo.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/migrate_solo.rs index 3c1be1d5e4840..f5c067c1137ae 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/migrate_solo.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/migrate_solo.rs @@ -49,7 +49,7 @@ async fn migrate_solo_to_para() -> Result<(), anyhow::Error> { let alice_client: OnlineClient = alice.wait_client().await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 2..40)]).await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 2..40)], 
[]).await?; let dave = network.get_node("dave")?; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs index cd85cd87b9f9a..5fc3d0385e769 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/mod.rs @@ -1,6 +1,7 @@ // Copyright (C) Parity Technologies (UK) Ltd. // SPDX-License-Identifier: Apache-2.0 +mod block_bundling; mod bootnodes; mod elastic_scaling; mod full_node_catching_up; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/parachain_runtime_upgrade_slot_duration_18s.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/parachain_runtime_upgrade_slot_duration_18s.rs index 079e8fffbdf62..f7d533ecef978 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/parachain_runtime_upgrade_slot_duration_18s.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/parachain_runtime_upgrade_slot_duration_18s.rs @@ -81,7 +81,7 @@ async fn parachain_runtime_upgrade_slot_duration_18s() -> Result<(), anyhow::Err log::info!("Checking that parachain continues producing blocks after upgrade..."); - assert_para_throughput(&relay_client, 15, [(ParaId::from(PARA_ID), 10..30)]).await?; + assert_para_throughput(&relay_client, 15, [(ParaId::from(PARA_ID), 10..30)], []).await?; Ok(()) } diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_recovery.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_recovery.rs index 5c0b2e1236cf2..9d8757a493480 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_recovery.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/pov_recovery.rs @@ -54,7 +54,7 @@ async fn pov_recovery() -> Result<(), anyhow::Error> { assert_para_is_registered(&validator_client, ParaId::from(PARA_ID), 30).await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput(&validator_client, 20, [(ParaId::from(PARA_ID), 2..20)]).await?; + 
assert_para_throughput(&validator_client, 20, [(ParaId::from(PARA_ID), 2..20)], []).await?; for (name, timeout_secs) in [("bob", 600u64)] { log::info!("Checking block production for {name} within {timeout_secs}s"); diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/rpc_collator_build_blocks.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/rpc_collator_build_blocks.rs index 3348f6cdd3aae..a27ac9f4a3383 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/rpc_collator_build_blocks.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/rpc_collator_build_blocks.rs @@ -31,7 +31,7 @@ async fn rpc_collator_builds_blocks() -> Result<(), anyhow::Error> { let alice_client: OnlineClient = alice.wait_client().await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 2..40)]).await?; + assert_para_throughput(&alice_client, 20, [(ParaId::from(PARA_ID), 2..40)], []).await?; let dave = network.get_node("dave")?; let eve = network.get_node("eve")?; diff --git a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/sync_blocks.rs b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/sync_blocks.rs index 952ad681082d2..b9ab9c123c49e 100644 --- a/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/sync_blocks.rs +++ b/cumulus/zombienet/zombienet-sdk/tests/zombie_ci/sync_blocks.rs @@ -28,7 +28,7 @@ async fn sync_blocks_from_tip_without_connected_collator() -> Result<(), anyhow: let relay_client: OnlineClient = relay_alice.wait_client().await?; log::info!("Ensuring parachain making progress"); - assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 5..11)]).await?; + assert_para_throughput(&relay_client, 10, [(ParaId::from(PARA_ID), 5..11)], []).await?; let para_ferdie = network.get_node("ferdie")?; let para_eve = network.get_node("eve")?; diff --git a/polkadot/node/subsystem-util/src/runtime/mod.rs b/polkadot/node/subsystem-util/src/runtime/mod.rs index 667aa709d7c4e..62e664d881d46 100644 --- 
a/polkadot/node/subsystem-util/src/runtime/mod.rs +++ b/polkadot/node/subsystem-util/src/runtime/mod.rs @@ -619,95 +619,3 @@ pub async fn fetch_validation_code_bomb_limit( res } } - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn iter_claims_at_depth_for_para_works() { - let claim_queue = ClaimQueueSnapshot(BTreeMap::from_iter( - [ - ( - CoreIndex(0), - VecDeque::from_iter([ParaId::from(1), ParaId::from(2), ParaId::from(1)]), - ), - ( - CoreIndex(1), - VecDeque::from_iter([ParaId::from(1), ParaId::from(1), ParaId::from(2)]), - ), - ( - CoreIndex(2), - VecDeque::from_iter([ParaId::from(1), ParaId::from(2), ParaId::from(3)]), - ), - ( - CoreIndex(3), - VecDeque::from_iter([ParaId::from(2), ParaId::from(1), ParaId::from(3)]), - ), - ] - .into_iter(), - )); - - // Test getting claims for para_id 1 at depth 0: cores 0, 1, 2 - let depth_0_cores = - claim_queue.iter_claims_at_depth_for_para(0, 1u32.into()).collect::>(); - assert_eq!(depth_0_cores.len(), 3); - assert_eq!(depth_0_cores, vec![CoreIndex(0), CoreIndex(1), CoreIndex(2)]); - - // Test getting claims for para_id 1 at depth 1: cores 1, 3 - let depth_1_cores = - claim_queue.iter_claims_at_depth_for_para(1, 1u32.into()).collect::>(); - assert_eq!(depth_1_cores.len(), 2); - assert_eq!(depth_1_cores, vec![CoreIndex(1), CoreIndex(3)]); - - // Test getting claims for para_id 1 at depth 2: core 0 - let depth_2_cores = - claim_queue.iter_claims_at_depth_for_para(2, 1u32.into()).collect::>(); - assert_eq!(depth_2_cores.len(), 1); - assert_eq!(depth_2_cores, vec![CoreIndex(0)]); - - // Test getting claims for para_id 1 at depth 3: no claims - let depth_3_cores = - claim_queue.iter_claims_at_depth_for_para(3, 1u32.into()).collect::>(); - assert!(depth_3_cores.is_empty()); - - // Test getting claims for para_id 2 at depth 0: core 3 - let depth_0_cores = - claim_queue.iter_claims_at_depth_for_para(0, 2u32.into()).collect::>(); - assert_eq!(depth_0_cores.len(), 1); - assert_eq!(depth_0_cores, vec![CoreIndex(3)]); - - 
// Test getting claims for para_id 2 at depth 1: cores 0, 2 - let depth_1_cores = - claim_queue.iter_claims_at_depth_for_para(1, 2u32.into()).collect::>(); - assert_eq!(depth_1_cores.len(), 2); - assert_eq!(depth_1_cores, vec![CoreIndex(0), CoreIndex(2)]); - - // Test getting claims for para_id 2 at depth 2: core 1 - let depth_2_cores = - claim_queue.iter_claims_at_depth_for_para(2, 2u32.into()).collect::>(); - assert_eq!(depth_2_cores.len(), 1); - assert_eq!(depth_2_cores, vec![CoreIndex(1)]); - - // Test getting claims for para_id 3 at depth 0: no claims - let depth_0_cores = - claim_queue.iter_claims_at_depth_for_para(0, 3u32.into()).collect::>(); - assert!(depth_0_cores.is_empty()); - - // Test getting claims for para_id 3 at depth 1: no claims - let depth_1_cores = - claim_queue.iter_claims_at_depth_for_para(1, 3u32.into()).collect::>(); - assert!(depth_1_cores.is_empty()); - - // Test getting claims for para_id 3 at depth 2: cores 2, 3 - let depth_2_cores = - claim_queue.iter_claims_at_depth_for_para(2, 3u32.into()).collect::>(); - assert_eq!(depth_2_cores.len(), 2); - assert_eq!(depth_2_cores, vec![CoreIndex(2), CoreIndex(3)]); - - // Test getting claims for non-existent para_id at depth 0: no claims - let depth_0_cores = - claim_queue.iter_claims_at_depth_for_para(0, 99u32.into()).collect::>(); - assert!(depth_0_cores.is_empty()); - } -} diff --git a/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs b/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs index 886f039086918..327f2be94894d 100644 --- a/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs +++ b/polkadot/zombienet-sdk-tests/tests/disabling/slashing.rs @@ -92,7 +92,7 @@ async fn dispute_past_session_slashing() -> Result<(), anyhow::Error> { let relay_client: OnlineClient = honest.wait_client().await?; // Wait for some para blocks being produced - assert_para_throughput(&relay_client, 20, [(ParaId::from(1337), 10..20)]).await?; + assert_para_throughput(&relay_client, 20, 
[(ParaId::from(1337), 10..20)], []).await?; // Let's initiate a dispute malus.resume().await?; diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs index 9ceed94642f37..bef39ccc62eb6 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/basic_3cores.rs @@ -82,6 +82,7 @@ async fn basic_3cores_test() -> Result<(), anyhow::Error> { &relay_client, 15, [(ParaId::from(2000), 38..46), (ParaId::from(2001), 12..16)], + [], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs index efe4b137f66f9..26de41cac4ded 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/doesnt_break_parachains.rs @@ -75,7 +75,7 @@ async fn doesnt_break_parachains_test() -> Result<(), anyhow::Error> { let para_id = ParaId::from(2000); // Expect the parachain to be making normal progress, 1 candidate backed per relay chain block. // Lowering to 12 to make sure CI passes. 
- assert_para_throughput(&relay_client, 15, [(para_id, 12..16)]).await?; + assert_para_throughput(&relay_client, 15, [(para_id, 12..16)], []).await?; let para_client = para_node.wait_client().await?; // Assert the parachain finalized block height is also on par with the number of backed diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs index a6df53b66019f..2582386d9310b 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_12cores.rs @@ -93,7 +93,7 @@ async fn slot_based_12cores_test() -> Result<(), anyhow::Error> { // change will be counted. // Since the calculated backed candidate count is theoretical and the CI tests are observed to // occasionally fail, let's apply 15% tolerance to the expected range: 170 - 15% = 144 - assert_para_throughput(&relay_client, 15, [(ParaId::from(2300), 144..181)]).await?; + assert_para_throughput(&relay_client, 15, [(ParaId::from(2300), 144..181)], []).await?; // Expect that `collator-5` claims at least 3 slots during this run. 
let result = para_node diff --git a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs index b9cada7f64362..1913979d4e289 100644 --- a/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs +++ b/polkadot/zombienet-sdk-tests/tests/elastic_scaling/slot_based_3cores.rs @@ -108,6 +108,7 @@ async fn slot_based_3cores_test() -> Result<(), anyhow::Error> { &relay_client, 15, [(ParaId::from(2100), 34..46), (ParaId::from(2200), 34..46)], + [], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/approval_voting_coalescing.rs b/polkadot/zombienet-sdk-tests/tests/functional/approval_voting_coalescing.rs index 399ed5a19b107..bd51069b5d4f0 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/approval_voting_coalescing.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/approval_voting_coalescing.rs @@ -4,7 +4,6 @@ // Test that checks approval voting coalescing does not lag finality. 
use anyhow::anyhow; - use cumulus_zombienet_sdk_helpers::{assert_finality_lag, assert_para_throughput}; use polkadot_primitives::Id as ParaId; use serde_json::json; @@ -90,6 +89,7 @@ async fn approval_voting_coalescing_test() -> Result<(), anyhow::Error> { (ParaId::from(2006), 11..35), (ParaId::from(2007), 11..35), ], + [], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs b/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs index ed63d7a1bd748..e3129cde5bc40 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/approved_peer_mixed_validators.rs @@ -114,6 +114,7 @@ async fn approved_peer_mixed_validators_test() -> Result<(), anyhow::Error> { &relay_client, 15, [(ParaId::from(2000), 6..15), (ParaId::from(2001), 11..16)], + [], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs index 70c8cd78a7698..52dda31c4230e 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/async_backing_6_seconds_rate.rs @@ -87,6 +87,7 @@ async fn async_backing_6_seconds_rate_test() -> Result<(), anyhow::Error> { &relay_client, 15, [(ParaId::from(2000), 11..16), (ParaId::from(2001), 11..16)], + [], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/chunk_fetching_network_compatibility.rs b/polkadot/zombienet-sdk-tests/tests/functional/chunk_fetching_network_compatibility.rs index 5ecaf926cbf25..eac17eb2e756d 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/chunk_fetching_network_compatibility.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/chunk_fetching_network_compatibility.rs @@ -43,7 +43,7 @@ async fn chunk_fetching_network_compatibility_test() -> Result<(), 
anyhow::Error let relay_client = validator_nodes[0].wait_client().await?; log::info!("Checking parachain block production (all paras registered at genesis)"); let para_throughput: [(ParaId, Range); 2] = PARAS.map(|id| (ParaId::from(id), 2..6)); - assert_para_throughput(&relay_client, 5, para_throughput).await?; + assert_para_throughput(&relay_client, 5, para_throughput, []).await?; log::info!("All parachains producing blocks"); log::info!("Ensure approval checking works."); diff --git a/polkadot/zombienet-sdk-tests/tests/functional/collators_reputation_persistence.rs b/polkadot/zombienet-sdk-tests/tests/functional/collators_reputation_persistence.rs index 2c9bd43f5d389..3f892e0225bbe 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/collators_reputation_persistence.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/collators_reputation_persistence.rs @@ -145,6 +145,7 @@ async fn comprehensive_reputation_persistence_test() -> Result<(), anyhow::Error &validator0_client, 10, [(ParaId::from(PARA_ID_1), 8..11), (ParaId::from(PARA_ID_2), 8..11)], + [], ) .await?; @@ -194,6 +195,7 @@ async fn comprehensive_reputation_persistence_test() -> Result<(), anyhow::Error &relay_client, 5, [(ParaId::from(PARA_ID_1), 3..7), (ParaId::from(PARA_ID_2), 3..7)], + [], ) .await?; @@ -326,7 +328,8 @@ async fn comprehensive_reputation_persistence_test() -> Result<(), anyhow::Error // Verify para 2000 continues normal operation log::info!("Verifying para {} continues normal operation", PARA_ID_1); - assert_para_throughput(&validator0_client_after, 5, [(ParaId::from(PARA_ID_1), 3..7)]).await?; + assert_para_throughput(&validator0_client_after, 5, [(ParaId::from(PARA_ID_1), 3..7)], []) + .await?; log::info!("Phase 3 passed: Pruning successfully removed deregistered parachain"); Ok(()) diff --git a/polkadot/zombienet-sdk-tests/tests/functional/coretime_collation_fetching_fairness.rs b/polkadot/zombienet-sdk-tests/tests/functional/coretime_collation_fetching_fairness.rs index 
cadea914311f0..2d5cfc476cf09 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/coretime_collation_fetching_fairness.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/coretime_collation_fetching_fairness.rs @@ -120,6 +120,7 @@ async fn coretime_collation_fetching_fairness_test() -> Result<(), anyhow::Error &relay_client, 12, [(ParaId::from(2000), 6..10), (ParaId::from(2001), 2..5)], + [], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/coretime_shared_core.rs b/polkadot/zombienet-sdk-tests/tests/functional/coretime_shared_core.rs index 56194d5857150..0ce63b831674d 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/coretime_shared_core.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/coretime_shared_core.rs @@ -113,7 +113,7 @@ async fn coretime_shared_core_test() -> Result<(), anyhow::Error> { // time=6s. 4 paras share 1 core → slot every 24s, ~2 para blocks/slot (async backing). log::info!("Checking parachain block production"); let para_throughput: [(ParaId, Range); 4] = PARAS.map(|id| (ParaId::from(id), 5..15)); - assert_para_throughput(&relay_client, 40, para_throughput).await?; + assert_para_throughput(&relay_client, 40, para_throughput, []).await?; log::info!("All parachains producing blocks"); log::info!("Test finished successfully"); diff --git a/polkadot/zombienet-sdk-tests/tests/functional/dispute_freshly_finalized.rs b/polkadot/zombienet-sdk-tests/tests/functional/dispute_freshly_finalized.rs index 92424fbf2b7d1..36c89fc97d1e9 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/dispute_freshly_finalized.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/dispute_freshly_finalized.rs @@ -57,7 +57,7 @@ async fn dispute_freshly_finalized_test() -> Result<(), anyhow::Error> { // Ensure parachain made progress log::info!("Waiting for parachain {} to produce blocks", PARA_ID); - assert_para_throughput(&relay_client, 5, [(ParaId::from(PARA_ID), 2..6)]).await?; + assert_para_throughput(&relay_client, 5, 
[(ParaId::from(PARA_ID), 2..6)], []).await?; log::info!("Parachain {} is producing blocks", PARA_ID); // Ensure that malus is already attempting to dispute diff --git a/polkadot/zombienet-sdk-tests/tests/functional/dispute_old_finalized.rs b/polkadot/zombienet-sdk-tests/tests/functional/dispute_old_finalized.rs index 255619d419d4f..f04c7afd79073 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/dispute_old_finalized.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/dispute_old_finalized.rs @@ -18,7 +18,6 @@ // concluded. use anyhow::anyhow; - use cumulus_zombienet_sdk_helpers::assert_para_throughput; use serde_json::json; use tokio::time::Duration; @@ -108,7 +107,7 @@ async fn dispute_old_finalized() -> Result<(), anyhow::Error> { let malus = network.get_node("malus")?; log::info!("Waiting for parablocks to be produced"); - assert_para_throughput(&relay_client, 20, [(polkadot_primitives::Id::from(2000), 10..30)]) + assert_para_throughput(&relay_client, 20, [(polkadot_primitives::Id::from(2000), 10..30)], []) .await?; let result = malus diff --git a/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs b/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs index ea53406633f9f..ddb691ab6214f 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/duplicate_collations.rs @@ -90,7 +90,7 @@ async fn duplicate_collations_test() -> Result<(), anyhow::Error> { log::info!("2 more cores assigned to parachain-2000"); - assert_para_throughput(&relay_client, 15, [(ParaId::from(2000), 40..46)]).await?; + assert_para_throughput(&relay_client, 15, [(ParaId::from(2000), 40..46)], []).await?; let log_line_options = LogLineCountOptions::new( |n| n == 1, diff --git a/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes.rs b/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes.rs index 421886898e212..207b681fdcaa7 100644 --- 
a/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes.rs @@ -44,7 +44,7 @@ async fn parachains_disputes_test() -> Result<(), anyhow::Error> { // Check that all parachains produce at least 5 blocks within 1 session and 5 blocks (RC) log::info!("Checking parachain block production (all paras registered at genesis)"); let para_throughput: [(ParaId, Range); 4] = PARAS.map(|id| (ParaId::from(id), 2..6)); - assert_para_throughput(&relay_client, 5, para_throughput).await?; + assert_para_throughput(&relay_client, 5, para_throughput, []).await?; log::info!("All parachains producing blocks"); // Check if disputes are initiated and concluded. diff --git a/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes_garbage_candidate.rs b/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes_garbage_candidate.rs index 6d24ca1b60ec1..12fca50cf2657 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes_garbage_candidate.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/parachains_disputes_garbage_candidate.rs @@ -46,7 +46,7 @@ async fn parachains_disputes_garbage_candidate_test() -> Result<(), anyhow::Erro // Check that all parachains produce at least 5 blocks within 1 session and 5 blocks (RC) log::info!("Checking parachain block production (all paras registered at genesis)"); let para_throughput: [(ParaId, Range); 3] = PARAS.map(|id| (ParaId::from(id), 2..6)); - assert_para_throughput(&relay_client, 5, para_throughput).await?; + assert_para_throughput(&relay_client, 5, para_throughput, []).await?; log::info!("All parachains producing blocks"); log::info!("Check there is an offence report after dispute conclusion."); diff --git a/polkadot/zombienet-sdk-tests/tests/functional/parachains_max_tranche0.rs b/polkadot/zombienet-sdk-tests/tests/functional/parachains_max_tranche0.rs index bef001f544d0d..3dcad7bf6ab98 100644 --- 
a/polkadot/zombienet-sdk-tests/tests/functional/parachains_max_tranche0.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/parachains_max_tranche0.rs @@ -65,6 +65,7 @@ async fn parachains_max_tranche0_test() -> Result<(), anyhow::Error> { (ParaId::from(2003u32), 5..100), (ParaId::from(2004u32), 5..100), ], + [], ) .await?; log::info!("All parachains producing blocks"); diff --git a/polkadot/zombienet-sdk-tests/tests/functional/parachains_pvf.rs b/polkadot/zombienet-sdk-tests/tests/functional/parachains_pvf.rs index 3f414140112a0..42f716f11944f 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/parachains_pvf.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/parachains_pvf.rs @@ -44,7 +44,7 @@ async fn parachains_pvf_preparation_and_execution_test() -> Result<(), anyhow::E // Using 60 relay blocks as window (~180 seconds with 3s block time) log::info!("Checking parachain block production"); let para_throughput: [(ParaId, Range); 8] = PARAS.map(|id| (ParaId::from(id), 5..61)); - assert_para_throughput(&relay_client, 60, para_throughput).await?; + assert_para_throughput(&relay_client, 60, para_throughput, []).await?; log::info!("All parachains producing blocks"); relay_node diff --git a/polkadot/zombienet-sdk-tests/tests/functional/shared_core_idle_parachain.rs b/polkadot/zombienet-sdk-tests/tests/functional/shared_core_idle_parachain.rs index 11a40384f18a7..9af68bdda9c6e 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/shared_core_idle_parachain.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/shared_core_idle_parachain.rs @@ -93,7 +93,7 @@ async fn shared_core_idle_parachain_test() -> Result<(), anyhow::Error> { // Check that para 2000 is essentially getting 12-second block time, while para 2001 does not // produce anything. 
- assert_para_throughput(&relay_client, 15, [(ParaId::from(2000), 5..9)]).await?; + assert_para_throughput(&relay_client, 15, [(ParaId::from(2000), 5..9)], []).await?; assert_finality_lag(¶_node_2000.wait_client().await?, 5).await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/spam_statement_distribution_requests.rs b/polkadot/zombienet-sdk-tests/tests/functional/spam_statement_distribution_requests.rs index 8efe3990d8462..ad0c189b1e592 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/spam_statement_distribution_requests.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/spam_statement_distribution_requests.rs @@ -117,6 +117,7 @@ async fn spam_statement_distribution_requests_test() -> Result<(), anyhow::Error &relay_client, 2, [(ParaId::from(2000), 2..3), (ParaId::from(2001), 2..3)], + [], ) .await?; @@ -135,6 +136,7 @@ async fn spam_statement_distribution_requests_test() -> Result<(), anyhow::Error &relay_client, 10, [(ParaId::from(2000), 9..11), (ParaId::from(2001), 9..11)], + [], ) .await?; diff --git a/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs index 95d07e17ca06a..6fc04f58204a4 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/sync_backing.rs @@ -65,7 +65,7 @@ async fn sync_backing_test() -> Result<(), anyhow::Error> { let relay_client: OnlineClient = relay_node.wait_client().await?; - assert_para_throughput(&relay_client, 15, [(ParaId::from(2500), 5..9)]).await?; + assert_para_throughput(&relay_client, 15, [(ParaId::from(2500), 5..9)], []).await?; // Assert the parachain finalized block height is also on par with the number of backed // candidates. 
diff --git a/polkadot/zombienet-sdk-tests/tests/functional/systematic_chunk_recovery.rs b/polkadot/zombienet-sdk-tests/tests/functional/systematic_chunk_recovery.rs index 26b073d1be844..26238467ac35e 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/systematic_chunk_recovery.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/systematic_chunk_recovery.rs @@ -47,7 +47,7 @@ async fn systematic_chunk_recovery_test() -> Result<(), anyhow::Error> { // Check that all parachains produce at least 5 blocks within 1 session and 5 blocks (RC) log::info!("Checking parachain block production (all paras registered at genesis)"); let para_throughput: [(ParaId, Range); 2] = PARAS.map(|id| (ParaId::from(id), 2..6)); - assert_para_throughput(&alice_client, 5, para_throughput).await?; + assert_para_throughput(&alice_client, 5, para_throughput, []).await?; log::info!("All parachains producing blocks"); // remove alice and use the others validators for the rest of the checks. diff --git a/polkadot/zombienet-sdk-tests/tests/functional/validator_disabling.rs b/polkadot/zombienet-sdk-tests/tests/functional/validator_disabling.rs index 7fd443c1f68a0..6aa7ed310b670 100644 --- a/polkadot/zombienet-sdk-tests/tests/functional/validator_disabling.rs +++ b/polkadot/zombienet-sdk-tests/tests/functional/validator_disabling.rs @@ -88,7 +88,7 @@ async fn validator_disabling_test() -> Result<(), anyhow::Error> { log::info!("Waiting for parablocks to be produced"); let honest_validator = network.get_node("honest-validator-0")?; let relay_client: OnlineClient = honest_validator.wait_client().await?; - assert_para_throughput(&relay_client, 20, [(polkadot_primitives::Id::from(1000), 10..30)]) + assert_para_throughput(&relay_client, 20, [(polkadot_primitives::Id::from(1000), 10..30)], []) .await?; log::info!("Wait for a dispute to be initialized."); diff --git a/polkadot/zombienet-sdk-tests/tests/misc/paritydb.rs b/polkadot/zombienet-sdk-tests/tests/misc/paritydb.rs index 
2258f985d5d3a..49c1f853760e1 100644 --- a/polkadot/zombienet-sdk-tests/tests/misc/paritydb.rs +++ b/polkadot/zombienet-sdk-tests/tests/misc/paritydb.rs @@ -55,7 +55,7 @@ async fn paritydb_test() -> Result<(), anyhow::Error> { // Check that all parachains produce at least 5 blocks within 1 session and 5 blocks (RC) log::info!("Checking parachain block production (all paras registered at genesis)"); let para_throughput: [(ParaId, Range); 10] = PARAS.map(|id| (ParaId::from(id), 2..6)); - assert_para_throughput(&relay_client, 5, para_throughput).await?; + assert_para_throughput(&relay_client, 5, para_throughput, []).await?; log::info!("All parachains producing blocks"); log::info!("Check lag - approval / dispute conclusion."); diff --git a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_smoke.rs b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_smoke.rs index 7c83c19e5ee97..a9687dd244752 100644 --- a/polkadot/zombienet-sdk-tests/tests/smoke/coretime_smoke.rs +++ b/polkadot/zombienet-sdk-tests/tests/smoke/coretime_smoke.rs @@ -83,7 +83,8 @@ async fn coretime_smoke_test() -> Result<(), anyhow::Error> { // Wait for coretime chain to produce blocks log::info!("Waiting for coretime chain to produce blocks"); - assert_para_throughput(&alice_client, 30, [(ParaId::from(CORETIME_PARA_ID), 5..31)]).await?; + assert_para_throughput(&alice_client, 30, [(ParaId::from(CORETIME_PARA_ID), 5..31)], []) + .await?; log::info!("Coretime chain is producing blocks"); // Configure broker chain @@ -103,6 +104,7 @@ async fn coretime_smoke_test() -> Result<(), anyhow::Error> { &alice_client, 30, [(ParaId::from(CORETIME_PARA_ID), 5..31), (ParaId::from(TEST_PARA_ID), 5..31)], + [], ) .await?; log::info!("Parachain {} is producing blocks", TEST_PARA_ID); diff --git a/polkadot/zombienet-sdk-tests/tests/smoke/parachains_smoke.rs b/polkadot/zombienet-sdk-tests/tests/smoke/parachains_smoke.rs index 0dfcee7c48de2..5e898952a5535 100644 --- 
a/polkadot/zombienet-sdk-tests/tests/smoke/parachains_smoke.rs +++ b/polkadot/zombienet-sdk-tests/tests/smoke/parachains_smoke.rs @@ -40,7 +40,7 @@ async fn parachains_smoke_test() -> Result<(), anyhow::Error> { // Check parachain produces at least 5 blocks (60 seconds) // Using 10 relay blocks as measurement window log::info!("Checking parachain {} is producing blocks", PARA_ID); - assert_para_throughput(&alice_client, 5, [(ParaId::from(PARA_ID), 2..6)]).await?; + assert_para_throughput(&alice_client, 5, [(ParaId::from(PARA_ID), 2..6)], []).await?; log::info!("Parachain {} is producing blocks successfully", PARA_ID); log::info!("Test finished successfully"); diff --git a/polkadot/zombienet-sdk-tests/tests/smoke/precompile_pvf_smoke.rs b/polkadot/zombienet-sdk-tests/tests/smoke/precompile_pvf_smoke.rs index da406b7bfc8f8..9ea54daf9f3ce 100644 --- a/polkadot/zombienet-sdk-tests/tests/smoke/precompile_pvf_smoke.rs +++ b/polkadot/zombienet-sdk-tests/tests/smoke/precompile_pvf_smoke.rs @@ -124,7 +124,7 @@ async fn precompile_pvf_smoke_test() -> Result<(), anyhow::Error> { // Wait for parachain to produce blocks log::info!("Waiting for parachain {} to be registered and produce blocks", PARA_ID); - assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 5..21)]).await?; + assert_para_throughput(&relay_client, 20, [(ParaId::from(PARA_ID), 5..21)], []).await?; log::info!("Parachain {} is producing blocks", PARA_ID); // Check Dave didn't prepare PVF diff --git a/prdoc/pr_10477.prdoc b/prdoc/pr_10477.prdoc new file mode 100644 index 0000000000000..0fe85f988362f --- /dev/null +++ b/prdoc/pr_10477.prdoc @@ -0,0 +1,63 @@ +title: "Block Bundling Node Side" +doc: +- audience: Node Dev + description: | + Implements the node-side logic for block bundling (aka 500ms blocks) in parachains. + The main changes are in the slot-based collator: instead of building one block per core, + blocks are built as requested and distributed over the available cores. 
+ +crates: +- name: frame-support + bump: patch +- name: sp-trie + bump: minor +- name: frame-system + bump: patch +- name: sc-block-builder + bump: major +- name: sp-block-builder + bump: major +- name: sc-consensus + bump: minor +- name: sp-consensus-slots + bump: minor +- name: cumulus-primitives-core + bump: major +- name: cumulus-pallet-parachain-system + bump: major +- name: pallet-glutton + bump: patch +- name: cumulus-client-collator + bump: major +- name: cumulus-client-consensus-common + bump: major +- name: polkadot-node-subsystem-util + bump: minor +- name: sc-basic-authorship + bump: patch +- name: cumulus-client-consensus-aura + bump: major +- name: cumulus-client-proof-size-recording + bump: patch +- name: cumulus-client-service + bump: minor +- name: polkadot-omni-node-lib + bump: patch +- name: testnet-parachains-constants + bump: patch +- name: asset-hub-rococo-runtime + bump: patch +- name: coretime-westend-runtime + bump: patch +- name: penpal-runtime + bump: patch +- name: cumulus-pallet-xcmp-queue + bump: patch +- name: parachains-runtimes-test-utils + bump: patch +- name: bridge-hub-rococo-runtime + bump: patch +- name: bridge-hub-westend-runtime + bump: patch +- name: polkadot-sdk + bump: minor diff --git a/substrate/client/basic-authorship/src/basic_authorship.rs b/substrate/client/basic-authorship/src/basic_authorship.rs index b9cd2abc4ef42..96e117ccbf5c8 100644 --- a/substrate/client/basic-authorship/src/basic_authorship.rs +++ b/substrate/client/basic-authorship/src/basic_authorship.rs @@ -960,7 +960,7 @@ mod tests { let builder = BlockBuilderBuilder::new(&*client) .on_parent_block(genesis_header.hash()) .with_parent_block_number(0) - .enable_proof_recording() + .with_proof_recorder(Some(Default::default())) .build() .unwrap(); builder.estimate_block_size() + extrinsics[0].encoded_size() diff --git a/substrate/client/block-builder/src/lib.rs b/substrate/client/block-builder/src/lib.rs index a2fb9c7e91b89..81ad4e9f300ac 100644 --- 
a/substrate/client/block-builder/src/lib.rs +++ b/substrate/client/block-builder/src/lib.rs @@ -371,7 +371,7 @@ where size + self.api.proof_recorder().map_or(0, |pr| pr.estimate_encoded_size()) } - /// Returns the [`ProofRecorder`] set for the block building. + /// Returns the [`ProofRecorder`] used by the block builder. pub fn proof_recorder(&self) -> Option> { self.api.proof_recorder() } diff --git a/substrate/client/consensus/common/src/block_import.rs b/substrate/client/consensus/common/src/block_import.rs index cf30e8d4a8adf..076a7151cb8de 100644 --- a/substrate/client/consensus/common/src/block_import.rs +++ b/substrate/client/consensus/common/src/block_import.rs @@ -153,6 +153,17 @@ pub enum StateAction { Skip, } +impl std::fmt::Debug for StateAction { + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::ApplyChanges(_) => fmt.write_str("ApplyChanges(..)"), + Self::Execute => fmt.write_str("Execute"), + Self::ExecuteIfPossible => fmt.write_str("ExecuteIfPossible"), + Self::Skip => fmt.write_str("Skip"), + } + } +} + impl StateAction { /// Check if execution checks that require runtime calls should be skipped. pub fn skip_execution_checks(&self) -> bool { @@ -163,6 +174,16 @@ impl StateAction { StateAction::Skip => true, } } + + /// Returns as storage changes. 
+ pub fn as_storage_changes( + &self, + ) -> Option<&sp_state_machine::StorageChanges>> { + match self { + StateAction::ApplyChanges(StorageChanges::Changes(changes)) => Some(&changes), + _ => None, + } + } } impl From> for StateAction { diff --git a/substrate/frame/glutton/src/lib.rs b/substrate/frame/glutton/src/lib.rs index 8045280382ab4..eb5d5c3f2ea9a 100644 --- a/substrate/frame/glutton/src/lib.rs +++ b/substrate/frame/glutton/src/lib.rs @@ -47,6 +47,8 @@ use sp_runtime::{traits::Zero, FixedPointNumber, FixedU64}; pub use pallet::*; pub use weights::WeightInfo; +const LOG_TARGET: &str = "runtime::glutton"; + /// The size of each value in the `TrashData` storage in bytes. pub const VALUE_SIZE: usize = 1024; /// Max number of entries for the `TrashData` map. @@ -207,6 +209,8 @@ pub mod pallet { } fn on_idle(_: BlockNumberFor, remaining_weight: Weight) -> Weight { + log::debug!(target: LOG_TARGET, "Running `on_idle` with remaining weight: {remaining_weight:?}"); + let mut meter = WeightMeter::with_limit(remaining_weight); if meter.try_consume(T::WeightInfo::empty_on_idle()).is_err() { return T::WeightInfo::empty_on_idle(); @@ -216,6 +220,9 @@ pub mod pallet { Storage::::get().saturating_mul_int(meter.remaining().proof_size()); let computation_weight_limit = Compute::::get().saturating_mul_int(meter.remaining().ref_time()); + + log::debug!(target: LOG_TARGET, "Going to waste: proof_size {proof_size_limit:?}; compute {computation_weight_limit:?}"); + let mut meter = WeightMeter::with_limit(Weight::from_parts( computation_weight_limit, proof_size_limit, diff --git a/substrate/frame/support/src/traits/hooks.rs b/substrate/frame/support/src/traits/hooks.rs index b4fc657f37d2e..07ad76244c715 100644 --- a/substrate/frame/support/src/traits/hooks.rs +++ b/substrate/frame/support/src/traits/hooks.rs @@ -128,6 +128,7 @@ impl_for_tuples_attr! 
{ fn on_idle(n: BlockNumber, remaining_weight: Weight) -> Weight { let on_idle_functions: &[fn(BlockNumber, Weight) -> Weight] = &[for_tuples!( #( Tuple::on_idle ),* )]; + let mut weight = Weight::zero(); let len = on_idle_functions.len(); diff --git a/substrate/frame/system/src/limits.rs b/substrate/frame/system/src/limits.rs index f8b48ca210a7f..54828d876db7d 100644 --- a/substrate/frame/system/src/limits.rs +++ b/substrate/frame/system/src/limits.rs @@ -25,6 +25,7 @@ //! `DispatchClass`. This module contains configuration object for both resources, //! which should be passed to `frame_system` configuration when runtime is being set up. +use alloc::{format, string::String, vec::Vec}; use frame_support::{ dispatch::{DispatchClass, OneOrMany, PerDispatchClass}, weights::{constants, Weight}, @@ -142,7 +143,6 @@ impl BlockLengthBuilder { #[derive(Default, Debug)] pub struct ValidationErrors { pub has_errors: bool, - #[cfg(feature = "std")] pub errors: Vec, } @@ -150,7 +150,6 @@ macro_rules! error_assert { ($cond : expr, $err : expr, $format : expr $(, $params: expr )*$(,)*) => { if !$cond { $err.has_errors = true; - #[cfg(feature = "std")] { $err.errors.push(format!($format $(, &$params )*)); } } } diff --git a/substrate/frame/system/src/mocking.rs b/substrate/frame/system/src/mocking.rs index 833309e05ecc9..3e900eaf51845 100644 --- a/substrate/frame/system/src/mocking.rs +++ b/substrate/frame/system/src/mocking.rs @@ -28,20 +28,20 @@ pub type MockUncheckedExtrinsic = generic::Unchec >; /// An implementation of `sp_runtime::traits::Block` to be used in tests. -pub type MockBlock = generic::Block< +pub type MockBlock = generic::Block< generic::Header, - MockUncheckedExtrinsic, + MockUncheckedExtrinsic, >; /// An implementation of `sp_runtime::traits::Block` to be used in tests with u32 BlockNumber type. 
-pub type MockBlockU32 = generic::Block< +pub type MockBlockU32 = generic::Block< generic::Header, - MockUncheckedExtrinsic, + MockUncheckedExtrinsic, >; /// An implementation of `sp_runtime::traits::Block` to be used in tests with u128 BlockNumber /// type. -pub type MockBlockU128 = generic::Block< +pub type MockBlockU128 = generic::Block< generic::Header, - MockUncheckedExtrinsic, + MockUncheckedExtrinsic, >; diff --git a/substrate/primitives/block-builder/Cargo.toml b/substrate/primitives/block-builder/Cargo.toml index dcd6ba8a91d71..1266626e2daa4 100644 --- a/substrate/primitives/block-builder/Cargo.toml +++ b/substrate/primitives/block-builder/Cargo.toml @@ -16,10 +16,16 @@ workspace = true targets = ["x86_64-unknown-linux-gnu"] [dependencies] +codec = { features = ["derive"], workspace = true } sp-api = { workspace = true } sp-inherents = { workspace = true } sp-runtime = { workspace = true } [features] default = ["std"] -std = ["sp-api/std", "sp-inherents/std", "sp-runtime/std"] +std = [ + "codec/std", + "sp-api/std", + "sp-inherents/std", + "sp-runtime/std", +] diff --git a/substrate/primitives/block-builder/src/lib.rs b/substrate/primitives/block-builder/src/lib.rs index 62966524994c2..b6c0ab270450f 100644 --- a/substrate/primitives/block-builder/src/lib.rs +++ b/substrate/primitives/block-builder/src/lib.rs @@ -21,15 +21,14 @@ extern crate alloc; +use sp_inherents::{CheckInherentsResult, InherentData}; +use sp_runtime::{traits::Block as BlockT, ApplyExtrinsicResult}; #[cfg(feature = "std")] mod client_side; #[cfg(feature = "std")] pub use client_side::*; -use sp_inherents::{CheckInherentsResult, InherentData}; -use sp_runtime::{traits::Block as BlockT, ApplyExtrinsicResult}; - sp_api::decl_runtime_apis! { /// The `BlockBuilder` api trait that provides the required functionality for building a block. 
#[api_version(6)] diff --git a/substrate/primitives/consensus/slots/src/lib.rs b/substrate/primitives/consensus/slots/src/lib.rs index 21f6f2e95b1bd..4279184ac2116 100644 --- a/substrate/primitives/consensus/slots/src/lib.rs +++ b/substrate/primitives/consensus/slots/src/lib.rs @@ -164,20 +164,21 @@ impl SlotDuration { pub const fn from_millis(millis: u64) -> Self { Self(millis) } -} -impl SlotDuration { + /// Returns `self` as [`core::time::Duration`]. + pub const fn as_duration(&self) -> core::time::Duration { + core::time::Duration::from_millis(self.0) + } + /// Returns `self` as a `u64` representing the duration in milliseconds. pub const fn as_millis(&self) -> u64 { self.0 } } -#[cfg(feature = "std")] -impl SlotDuration { - /// Returns `self` as [`core::time::Duration`]. - pub const fn as_duration(&self) -> core::time::Duration { - core::time::Duration::from_millis(self.0) +impl From for SlotDuration { + fn from(duration: core::time::Duration) -> Self { + Self::from_millis(duration.as_millis() as u64) } } diff --git a/substrate/primitives/trie/Cargo.toml b/substrate/primitives/trie/Cargo.toml index 1d34f0fce0329..00f29de25dee4 100644 --- a/substrate/primitives/trie/Cargo.toml +++ b/substrate/primitives/trie/Cargo.toml @@ -22,7 +22,7 @@ harness = false [dependencies] ahash = { optional = true, workspace = true } -codec = { workspace = true } +codec = { features = ["derive"], workspace = true } foldhash = { workspace = true } hash-db = { workspace = true } hashbrown = { workspace = true } diff --git a/substrate/primitives/trie/src/proof_size_extension.rs b/substrate/primitives/trie/src/proof_size_extension.rs index 49d3036c4add0..36f7396a81d1f 100644 --- a/substrate/primitives/trie/src/proof_size_extension.rs +++ b/substrate/primitives/trie/src/proof_size_extension.rs @@ -62,6 +62,12 @@ impl ProofSizeExt { /// need to be replayed in the exact same order. 
pub struct RecordedProofSizeEstimations(pub VecDeque); +impl From> for RecordedProofSizeEstimations { + fn from(recordings: Vec) -> Self { + Self(recordings.into_iter().map(|x| x as usize).collect()) + } +} + /// Inner structure of [`RecordingProofSizeProvider`]. struct RecordingProofSizeProviderInner { inner: Box, diff --git a/substrate/primitives/trie/src/recorder.rs b/substrate/primitives/trie/src/recorder.rs index a7ec4fc73cdf6..eaa8028975cd2 100644 --- a/substrate/primitives/trie/src/recorder.rs +++ b/substrate/primitives/trie/src/recorder.rs @@ -21,7 +21,7 @@ //! to record storage accesses to the state to generate a [`StorageProof`]. use crate::{GenericMemoryDB, NodeCodec, StorageProof}; -use codec::Encode; +use codec::{Compact, Decode, Encode}; use hash_db::Hasher; use memory_db::KeyFunction; use parking_lot::{Mutex, MutexGuard}; @@ -42,11 +42,27 @@ const LOG_TARGET: &str = "trie-recorder"; /// A list of ignored nodes for [`Recorder`]. /// /// These nodes when passed to a recorder will be ignored and not recorded by the recorder. 
-#[derive(Clone)] +#[derive(Clone, Debug)] pub struct IgnoredNodes { nodes: HashSet, } +impl Encode for IgnoredNodes { + fn encode(&self) -> Vec { + let mut encoded = Compact::(self.nodes.len() as _).encode(); + self.nodes.iter().for_each(|n| n.encode_to(&mut encoded)); + encoded + } +} + +impl Decode for IgnoredNodes { + fn decode(input: &mut I) -> Result { + let len = Compact::::decode(input)?; + let data = codec::decode_vec_with_len(input, len.0 as _)?; + Ok(Self { nodes: HashSet::from_iter(data.into_iter()) }) + } +} + impl Default for IgnoredNodes { fn default() -> Self { Self { nodes: HashSet::default() } diff --git a/umbrella/Cargo.toml b/umbrella/Cargo.toml index b6f7d5764c508..b52c2a416a381 100644 --- a/umbrella/Cargo.toml +++ b/umbrella/Cargo.toml @@ -892,6 +892,7 @@ node = [ "cumulus-client-network", "cumulus-client-parachain-inherent", "cumulus-client-pov-recovery", + "cumulus-client-proof-size-recording", "cumulus-client-service", "cumulus-relay-chain-inprocess-interface", "cumulus-relay-chain-interface", @@ -2305,6 +2306,11 @@ default-features = false optional = true path = "../cumulus/client/pov-recovery" +[dependencies.cumulus-client-proof-size-recording] +default-features = false +optional = true +path = "../cumulus/client/proof-size-recording" + [dependencies.cumulus-client-service] default-features = false optional = true diff --git a/umbrella/src/lib.rs b/umbrella/src/lib.rs index 55c8c45846f5f..ce59ebd279f7c 100644 --- a/umbrella/src/lib.rs +++ b/umbrella/src/lib.rs @@ -109,6 +109,10 @@ pub use cumulus_client_parachain_inherent; #[cfg(feature = "cumulus-client-pov-recovery")] pub use cumulus_client_pov_recovery; +/// Storage proof size recording utilities. +#[cfg(feature = "cumulus-client-proof-size-recording")] +pub use cumulus_client_proof_size_recording; + /// Common functions used to assemble the components of a parachain node. #[cfg(feature = "cumulus-client-service")] pub use cumulus_client_service;